from __future__ import division, absolute_import, print_function
import platform
import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf(object):
def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
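# (float16 has an 11-bit significand -- 10 stored bits plus the implicit
# leading bit -- so every integer of magnitude up to 2**11 == 2048 is exact.)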
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
@pytest.mark.parametrize("offset", [None, "up", "down"])
@pytest.mark.parametrize("shift", [None, "up", "down"])
@pytest.mark.parametrize("float_t", [np.float32, np.float64])
def test_half_conversion_rounding(self, float_t, shift, offset):
# Assumes that round to even is used during casting.
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
# Test all (positive) finite numbers; the denormals are the most
# interesting cases, however:
f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
f16s_float = f16s_patterns.view(np.float16).astype(float_t)
# Shift the values by half a bit up or down (or do not shift),
if shift == "up":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
elif shift == "down":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
else:
f16s_float = f16s_float[1:-1]
# Increase the float by a minimal value:
if offset == "up":
f16s_float = np.nextafter(f16s_float, float_t(1e50))
elif offset == "down":
f16s_float = np.nextafter(f16s_float, float_t(-1e50))
# Convert back to float16 and its bit pattern:
res_patterns = f16s_float.astype(np.float16).view(np.uint16)
# The above calculation tries the original values, or the exact
# midpoints between the float16 values. It then further offsets them
# by as little as possible. If no offset occurs, "round to even"
# logic will be necessary; an arbitrarily small offset should always
# cause normal up/down rounding.
# Calculate the expected pattern:
cmp_patterns = f16s_patterns[1:-1].copy()
if shift == "down" and offset != "up":
shift_pattern = -1
elif shift == "up" and offset != "down":
shift_pattern = 1
else:
# There cannot be a shift: either shift is None, so all values round
# back to the original, or the shift is counteracted by the offset.
shift_pattern = 0
# If rounding occurs, is it normal rounding or round to even?
if offset is None:
# Round to even occurs, modify only non-even, cast to allow + (-1)
cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
else:
cmp_patterns.view(np.int16)[...] += shift_pattern
assert_equal(res_patterns, cmp_patterns)
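# Illustrative sketch of the round-to-even behaviour exercised above (not
# part of the test): the exact midpoint between float16(1.0) (pattern
# 0x3c00) and its successor (0x3c01) ties to the even pattern, while a
# value just above the midpoint rounds up.
#
#     >>> mid = 1.0 + 2.0**-11            # halfway between 0x3c00 and 0x3c01
#     >>> np.float16(mid).view(np.uint16) == 0x3c00
#     True
#     >>> np.float16(np.nextafter(mid, 2.0)).view(np.uint16) == 0x3c01
#     True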
@pytest.mark.parametrize(["float_t", "uint_t", "bits"],
[(np.float32, np.uint32, 23),
(np.float64, np.uint64, 52)])
def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
# Test specifically that all bits are considered when deciding
# whether round to even should occur (i.e. no bits are lost at the
# end). Compare also gh-12721. The most bits can get lost for the
# smallest denormal:
smallest_value = np.uint16(1).view(np.float16).astype(float_t)
assert smallest_value == 2**-24
# Will be rounded to zero based on round to even rule:
rounded_to_zero = smallest_value / float_t(2)
assert rounded_to_zero.astype(np.float16) == 0
# The significand will be all 0 for the float_t; test that we do not
# lose any of the lower bits:
for i in range(bits):
# slightly increasing the value should make it round up:
larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
larger_value = larger_pattern.view(float_t)
assert larger_value.astype(np.float16) == smallest_value
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_bits = self.finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert_equal(np.power(a16, 2).dtype, float16)
assert_equal(np.power(a16, 2.0).dtype, float16)
assert_equal(np.power(a16, b16).dtype, float16)
assert_equal(np.power(a16, b32).dtype, float16)
assert_equal(np.power(a16, a16).dtype, float16)
assert_equal(np.power(a16, a32).dtype, float32)
assert_equal(np.power(b16, 2).dtype, float64)
assert_equal(np.power(b16, 2.0).dtype, float64)
assert_equal(np.power(b16, b16).dtype, float16)
assert_equal(np.power(b16, b32).dtype, float32)
assert_equal(np.power(b16, a16).dtype, float16)
assert_equal(np.power(b16, a32).dtype, float32)
assert_equal(np.power(a32, a16).dtype, float32)
assert_equal(np.power(a32, b16).dtype, float32)
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
@pytest.mark.skipif(platform.machine() == "armv5tel",
reason="See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
bx16 = np.array((1e4,), dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
# Underflow errors
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14-2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-23), float16(4))
# Overflow errors
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
assert_raises_fpe('overflow', lambda a, b:a+b,
float16(65504), float16(17))
assert_raises_fpe('overflow', lambda a, b:a-b,
float16(-65504), float16(17))
assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
assert_raises_fpe('overflow', np.spacing, float16(65504))
# Invalid value errors
assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.nan))
assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0))
assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0))
assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan))
# These should not raise
float16(65472)+float16(32)
float16(2**-13)/float16(2)
float16(2**-14)/float16(2**10)
np.spacing(float16(-65504))
np.nextafter(float16(65504), float16(-np.inf))
np.nextafter(float16(-65504), float16(np.inf))
float16(2**-14)/float16(2**10)
float16(-2**-14)/float16(2**10)
float16(2**-14+2**-23)/float16(2)
float16(-2**-14-2**-23)/float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions for configuring Bokeh output.
'''
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
# Stdlib imports
import logging
logger = logging.getLogger(__name__)
import io, itertools, os, warnings
# Third-party imports
# Bokeh imports
from . import browserlib
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .models import Component
from .models.plots import GridPlot
from .models.widgets.layouts import HBox, VBox, VBoxForm
from .state import State
from .util.notebook import load_notebook, publish_display_data
from .util.string import decode_utf8
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
_new_param = {'tab': 2, 'window': 1}
_state = State()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def output_file(filename, title="Bokeh Plot", autosave=False, mode="inline", root_dir=None):
''' Configure the default output state to generate output saved
to a file when :func:`show` is called.
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
autosave (bool, optional) : whether to automatically save (default: False)
If True, then Bokeh plotting APIs may opt to automatically
save the file more frequently (e.g., after any plotting
command). If False, then the file is only saved upon calling
:func:`show` or :func:`save`.
mode (str, optional) : how to include BokehJS (default: ``'inline'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked, or any time a Bokeh plotting API
causes a save, if ``autosave`` is True.
'''
_state.output_file(
filename,
title=title,
autosave=autosave,
mode=mode,
root_dir=root_dir
)
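# Minimal usage sketch (the filename is hypothetical; assumes a glyph-bearing
# object created with the bokeh.plotting API):
#
#     from bokeh.plotting import figure
#     from bokeh.io import output_file, show
#
#     output_file("example.html", title="Example", mode="cdn")
#     p = figure()
#     p.line([1, 2, 3], [4, 6, 5])
#     show(p)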
def output_notebook(url=None, docname=None, session=None, name=None,
resources=None, verbose=False, hide_banner=False):
''' Configure the default output state to generate output in
Jupyter/IPython notebook cells when :func:`show` is called.
Args:
url (str, optional) : URL of the Bokeh server (default: "default")
If "default", then ``session.DEFAULT_SERVER_URL`` is used.
docname (str) : Name of document to push on Bokeh server (default: None)
Any existing documents with the same name will be overwritten.
session (Session, optional) : An explicit session to use (default: None)
If None, a new default session is created.
name (str, optional) : A name for the session (default: None)
If None, the server URL is used as the name
resources (Resource, optional) :
How and where to load BokehJS from (default: INLINE)
verbose (bool, optional) :
whether to display detailed BokehJS banner (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
'''
load_notebook(resources, verbose, hide_banner)
_state.output_notebook(
url=url, docname=docname, session=session, name=name
)
def output_server(docname, session=None, url="default", name=None, clear=True):
''' Configure the default output state to generate output that gets
pushed to a bokeh-server when :func:`show` or :func:`push` is called.
Args:
docname (str) : Name of document to push on Bokeh server
Any existing documents with the same name will be overwritten.
session (Session, optional) : An explicit session to use (default: None)
If None, a new default session is created.
url (str, optional) : URL of the Bokeh server (default: "default")
If "default", then ``session.DEFAULT_SERVER_URL`` is used.
name (str, optional) : A name for the session (default: None)
If None, the server URL is used as the name
clear (bool, optional) : Whether to clear the document (default: True)
If True, an existing server document will be cleared of any
existing objects.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
Calling this function will replace any existing default session.
'''
_state.output_server(
docname, session=session, url=url, name=name, clear=clear
)
def curdoc():
''' Return the document for the current default state.
Returns:
doc : the current default document object.
.. note::
When using this API from within the server (e.g. in a Bokeh app), the
appropriate document from the request context is returned, rather than
the standard default global state. This allows the same code using
curdoc() to function correctly whether or not it is running inside a
server.
'''
try:
from flask import request
doc = request.bokeh_server_document
logger.debug("curdoc() returning Document from flask request context")
return doc
except (ImportError, RuntimeError, AttributeError):
return _state.document
def cursession():
''' Return the session for the current default state, if there is one.
Returns:
the current default Session object (or None)
'''
return _state.session
def show(obj, browser=None, new="tab"):
''' Immediately display a plot object.
In an IPython/Jupyter notebook, the output is displayed in an output
cell. Otherwise, a browser window or tab is autoraised to display the
plot object.
If both a server session and notebook output have been configured on
the default output state then the notebook output will be generated to
load the plot from that server session.
Args:
obj (Component object) : a plot object to display
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
Returns:
None
.. note::
The ``browser`` and ``new`` parameters are ignored when showing in
an IPython/Jupyter notebook.
'''
_show_with_state(obj, _state, browser, new)
def _show_with_state(obj, state, browser, new):
controller = browserlib.get_browser_controller(browser=browser)
if state.notebook:
_show_notebook_with_state(obj, state)
elif state.session:
_show_server_with_state(obj, state, new, controller)
if state.file:
_show_file_with_state(obj, state, new, controller)
def _show_file_with_state(obj, state, new, controller):
save(obj, state=state)
controller.open("file://" + os.path.abspath(state.file['filename']), new=_new_param[new])
def _show_notebook_with_state(obj, state):
if state.session:
push(state=state)
snippet = autoload_server(obj, state.session)
publish_display_data({'text/html': snippet})
else:
publish_display_data({'text/html': notebook_div(obj)})
def _show_server_with_state(obj, state, new, controller):
push(state=state)
controller.open(state.session.object_link(state.document.context), new=_new_param[new])
def save(obj, filename=None, resources=None, title=None, state=None):
''' Save an HTML file with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``filename``, ``resources``, or ``title`` if they
are not provided.
Args:
obj (Document or Component object) : a plot object to save
filename (str, optional) : filename to save document under (default: None)
If None, use the filename from the default state configuration;
if that is also unset, a ``RuntimeError`` is raised.
resources (Resources, optional) : A Resources config to use (default: None)
If None, use the default state configuration, if there is one;
otherwise use ``resources.INLINE``.
title (str, optional) : a title for the HTML document (default: None)
If None, use the default state title value, if there is one;
otherwise, use "Bokeh Plot".
Returns:
None
Raises:
RuntimeError
'''
if state is None:
state = _state
filename, resources, title = _get_save_args(state, filename, resources, title)
_save_helper(obj, filename, resources, title)
def _get_save_args(state, filename, resources, title):
if filename is None and state.file:
filename = state.file['filename']
if resources is None and state.file:
resources = state.file['resources']
if title is None and state.file:
title = state.file['title']
if filename is None:
raise RuntimeError("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
if resources is None:
warnings.warn("save() called but no resources was supplied and output_file(...) was never called, defaulting to resources.INLINE")
from .resources import INLINE
resources = INLINE
if title is None:
warnings.warn("save() called but no title was supplied and output_file(...) was never called, using default title 'Bokeh Plot'")
title = "Bokeh Plot"
return filename, resources, title
def _save_helper(obj, filename, resources, title):
if isinstance(obj, Component):
doc = Document()
doc.add(obj)
elif isinstance(obj, Document):
doc = obj
else:
raise RuntimeError("Unable to save object of type '%s'" % type(obj))
html = file_html(doc, resources, title)
with io.open(filename, "w", encoding="utf-8") as f:
f.write(decode_utf8(html))
def push(session=None, document=None, state=None):
''' Update the server with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``session`` or ``document`` if they are not
provided.
Args:
session (Session, optional) : a Bokeh server session to push objects to
document (Document, optional) : A :class:`bokeh.document.Document` to use
Returns:
None
'''
if state is None:
state = _state
if not session:
session = state.session
if not document:
document = state.document
if not session:
warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
return
return session.store_document(document)
def reset_output(state=None):
''' Clear the default state of all output modes.
Returns:
None
'''
_state.reset()
def _deduplicate_plots(plot, subplots):
doc = _state.document
doc.context.children = list(set(doc.context.children) - set(subplots))
doc.add(plot)
doc._current_plot = plot # TODO (bev) don't use private attrs
def _push_or_save(obj):
if _state.session and _state.document.autostore:
push()
if _state.file and _state.file['autosave']:
save(obj)
def gridplot(plot_arrangement, **kwargs):
''' Generate a plot that arranges several subplots into a grid.
Args:
plot_arrangement (nested list of Plots) : plots to arrange in a grid
**kwargs: additional attributes to pass in to GridPlot() constructor
.. note:: ``plot_arrangement`` can be nested, e.g [[p1, p2], [p3, p4]]
Returns:
grid_plot: a new :class:`GridPlot <bokeh.models.plots.GridPlot>`
'''
grid = GridPlot(children=plot_arrangement, **kwargs)
subplots = itertools.chain.from_iterable(plot_arrangement)
_deduplicate_plots(grid, subplots)
_push_or_save(grid)
return grid
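# Example arrangement (sketch; p1..p4 are assumed to be existing Plot objects):
#
#     grid = gridplot([[p1, p2], [p3, p4]])
#     show(grid)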
def hplot(*children, **kwargs):
''' Generate a layout that arranges several subplots horizontally.
'''
layout = HBox(children=list(children), **kwargs)
_deduplicate_plots(layout, children)
_push_or_save(layout)
return layout
def vplot(*children, **kwargs):
''' Generate a layout that arranges several subplots vertically.
'''
layout = VBox(children=list(children), **kwargs)
_deduplicate_plots(layout, children)
_push_or_save(layout)
return layout
def vform(*children, **kwargs):
''' Generate a layout that arranges several subplots vertically.
'''
layout = VBoxForm(children=list(children), **kwargs)
_push_or_save(layout)
return layout
import argparse
import logging
import os
from shutil import copytree, rmtree
from subprocess import Popen, PIPE
try:
from venv import create as create_venv
VENV_CREATE_KWARGS = {'symlinks': True}
except ImportError:
from virtualenv import create_environment as create_venv
VENV_CREATE_KWARGS = {'symlink': True}
try:
import configparser
except ImportError:
import ConfigParser as configparser
from . import VERSION
DIR_CURRENT = os.getcwd()
DJANGO_DEFAULT_VERSION = '1.8'
APPS_DIRNAME = 'apps'
VENVS_DIRNAME = 'venvs'
MANAGE_PY = '''
# This file is created automatically by django-dev.
# Do not try to edit it; any manual changes will be lost.
import os
import sys
if __name__ == '__main__':
#PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
sys.path = ['%(apps_path)s'] + sys.path
try:
import south
south = ('south',)
except:
south = ()
from django import get_version
print('* Django: {}'.format(get_version()))
from django.conf import settings, global_settings
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
)
if not settings.configured:
settings.configure(
INSTALLED_APPS=INSTALLED_APPS + south + ('%(apps_available)s',),
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3'}},
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES, # Prevents Django 1.7 warning.
SOUTH_MIGRATION_MODULES = {%(south_migration_modules)s}
)
try: # Django 1.7 +
from django import setup
setup()
except ImportError:
pass
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
'''
class DjangoDevException(Exception):
"""Exception class used by django-dev."""
class DevTools(object):
def __init__(self, log_level=None):
self.workshop_path = DIR_CURRENT
self.apps_path = os.path.join(self.workshop_path, APPS_DIRNAME)
self.venvs_path = os.path.join(self.workshop_path, VENVS_DIRNAME)
self.logger = logging.getLogger(self.__class__.__name__)
self.configure_logging(log_level)
def configure_logging(self, verbosity_lvl=None, format='%(message)s'):
"""Switches on logging at a given level.
:param verbosity_lvl:
:param format:
"""
if not verbosity_lvl:
verbosity_lvl = logging.INFO
logging.basicConfig(format=format)
self.logger.setLevel(verbosity_lvl)
def _run_shell_command(self, command, pipe_it=True):
"""Runs the given shell command.
:param command:
:return: bool Status
"""
stdout = None
if pipe_it:
stdout = PIPE
self.logger.debug('Executing shell command: %s' % command)
return not bool(Popen(command, shell=True, stdout=stdout).wait())
def _get_venv_path(self, dj_version):
"""Returns virtual env directory path (named after Django version).
:param str dj_version:
:rtype: str
:return: path
"""
return os.path.join(self.venvs_path, dj_version)
def _get_app_path(self, app_name):
"""Returns application directory path for a given app name.
:param str app_name:
:return: path
"""
return os.path.join(self.apps_path, app_name)
def _get_manage_py_path(self):
"""Returns a path to manage.py file.
:rtype: str
:return: path
"""
return os.path.join(self.workshop_path, 'manage.py')
def run_manage_command(self, command, venv_path, verbose=True):
"""Runs a given Django manage command in a given virtual environment.
:param str command:
:param str venv_path:
:param bool verbose:
"""
self.logger.debug('Running manage command `%s` for `%s` ...' % (command, venv_path))
self._run_shell_command(
'. %s/bin/activate && python %s %s' % (venv_path, self._get_manage_py_path(), command),
pipe_it=(not verbose))
def venv_install(self, package_name, venv_path):
"""Installs a given python package into a given virtual environment.
:param str package_name:
:param str venv_path:
"""
self.logger.debug('Installing `%s` into `%s` ...' % (package_name, venv_path))
self._run_shell_command('. %s/bin/activate && pip install -U %s' % (venv_path, package_name))
def make_venv(self, dj_version):
"""Creates a virtual environment for a given Django version.
:param str dj_version:
:rtype: str
:return: path to created virtual env
"""
venv_path = self._get_venv_path(dj_version)
self.logger.info('Creating virtual environment for Django %s ...' % dj_version)
try:
create_venv(venv_path, **VENV_CREATE_KWARGS)
except ValueError:
self.logger.warning('Virtual environment directory already exists. Skipped.')
self.venv_install('django==%s' % dj_version, venv_path)
return venv_path
def make_apps_dir(self):
"""Creates an empty directory for symlinks to Django applications.
:rtype: str
:return: created directory path
"""
self.logger.info('Creating a directory for symlinks to your Django applications `%s` ...' % self.apps_path)
try:
os.mkdir(self.apps_path)
except OSError:
pass # Already exists.
return self.apps_path
def dispatch_op(self, op_name, args_dict):
"""Dispatches an operation requested.
:param str op_name:
:param dict args_dict:
"""
self.logger.debug('Requested `%s` command with `%s` args.' % (op_name, args_dict))
method = getattr(self, 'op_%s' % op_name, None)
if method is None:
error_str = '`%s` command is not supported.' % op_name
self.logger.error(error_str)
raise DjangoDevException(error_str)
method(**args_dict)
self.logger.info('Done.')
def get_venvs(self):
"""Returns a list of names of available virtual environments.
:raises: DjangoDevException on errors
:rtype: list
:return: list of names
"""
def raise_():
error_str = 'Virtual environments are not created. Please run `bootstrap` command.'
self.logger.error(error_str)
raise DjangoDevException(error_str)
if not os.path.exists(self.venvs_path):
raise_()
venvs = os.listdir(self.venvs_path)
if not venvs:
raise_()
venvs.sort()
return venvs
def get_apps(self, only=None):
"""Returns a list of names of available Django applications,
Optionally filters it using `only`.
:param list|None only: a list on apps names to to filter all available apps against
:raises: DjangoDevException on errors
:rtype: list
:return: list of apps names
"""
if not os.path.exists(self.apps_path):
error_str = 'It seems that this directory does not contain a django-dev project. ' \
'Use the `bootstrap` command to create a project in the current directory.'
self.logger.error(error_str)
raise DjangoDevException(error_str)
apps = os.listdir(self.apps_path)
if not apps:
error_str = 'Applications directory is empty. ' \
'Please symlink your apps (and other apps that your apps depend upon) into %s' % self.apps_path
self.logger.error(error_str)
raise DjangoDevException(error_str)
apps.sort()
if only is None:
self.create_manage_py(apps)
return apps
diff = set(only).difference(apps)
if diff:
error_str = 'The following apps are not found: `%s`.' % ('`, `'.join(diff))
self.logger.error(error_str)
raise DjangoDevException(error_str)
self.create_manage_py(apps)
return [name for name in apps if name in only]
def create_manage_py(self, apps):
"""Creates manage.py file, with a given list of installed apps.
:param list apps:
"""
self.logger.debug('Creating manage.py ...')
with open(self._get_manage_py_path(), mode='w') as f:
south_migration_modules = []
for app in apps:
south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
f.write(MANAGE_PY % {
'apps_available': "', '".join(apps),
'apps_path': self.apps_path,
'south_migration_modules': ", ".join(south_migration_modules)
})
def op_list_venvs(self):
"""Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
"""
self.logger.info('Listing known virtual environments ...')
venvs = self.get_venvs()
for venv in venvs:
self.logger.info('Found `%s`' % venv)
if not venvs:
self.logger.info('No virtual environments found in `%s` directory.' % VENVS_DIRNAME)
return venvs
def op_list_apps(self):
"""Prints out and returns a list of known applications.
:rtype: list
:return: list of applications
"""
self.logger.info('Listing known applications ...')
apps = self.get_apps()
for app in apps:
self.logger.info('Found `%s`' % app)
if not apps:
self.logger.info('\nDONE. No applications found in `%s` directory.\n' % APPS_DIRNAME)
return apps
def op_bootstrap(self):
"""Bootstraps django-dev by creating required directory structure."""
self.logger.info('Bootstrapping django-dev directory structure in current directory ...')
self.make_venv(DJANGO_DEFAULT_VERSION)
venv_path = self.make_venv('1.6.5')
self.venv_install('south==1.0.1', venv_path)
apps_dir = self.make_apps_dir()
self.logger.info('Now you may symlink (ln -s) your apps '
'(and other apps that your apps depend upon) into %s' % apps_dir)
def _make_dirs(self, path):
"""Creates every directory in path as needed. Fails silently.
:param str path:
:rtype: bool
:return: True if no exceptions raised; otherwise - False.
"""
try:
os.makedirs(path)
return True
except OSError: # Probably already exists.
pass
return False
def op_install_package(self, names):
"""Install packages into virtual envs as to satisfy app requirements.
Exact version numbers could be given as in PIP: somedep==1.5
:param list names:
"""
venvs = self.get_venvs()
for venv in venvs:
for name in names:
self.venv_install(name, self._get_venv_path(venv))
def op_make_trans(self, locales=None, apps=None):
"""Generates/updates localization (.po, .mo) files for applications.
:param list|None locales: Locales to generate files for. If `None` all available locales in apps are updated.
:param list|None apps: Target applications filter. If `None` all available apps are processed.
"""
self.logger.info('Making translations ...')
apps = self.get_apps(only=apps)
self.get_venvs() # Sentinel.
venv_path = self._get_venv_path(DJANGO_DEFAULT_VERSION)
if locales is None:
locales = []
for app_name in apps:
self.logger.info('Application: %s' % app_name)
app_path = self._get_app_path(app_name)
locales_path = os.path.join(app_path, 'locale')
if not locales and os.path.exists(locales_path): # Getting all existing locales.
locales = os.listdir(locales_path)
for lang in locales:
self.logger.info('Locale: %s' % lang)
locale_path = os.path.join(locales_path, '%s/LC_MESSAGES' % lang)
self._make_dirs(locale_path)
old_wd = os.getcwd()
os.chdir(app_path)
self.run_manage_command('makemessages -l %s' % lang, venv_path)
self.run_manage_command('compilemessages -l %s' % lang, venv_path)
os.chdir(old_wd)
def op_add_migrations(self, apps=None, relocate_south=False):
self.logger.info('Making migrations ...')
apps = self.get_apps(only=apps)
venvs = self.get_venvs() # Sentinel.
default_venv_path = self._get_venv_path(DJANGO_DEFAULT_VERSION)
def fix_migrations(path):
"""Fixes buggy migrations created with `makemigrations` using Py2.
See: https://code.djangoproject.com/ticket/23455
:param path:
:return:
"""
self.logger.debug('Fixing migrations ...')
for file in os.listdir(path):
if os.path.splitext(file)[1] == '.py':
with open(os.path.join(path, file), 'r+') as f:
contents = f.read()
f.seek(0)
f.write(contents.replace("=b'", "='"))
f.truncate()
last_app_name = None
for venv in venvs:
venv_path = self._get_venv_path(venv)
for app_name in apps:
app_path = self._get_app_path(app_name)
PATH_SOUTH = os.path.join(app_path, 'south_migrations')
PATH_BUILTIN = os.path.join(app_path, 'migrations')
south_exists = os.path.exists(PATH_SOUTH)
if last_app_name != app_name:
self.logger.info('Application: %s' % app_name)
last_app_name = app_name
if relocate_south and not south_exists and os.path.exists(PATH_BUILTIN): # Foolproof.
self.logger.info('Relocating South migrations into %s ...' % PATH_SOUTH)
copytree(PATH_BUILTIN, PATH_SOUTH)
rmtree(PATH_BUILTIN)
south_exists = True
# Add migrations for both.
if venv_path == default_venv_path: # Django with migrations built-in (1.7+)
self.run_manage_command('makemigrations %s' % app_name, venv_path)
else:
flag = '--auto' if south_exists else '--initial'
self.run_manage_command('schemamigration %s %s' % (app_name, flag), venv_path)
if os.path.exists(PATH_BUILTIN):
fix_migrations(PATH_BUILTIN)
def main():
arg_parser = argparse.ArgumentParser(
prog='django-dev', description='Tools to facilitate application development for Django')
arg_parser.add_argument('--version', action='version', version='.'.join(map(str, VERSION)))
arg_parser.add_argument('--debug', help='Show debug messages while processing', action='store_true')
arg_parser_apps = argparse.ArgumentParser(add_help=False)
arg_parser_apps.add_argument(
'--apps', nargs='+', help='Whitespace-separated list of application names. Example: sitecats, siteflags.')
sub_parsers = arg_parser.add_subparsers(dest='subparser_name')
sub_parsers.add_parser('bootstrap', help='Creates a basic django-dev directory structure in a current directory.')
sub_parsers.add_parser('list_apps', help='Prints out currently available applications.')
sub_parsers.add_parser('list_venvs', help='Prints out currently available virtual environments.')
sub_parser_add_migrations = sub_parsers.add_parser(
'add_migrations', help='Adds both South and Django 1.7+ migrations for apps.', parents=[arg_parser_apps])
sub_parser_add_migrations.add_argument(
'--relocate_south',
help='Flag to relocate old South migrations from `migrations` into `south_migrations` folder.',
action='store_true')
sub_parser_make_trans = sub_parsers.add_parser(
'make_trans', help='Creates translation (.po, .mo) files for the given locales.', parents=[arg_parser_apps])
sub_parser_make_trans.add_argument(
'locales', nargs='*',
help='Locale identifiers to make localization files for. Whitespace-separated values are allowed. '
'Example: ru en.')
sub_parser_install_package = sub_parsers.add_parser(
'install_package', help='Installs packages into virtual environments.')
sub_parser_install_package.add_argument(
'names', nargs='*',
help='Package names to install. Exact version number could be supplied as for PIP: some_package==1.2.3 '
'Whitespace-separated values are allowed.')
parsed_args = arg_parser.parse_args()
parsed_args = vars(parsed_args) # Convert args to dict
log_level = None
if parsed_args['debug']:
log_level = logging.DEBUG
del parsed_args['debug']
dt = DevTools(log_level=log_level)
target_subparser = parsed_args['subparser_name']
del parsed_args['subparser_name']
try:
dt.dispatch_op(target_subparser, parsed_args)
except DjangoDevException:
pass # This will already have been logged to stdout.
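# Typical invocations of the `django-dev` command defined above (a sketch;
# the example app names are the ones used in the argparse help strings):
#
#     django-dev bootstrap
#     django-dev list_apps
#     django-dev add_migrations --apps sitecats siteflags --relocate_south
#     django-dev make_trans ru en --apps sitecats
#     django-dev install_package somedep==1.5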
import GAN_10
import tensorflow as tf
import numpy as np
import os
import cv2
from ops import *
from utils import *
import time
from tensorflow.examples.tutorials.mnist import input_data
# from MNIST_Classification import Classification_Model
from MNIST_Classification_with_embedding import Classification_Model
from tensorflow.contrib.tensorboard.plugins import projector
from glob import glob
slim = tf.contrib.slim
flags = tf.app.flags
flags.DEFINE_integer("iter", 2, "iter to train ")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("sample_num", 64, "The size of sample images ")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("input_height", 28, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", 28, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", 28, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", 28, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "5", "The name of dataset [...]")
flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("checkpoint_dir", "./checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "./samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", True, "True for training, False for testing [False]")
flags.DEFINE_boolean("crop", True, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_integer("C_iter", 20000, "The iteration of training C")
flags.DEFINE_integer("C_batch_size", 64, "The batch_size of extracting feature vector of C")
FLAGS = flags.FLAGS
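# Typical invocation (sketch; the entry-point filename is hypothetical, the
# flags are the ones defined above):
#
#     python train_gan_mnist.py --iter 2 --batch_size 64 --C_iter 20000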
def start_GAN():
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
with tf.Graph().as_default():
run_config = tf.ConfigProto()
# run_config.gpu_options.allow_growth=True
run_config.gpu_options.per_process_gpu_memory_fraction = 0.8
with tf.Session(config = run_config) as sess:
dcgan = GAN_10.DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.sample_num,
# dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir)
dcgan.train(FLAGS)
def start_C(iteration,start = True):
run_config = tf.ConfigProto()
# run_config.gpu_options.allow_growth=True
run_config.gpu_options.per_process_gpu_memory_fraction = 0.8
tf.logging.set_verbosity(tf.logging.DEBUG)
tfrecords_path = './data_tf/'
with tf.Graph().as_default():
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
ckpt = tf.train.get_checkpoint_state(os.path.dirname('./checkpoint_pretrain/checkpoint'))
sess = tf.InteractiveSession(config = run_config)
global_step = slim.create_global_step()
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
mnist_net = Classification_Model()
# mnist_net.change_dataset("5")
# x, y_ = mnist_net.get_batch()
x,y_ = mnist_net.get_batch_tf(tfrecords_path)
# arg_scope = mnist_net.model_arg_scope()
end_points = {}
# with slim.arg_scope(arg_scope):
logits, end_points = mnist_net.net(x)
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
losses = mnist_net.losses(logits, y_)
train_step = mnist_net.optimizer(0.001).minimize(losses, global_step=global_step)
embedding, config = mnist_net.get_embedding('./checkpoint_pretrain/')
#total_loss = tf.losses.get_total_loss()
summaries.add(tf.summary.image("img", tf.cast(x, tf.float32)))
summaries.add(tf.summary.scalar('loss', losses))
for variable in tf.trainable_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
train_writer = tf.summary.FileWriter('./checkpoint_pretrain/train',sess.graph)
projector.visualize_embeddings(train_writer, config)
correct_prediction = tf.equal(tf.argmax(end_points['Predictions'], 1), tf.argmax(y_, 1))
accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32)) / mnist_net.batch_size
summaries.add(tf.summary.scalar('accuracy', accuracy))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=5,
keep_checkpoint_every_n_hours=1.0,
write_version=2,
pad_step_number=False)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
save_test = []
x_test = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y_test = tf.placeholder(tf.float32, shape=[None, 10])
logits_test, end_points_test = mnist_net.net(x_test,is_training=False, reuse = True)
correct_prediction_test = tf.equal(tf.argmax(end_points_test['Predictions'], 1), tf.argmax(y_test, 1))
num_correct = tf.reduce_sum(tf.cast(correct_prediction_test,tf.float32))
assignment = embedding.assign(end_points_test['Features'])
for i in range(iteration):
if i %100 == 0 :
# sum_accuracy_test = 0.0
# batch_size = 100
# for count in range(100):
# test_image = np.reshape(mnist.test.images[count*batch_size:(count+1)*batch_size],(batch_size,28,28,1)) *2.0 -1
# test_label = np.reshape(mnist.test.labels[count*batch_size:(count+1)*batch_size],(batch_size,10))
# num_c = sess.run(num_correct,
# feed_dict = {x_test:test_image, y_test:test_label})
# sum_accuracy_test += num_c
test_batch_x = mnist.test.images[:10000] * 2.0 - 1
test_batch_y = mnist.test.labels[:10000]
sum_accuracy_test, _ = sess.run([num_correct, assignment],
feed_dict={x_test: np.reshape(test_batch_x, (-1, 28, 28, 1)),
y_test: test_batch_y})
print ("test accuracy is: %f" % (sum_accuracy_test /10000.0 ))
# saver.save(sess, "./checkpoint_pretrain/",global_step=global_step_str)
print('****************************')
print ("test accuracy is: %f" % (sum_accuracy_test /10000.0 ))
print('****************************')
if start:
if not save_test:
save_test.append(sum_accuracy_test)
else :
save_test.append(sum_accuracy_test)
if sum_accuracy_test > save_test[0] :
print('You are getting better!')
print ('saving model')
saver.save(sess, "./checkpoint_pretrain/",global_step= global_step.eval())
break
else:
print('Oops, not this time!')
else:
if sum_accuracy_test/10000.0 >= 0.995:
print ('saving model')
saver.save(sess, "./checkpoint_pretrain/",global_step= global_step.eval())
break
_,summary_str,current_accuracy = sess.run([train_step,summary_op,accuracy])
if i %10 == 0:
train_writer.add_summary(summary_str,i)
print('%d iteration' % i, current_accuracy)
coord.request_stop()
coord.join(threads)
# print ('saving model')
# saver.save(sess, "./checkpoint_pretrain/",global_step= global_step.eval())
time.sleep(3)
def get_feature(batch_size,id = None ):
with tf.Graph().as_default():
ckpt = tf.train.get_checkpoint_state(os.path.dirname('./checkpoint_pretrain/checkpoint'))
sess = tf.InteractiveSession()
num_preprocess_threads = 1
min_queue_examples = 256
image_reader = tf.WholeFileReader()
file_list = glob(os.path.join("./train_data",str(id),"*.jpg"))
filename_queue = tf.train.string_input_producer(file_list[:])
_,image_file = image_reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file)
image = tf.cast(tf.reshape(image,shape = [28,28,1]), dtype = tf.float32)
batch_images = tf.train.batch([image],batch_size = batch_size,
num_threads = num_preprocess_threads,
capacity = min_queue_examples + 3*batch_size)
batch_images = batch_images/ 127.5 -1
# summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# summaries.add(tf.summary.image("batch_img", tf.cast(batch_images, tf.float32)))
#
# train_writer = tf.summary.FileWriter('./get_feature/%d'%id, sess.graph)
mnist_net = Classification_Model()
# tfrecords_path = './data_tf/'
# batch_images,labels = mnist_net.get_batch_tf(tfrecords_path,shuffle = False)
logits, end_points = mnist_net.net(batch_images,is_training =False)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# summary_op = tf.summary.merge(list(summaries), name='summary_op')
saver = tf.train.Saver(max_to_keep=5,
keep_checkpoint_every_n_hours=1.0,
write_version=2,
pad_step_number=False)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord = coord )
all_features = np.zeros((batch_size*100,100))
# all_features = list(range(10))
# for i in range(10):
# all_features[i] = []
for count in range(100):
# summary_str = sess.run(summary_op)
# train_writer.add_summary(summary_str,count)
features_str = sess.run(end_points["Features"])
all_features[count*batch_size:(count+1)*batch_size,:] = features_str
# for _ in range(2000):
# features_str,label_current = sess.run([end_points["Features"],labels])
# print ('getting feaure vectors ....')
# for count in range(batch_size):
# all_features[np.where(label_current[count]==1)[0][0]].append(features_str[count])
np.save("./outputs/features_%d"%id,all_features)
# for i in range(10):
# np.save('./outputs/features_%d'%i,np.asarray(all_features[i]))
print ('******************************')
print('Succeeded saving npz once with %d' % id)
print('Succeeded saving npz once')
print ('******************************')
coord.request_stop()
coord.join(threads)
def main(_):
if not os.path.exists('./checkpoint_pretrain'):
os.mkdir('./checkpoint_pretrain')
if not os.path.exists('./data_tf'):
os.mkdir('./data_tf')
if not os.path.exists('./outputs'):
os.mkdir('./outputs')
if not os.path.exists('./samples'):
os.mkdir('./samples')
start_C(FLAGS.C_iter,start= False)
while True:
for i in range(10):
get_feature(FLAGS.C_batch_size,i)
# get_feature(FLAGS.C_batch_size)
start_GAN()
start_C(FLAGS.C_iter)
if __name__ == '__main__':
tf.app.run()
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import sklearn.decomposition
from cgpm.cgpm import CGpm
from cgpm.utils import general as gu
from cgpm.utils import mvnormal as multivariate_normal
class FactorAnalysis(CGpm):
"""Factor analysis model with continuous latent variables z in a low
dimensional space. The generative model for a vector x is
z ~ Normal(0, I) where z \in R^L.
e ~ Normal(0, Psi) where Psi = diag(v_1,...,v_D)
x = W.z + mux + e where W \in R^(DxL) and mux \in R^D, learning by EM.
From standard results (Murphy Section 12.1)
z ~ Normal(0, I) Prior.
x|z ~ Normal(W.z + mux, Psi) Likelihood.
x ~ Normal(mux, W.W'+Psi) Marginal.
z|x ~ Normal(m, S) Posterior.
S = inv(I + W'.inv(Psi).W) (covariance)
m = S(W'.inv(Psi).(x-mux)) (mean)
The full joint distribution over [z,x] is then
The mean of [z,x] is [0, mux]
The covariance of [z,x] is (in block form)
[ I (LxL)      W' (LxD)         ]
[ W (DxL)      W.W' + Psi (DxD) ]
where the covariance W' is computed directly
cov(z,x) = cov(z, W.z + mux + e)
= cov(z, W.z) + cov(z, mux) + cov(z, e)
= cov(z, W.z)
= cov(z,z).W'
= I*W'
= W'
Exercise: Confirm that the expression for the posterior z|x is consistent
with conditioning directly on the joint [z,x] using the Schur complement
(Hint: see the test suite; a standalone numerical sketch also appears at
the end of this module).
The latent variables are exposed as output variables, but may not be
incorporated.
"""
def __init__(self, outputs, inputs, L=None, distargs=None, params=None,
rng=None):
# Default parameter settings.
if params is None:
params = {}
if distargs is None:
distargs = {}
# Entropy.
if rng is None:
rng = gu.gen_rng(1)
# No inputs.
if inputs:
raise ValueError('FactorAnalysis rejects inputs: %s.' % inputs)
# Correct outputs.
if len(outputs) < 2:
raise ValueError('FactorAnalysis needs >= 2 outputs: %s.' % outputs)
if len(set(outputs)) != len(outputs):
raise ValueError('Duplicate outputs: %s.' % outputs)
# Find low dimensional space.
if L is None:
raise ValueError('Specify latent dimension L: %s.' % L)
if L == 0:
raise ValueError('Latent dimension must be at least 1: %s.' % L)
if 'outputs' in distargs and any(s != 'numerical'
for s in distargs['outputs']['stattypes']):
raise ValueError('Factor non-numerical outputs: %s.' % distargs)
# Observable and latent variable indexes.
D = len(outputs[:-L])
if D < L:
raise ValueError(
'Latent dimension exceeds observed dimension: (%s,%s)'
% (outputs[:-L], outputs[-L:]))
# Parameters.
mux = params.get('mux', np.zeros(D))
Psi = params.get('Psi', np.eye(D))
W = params.get('W', np.zeros((D,L)))
# Build the object.
self.rng = rng
# Dimensions.
self.L = L
self.D = D
# Variable indexes.
self.outputs = outputs
self.observables = outputs[:-self.L]
self.latents = set(outputs[-self.L:])
self.inputs = []
self.output_mapping = {c:i for i,c in enumerate(self.outputs)}
# Dataset.
self.data = OrderedDict()
self.N = 0
# Parameters of Factor Analysis.
self.mux = np.asarray(mux)
self.Psi = np.asarray(Psi)
self.W = np.asarray(W)
# Parameters of joint distribution [x,z].
self.mu, self.cov = self.joint_parameters()
# Internal factor analysis model.
self.fa = None
def incorporate(self, rowid, observation, inputs=None):
# No duplicate observation.
if rowid in self.data:
raise ValueError('Already observed: %d.' % rowid)
# No inputs.
if inputs:
raise ValueError('No inputs allowed: %s.' % inputs)
if not observation:
raise ValueError('No observation specified: %s.' % observation)
# No unknown variables.
if any(q not in self.outputs for q in observation):
raise ValueError('Unknown variables: (%s,%s).'
% (observation, self.outputs))
# No latent variables.
if any(q in self.latents for q in observation):
raise ValueError('Cannot incorporate latent vars: (%s,%s,%s).'
% (observation, self.outputs, self.latents))
# Incorporate observed observable variables.
x = [observation.get(i, np.nan) for i in self.observables]
# Update dataset and counts.
self.data[rowid] = x
self.N += 1
def unincorporate(self, rowid):
try:
del self.data[rowid]
except KeyError:
raise ValueError('No such observation: %d.' % rowid)
self.N -= 1
def logpdf(self, rowid, targets, constraints=None, inputs=None):
# XXX Deal with observed rowid.
constraints = self.populate_constraints(rowid, targets, constraints)
if inputs:
raise ValueError('Prohibited inputs: %s' % (inputs,))
if not targets:
raise ValueError('No targets: %s' % (targets,))
if any(q not in self.outputs for q in targets):
raise ValueError('Unknown targets: %s' % (targets,))
if any(q in constraints for q in targets):
raise ValueError('Duplicate variable: %s, %s'
% (targets, constraints,))
# Reindex variables.
targets_r = self.reindex(targets)
constraints_r = self.reindex(constraints)
# Retrieve conditional distribution.
muG, covG = FactorAnalysis.mvn_condition(
self.mu, self.cov, targets_r.keys(), constraints_r)
# Compute log density.
x = np.array(targets_r.values())
return multivariate_normal.logpdf(x, muG, covG)
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
# XXX Deal with observed rowid.
constraints = self.populate_constraints(rowid, targets, constraints)
if inputs:
raise ValueError('Prohibited inputs: %s' % (inputs,))
if not targets:
raise ValueError('No targets: %s' % (targets,))
if any(q not in self.outputs for q in targets):
raise ValueError('Unknown targets: %s' % (targets,))
if any(q in constraints for q in targets):
raise ValueError('Duplicate variable: %s, %s'
% (targets, constraints,))
# Reindex variables.
targets_r = self.reindex(targets)
constraints_r = self.reindex(constraints)
# Retrieve conditional distribution.
muG, covG = FactorAnalysis.mvn_condition(
self.mu, self.cov, targets_r, constraints_r)
# Generate samples.
sample = self.rng.multivariate_normal(mean=muG, cov=covG, size=N)
def get_sample(samp):
if isinstance(samp, float):
samp = [samp]
assert len(targets) == len(samp)
return dict(zip(targets, samp))
return get_sample(sample) if N is None else map(get_sample, sample)
def logpdf_score(self):
def compute_logpdf(x):
assert len(x) == self.D
targets = {i:v for i,v in enumerate(x) if not np.isnan(v)}
return self.logpdf(None, targets)
return sum(compute_logpdf(x) for x in self.data.values())
def transition(self, N=None):
X = np.asarray(self.data.values())
# Only run inference on observations without missing entries.
self.fa = sklearn.decomposition.FactorAnalysis(n_components=self.L)
self.fa.fit(X[~np.any(np.isnan(X), axis=1)])
assert (self.L, self.D) == self.fa.components_.shape
# Update parameters of Factor Analysis.
self.Psi = np.diag(self.fa.noise_variance_)
self.mux = self.fa.mean_
self.W = np.transpose(self.fa.components_)
self.mu, self.cov = self.joint_parameters()
def populate_constraints(self, rowid, targets, constraints):
if constraints is None:
constraints = {}
if rowid in self.data:
values = self.data[rowid]
assert len(values) == len(self.outputs[:self.D])
observations = {
output : value
for output, value in zip(self.outputs[:self.D], values)
if not np.isnan(value)
and output not in targets
and output not in constraints
}
constraints = gu.merged(constraints, observations)
return constraints
# --------------------------------------------------------------------------
# Internal.
def get_params(self):
return {
'mu': self.mu,
'Psi': self.Psi,
'W': self.W
}
@staticmethod
def name():
return 'low_dimensional_mvn'
@staticmethod
def is_continuous():
return True
@staticmethod
def is_conditional():
return False
@staticmethod
def is_numeric():
return True
# --------------------------------------------------------------------------
# Helper.
def reindex(self, variables):
# Reindex an output variable to its index in self.mu
# self.mu has as the first L items the last L items of self.outputs
# and as the remaining D items the first D items of self.outputs.
# The following diagram is useful:
# self.outputs: 12 14 -7 5 | 11 4 3
# <---D=4--->|<--L=3-->
# raw indices: 0 1 2 3 | 4 5 6
# reindexed: 3 4 5 6 | 0 1 2
assert isinstance(variables, (list, dict))
def convert(q):
i = self.output_mapping[q]
return i - self.D if q in self.latents else i + self.L
indexes = [convert(q) for q in variables]
if isinstance(variables, list):
return indexes
else:
return dict(zip(indexes, variables.values()))
def joint_parameters(self):
mean = np.concatenate((np.zeros(self.L), self.mux))
cov = np.row_stack((
np.column_stack((np.eye(self.L), self.W.T)),
np.column_stack((self.W, np.dot(self.W, self.W.T) + self.Psi))
))
return mean, cov
@staticmethod
def mvn_marginalize(mu, cov, query, evidence):
Q, E = query, evidence
# Retrieve means.
muQ = mu[Q]
muE = mu[E]
# Retrieve covariances.
covQ = cov[Q][:,Q]
covE = cov[E][:,E]
covJ = cov[Q][:,E]
covQE = np.row_stack((
np.column_stack((covQ, covJ)),
np.column_stack((covJ.T, covE))
))
assert np.allclose(covQE, covQE.T)
return muQ, muE, covQ, covE, covJ
@staticmethod
def mvn_condition(mu, cov, query, evidence):
assert isinstance(query, list)
assert isinstance(evidence, dict)
assert len(mu) == cov.shape[0] == cov.shape[1]
assert len(query) + len(evidence) <= len(mu)
# Extract indexes and values from evidence.
Ei, Ev = evidence.keys(), evidence.values()
muQ, muE, covQ, covE, covJ = \
FactorAnalysis.mvn_marginalize(mu, cov, query, Ei)
# Invoke Fact 4 from the reference below, where G means 'given'.
# http://web4.cs.ucl.ac.uk/staff/C.Bracegirdle/bayesTheoremForGaussians.pdf
P = np.dot(covJ, np.linalg.inv(covE))
muG = muQ + np.dot(P, Ev - muE)
covG = covQ - np.dot(P, covJ.T)
return muG, covG
# --------------------------------------------------------------------------
# Serialization.
def to_metadata(self):
metadata = dict()
metadata['outputs'] = self.outputs
metadata['inputs'] = self.inputs
metadata['N'] = self.N
metadata['L'] = self.L
metadata['data'] = self.data.items()
# Store parameters as lists for JSON.
metadata['params'] = dict()
metadata['params']['mux'] = self.mux.tolist()
metadata['params']['Psi'] = self.Psi.tolist()
metadata['params']['W'] = self.W.tolist()
metadata['factory'] = ('cgpm.factor.factor', 'FactorAnalysis')
return metadata
@classmethod
def from_metadata(cls, metadata, rng=None):
if rng is None:
rng = gu.gen_rng(0)
fact = cls(
outputs=metadata['outputs'],
inputs=metadata['inputs'],
L=metadata['L'],
params=metadata['params'],
rng=rng)
fact.data = OrderedDict(metadata['data'])
fact.N = metadata['N']
return fact
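# ----------------------------------------------------------------------------
# A minimal, self-contained numerical sketch (not part of the original module)
# for the docstring exercise above: the closed-form posterior z|x ~ Normal(m, S)
# should agree with conditioning the joint Gaussian over [z, x] via the Schur
# complement. The dimensions and random parameters below are illustrative, and
# only the numpy import at the top of this module is required.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    D, L = 4, 2
    W = rng.normal(size=(D, L))
    mux = rng.normal(size=D)
    Psi = np.diag(rng.uniform(0.5, 1.5, size=D))
    x = rng.normal(size=D)
    # Closed form (Murphy 12.1): S = inv(I + W'.inv(Psi).W), m = S.W'.inv(Psi).(x-mux).
    Psi_inv = np.linalg.inv(Psi)
    S = np.linalg.inv(np.eye(L) + W.T.dot(Psi_inv).dot(W))
    m = S.dot(W.T).dot(Psi_inv).dot(x - mux)
    # Condition the joint [z, x] directly: mean [0, mux], covariance in block form.
    cov_zx = W.T                            # cov(z, x), shape (L, D)
    cov_xx = W.dot(W.T) + Psi               # cov(x, x), shape (D, D)
    P = cov_zx.dot(np.linalg.inv(cov_xx))   # Schur-complement "gain"
    m_joint = P.dot(x - mux)
    S_joint = np.eye(L) - P.dot(cov_zx.T)
    assert np.allclose(m, m_joint) and np.allclose(S, S_joint)
    print('Posterior z|x matches Schur-complement conditioning.')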
|
|
"""
Tests for Factor terms.
"""
from functools import partial
from itertools import product
from nose_parameterized import parameterized
from unittest import TestCase
from toolz import compose
from numpy import (
apply_along_axis,
arange,
array,
datetime64,
empty,
eye,
log1p,
nan,
ones,
rot90,
where,
)
from numpy.random import randn, seed
import pandas as pd
from scipy.stats.mstats import winsorize as scipy_winsorize
from catalyst.errors import BadPercentileBounds, UnknownRankMethod
from catalyst.lib.labelarray import LabelArray
from catalyst.lib.rank import masked_rankdata_2d
from catalyst.lib.normalize import naive_grouped_rowwise_apply as grouped_apply
from catalyst.pipeline import Classifier, Factor, Filter
from catalyst.pipeline.factors import CustomFactor
from catalyst.pipeline.factors.equity import (
Returns,
RSI,
)
from catalyst.testing import (
check_allclose,
check_arrays,
parameter_space,
permute_rows,
)
from catalyst.testing.fixtures import CatalystTestCase
from catalyst.testing.predicates import assert_equal
from catalyst.utils.numpy_utils import (
categorical_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
NaTns,
)
from catalyst.utils.math_utils import nanmean, nanstd
from .base import BasePipelineTestCase
class F(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class OtherF(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class C(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
class OtherC(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
class Mask(Filter):
inputs = ()
window_length = 0
for_each_factor_dtype = parameterized.expand([
('datetime64[ns]', datetime64ns_dtype),
('float', float64_dtype),
])
class FactorTestCase(BasePipelineTestCase):
def init_instance_fixtures(self):
super(FactorTestCase, self).init_instance_fixtures()
self.f = F()
def test_bad_input(self):
with self.assertRaises(UnknownRankMethod):
self.f.rank("not a real rank method")
@parameter_space(method_name=['isnan', 'notnan', 'isfinite'])
def test_float64_only_ops(self, method_name):
class NotFloat(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
nf = NotFloat()
meth = getattr(nf, method_name)
with self.assertRaises(TypeError):
meth()
@parameter_space(custom_missing_value=[-1, 0])
def _test_isnull_int_dtype(self, custom_missing_value):
class CustomMissingValue(Factor):
dtype = int64_dtype
window_length = 0
missing_value = custom_missing_value
inputs = ()
factor = CustomMissingValue()
data = arange(25).reshape(5, 5)
data[eye(5, dtype=bool)] = custom_missing_value
self.check_terms(
{
'isnull': factor.isnull(),
'notnull': factor.notnull(),
},
{
'isnull': eye(5, dtype=bool),
'notnull': ~eye(5, dtype=bool),
},
initial_workspace={factor: data},
mask=self.build_mask(ones((5, 5))),
)
def _test_isnull_datetime_dtype(self):
class DatetimeFactor(Factor):
dtype = datetime64ns_dtype
window_length = 0
inputs = ()
factor = DatetimeFactor()
data = arange(25).reshape(5, 5).astype('datetime64[ns]')
data[eye(5, dtype=bool)] = NaTns
self.check_terms(
{
'isnull': factor.isnull(),
'notnull': factor.notnull(),
},
{
'isnull': eye(5, dtype=bool),
'notnull': ~eye(5, dtype=bool),
},
initial_workspace={factor: data},
mask=self.build_mask(ones((5, 5))),
)
@for_each_factor_dtype
def _test_rank_ascending(self, name, factor_dtype):
f = F(dtype=factor_dtype)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
expected_ranks = {
'ordinal': array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
'average': array([[1.5, 3., 4., 5., 1.5],
[2.5, 4., 5., 1., 2.5],
[3.5, 5., 1., 2., 3.5],
[4.5, 1., 2., 3., 4.5],
[1.5, 3., 4., 5., 1.5]]),
'min': array([[1., 3., 4., 5., 1.],
[2., 4., 5., 1., 2.],
[3., 5., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 3., 4., 5., 1.]]),
'max': array([[2., 3., 4., 5., 2.],
[3., 4., 5., 1., 3.],
[4., 5., 1., 2., 4.],
[5., 1., 2., 3., 5.],
[2., 3., 4., 5., 2.]]),
'dense': array([[1., 2., 3., 4., 1.],
[2., 3., 4., 1., 2.],
[3., 4., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 2., 3., 4., 1.]]),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={f: data},
mask=self.build_mask(ones((5, 5))),
)
check({meth: f.rank(method=meth) for meth in expected_ranks})
check({
meth: f.rank(method=meth, ascending=True)
for meth in expected_ranks
})
# Not passing a method should default to ordinal.
check({'ordinal': f.rank()})
check({'ordinal': f.rank(ascending=True)})
@for_each_factor_dtype
def _test_rank_descending(self, name, factor_dtype):
f = F(dtype=factor_dtype)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
expected_ranks = {
'ordinal': array([[4., 3., 2., 1., 5.],
[3., 2., 1., 5., 4.],
[2., 1., 5., 4., 3.],
[1., 5., 4., 3., 2.],
[4., 3., 2., 1., 5.]]),
'average': array([[4.5, 3., 2., 1., 4.5],
[3.5, 2., 1., 5., 3.5],
[2.5, 1., 5., 4., 2.5],
[1.5, 5., 4., 3., 1.5],
[4.5, 3., 2., 1., 4.5]]),
'min': array([[4., 3., 2., 1., 4.],
[3., 2., 1., 5., 3.],
[2., 1., 5., 4., 2.],
[1., 5., 4., 3., 1.],
[4., 3., 2., 1., 4.]]),
'max': array([[5., 3., 2., 1., 5.],
[4., 2., 1., 5., 4.],
[3., 1., 5., 4., 3.],
[2., 5., 4., 3., 2.],
[5., 3., 2., 1., 5.]]),
'dense': array([[4., 3., 2., 1., 4.],
[3., 2., 1., 4., 3.],
[2., 1., 4., 3., 2.],
[1., 4., 3., 2., 1.],
[4., 3., 2., 1., 4.]]),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={f: data},
mask=self.build_mask(ones((5, 5))),
)
check({
meth: f.rank(method=meth, ascending=False)
for meth in expected_ranks
})
# Not passing a method should default to ordinal.
check({'ordinal': f.rank(ascending=False)})
@for_each_factor_dtype
def _test_rank_after_mask(self, name, factor_dtype):
f = F(dtype=factor_dtype)
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
mask_data = ~eye(5, dtype=bool)
initial_workspace = {f: data, Mask(): mask_data}
terms = {
"ascending_nomask": f.rank(ascending=True),
"ascending_mask": f.rank(ascending=True, mask=Mask()),
"descending_nomask": f.rank(ascending=False),
"descending_mask": f.rank(ascending=False, mask=Mask()),
}
expected = {
"ascending_nomask": array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
"descending_nomask": array([[4., 3., 2., 1., 5.],
[3., 2., 1., 5., 4.],
[2., 1., 5., 4., 3.],
[1., 5., 4., 3., 2.],
[4., 3., 2., 1., 5.]]),
# Diagonal should be all nans, and anything whose rank was less
# than the diagonal in the unmasked calc should go down by 1.
"ascending_mask": array([[nan, 2., 3., 4., 1.],
[2., nan, 4., 1., 3.],
[2., 4., nan, 1., 3.],
[3., 1., 2., nan, 4.],
[1., 2., 3., 4., nan]]),
"descending_mask": array([[nan, 3., 2., 1., 4.],
[2., nan, 1., 4., 3.],
[2., 1., nan, 4., 3.],
[1., 4., 3., nan, 2.],
[4., 3., 2., 1., nan]]),
}
self.check_terms(
terms,
expected,
initial_workspace,
mask=self.build_mask(ones((5, 5))),
)
@for_each_factor_dtype
def _test_grouped_rank_ascending(self, name, factor_dtype=float64_dtype):
f = F(dtype=factor_dtype)
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
# Generated with:
# classifier_data = arange(25).reshape(5, 5).transpose() % 2
classifier_data = array([[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=int64_dtype)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
expected_ranks = {
'ordinal': array(
[[1., 1., 3., 2., 2.],
[1., 2., 3., 1., 2.],
[2., 2., 1., 1., 3.],
[2., 1., 1., 2., 3.],
[1., 1., 3., 2., 2.]]
),
'average': array(
[[1.5, 1., 3., 2., 1.5],
[1.5, 2., 3., 1., 1.5],
[2.5, 2., 1., 1., 2.5],
[2.5, 1., 1., 2., 2.5],
[1.5, 1., 3., 2., 1.5]]
),
'min': array(
[[1., 1., 3., 2., 1.],
[1., 2., 3., 1., 1.],
[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 3., 2., 1.]]
),
'max': array(
[[2., 1., 3., 2., 2.],
[2., 2., 3., 1., 2.],
[3., 2., 1., 1., 3.],
[3., 1., 1., 2., 3.],
[2., 1., 3., 2., 2.]]
),
'dense': array(
[[1., 1., 2., 2., 1.],
[1., 2., 2., 1., 1.],
[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 2., 2., 1.]]
),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={
f: data,
c: classifier_data,
str_c: string_classifier_data,
},
mask=self.build_mask(ones((5, 5))),
)
# Not specifying the value of ascending param should default to True
check({
meth: f.rank(method=meth, groupby=c)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=str_c)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=c, ascending=True)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=str_c, ascending=True)
for meth in expected_ranks
})
# Not passing a method should default to ordinal
check({'ordinal': f.rank(groupby=c)})
check({'ordinal': f.rank(groupby=str_c)})
check({'ordinal': f.rank(groupby=c, ascending=True)})
check({'ordinal': f.rank(groupby=str_c, ascending=True)})
@for_each_factor_dtype
def _test_grouped_rank_descending(self, name, factor_dtype):
f = F(dtype=factor_dtype)
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]], dtype=factor_dtype)
# Generated with:
# classifier_data = arange(25).reshape(5, 5).transpose() % 2
classifier_data = array([[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=int64_dtype)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
expected_ranks = {
'ordinal': array(
[[2., 2., 1., 1., 3.],
[2., 1., 1., 2., 3.],
[1., 1., 3., 2., 2.],
[1., 2., 3., 1., 2.],
[2., 2., 1., 1., 3.]]
),
'average': array(
[[2.5, 2., 1., 1., 2.5],
[2.5, 1., 1., 2., 2.5],
[1.5, 1., 3., 2., 1.5],
[1.5, 2., 3., 1., 1.5],
[2.5, 2., 1., 1., 2.5]]
),
'min': array(
[[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 3., 2., 1.],
[1., 2., 3., 1., 1.],
[2., 2., 1., 1., 2.]]
),
'max': array(
[[3., 2., 1., 1., 3.],
[3., 1., 1., 2., 3.],
[2., 1., 3., 2., 2.],
[2., 2., 3., 1., 2.],
[3., 2., 1., 1., 3.]]
),
'dense': array(
[[2., 2., 1., 1., 2.],
[2., 1., 1., 2., 2.],
[1., 1., 2., 2., 1.],
[1., 2., 2., 1., 1.],
[2., 2., 1., 1., 2.]]
),
}
def check(terms):
self.check_terms(
terms,
expected={name: expected_ranks[name] for name in terms},
initial_workspace={
f: data,
c: classifier_data,
str_c: string_classifier_data,
},
mask=self.build_mask(ones((5, 5))),
)
check({
meth: f.rank(method=meth, groupby=c, ascending=False)
for meth in expected_ranks
})
check({
meth: f.rank(method=meth, groupby=str_c, ascending=False)
for meth in expected_ranks
})
# Not passing a method should default to ordinal
check({'ordinal': f.rank(groupby=c, ascending=False)})
check({'ordinal': f.rank(groupby=str_c, ascending=False)})
@parameterized.expand([
# Test cases computed by doing:
# from numpy.random import seed, randn
# from talib import RSI
# seed(seed_value)
# data = abs(randn(15, 3))
# expected = [RSI(data[:, i])[-1] for i in range(3)]
(100, array([41.032913785966, 51.553585468393, 51.022005016446])),
(101, array([43.506969935466, 46.145367530182, 50.57407044197])),
(102, array([46.610102205934, 47.646892444315, 52.13182788538])),
])
def test_rsi(self, seed_value, expected):
rsi = RSI()
today = datetime64(1, 'ns')
assets = arange(3)
out = empty((3,), dtype=float)
seed(seed_value) # Seed so we get deterministic results.
test_data = abs(randn(15, 3))
out = empty((3,), dtype=float)
rsi.compute(today, assets, out, test_data)
check_allclose(expected, out)
@parameterized.expand([
(100, 15),
(101, 4),
(102, 100),
])
def test_returns(self, seed_value, window_length):
returns = Returns(window_length=window_length)
today = datetime64(1, 'ns')
assets = arange(3)
out = empty((3,), dtype=float)
seed(seed_value) # Seed so we get deterministic results.
test_data = abs(randn(window_length, 3))
# Calculate the expected returns
expected = (test_data[-1] - test_data[0]) / test_data[0]
out = empty((3,), dtype=float)
returns.compute(today, assets, out, test_data)
check_allclose(expected, out)
def gen_ranking_cases():
seeds = range(int(1e4), int(1e5), int(1e4))
methods = ('ordinal', 'average')
use_mask_values = (True, False)
set_missing_values = (True, False)
ascending_values = (True, False)
return product(
seeds,
methods,
use_mask_values,
set_missing_values,
ascending_values,
)
@parameterized.expand(gen_ranking_cases())
def test_masked_rankdata_2d(self,
seed_value,
method,
use_mask,
set_missing,
ascending):
eyemask = ~eye(5, dtype=bool)
nomask = ones((5, 5), dtype=bool)
seed(seed_value)
asfloat = (randn(5, 5) * seed_value)
asdatetime = (asfloat).copy().view('datetime64[ns]')
mask = eyemask if use_mask else nomask
if set_missing:
asfloat[:, 2] = nan
asdatetime[:, 2] = NaTns
float_result = masked_rankdata_2d(
data=asfloat,
mask=mask,
missing_value=nan,
method=method,
ascending=True,
)
datetime_result = masked_rankdata_2d(
data=asdatetime,
mask=mask,
missing_value=NaTns,
method=method,
ascending=True,
)
check_arrays(float_result, datetime_result)
def _test_normalizations_hand_computed(self):
"""
Test the hand-computed example in factor.demean.
"""
f = self.f
m = Mask()
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
factor_data = array(
[[1.0, 2.0, 3.0, 4.0],
[1.5, 2.5, 3.5, 1.0],
[2.0, 3.0, 4.0, 1.5],
[2.5, 3.5, 1.0, 2.0]],
)
filter_data = array(
[[False, True, True, True],
[True, False, True, True],
[True, True, False, True],
[True, True, True, False]],
dtype=bool,
)
classifier_data = array(
[[1, 1, 2, 2],
[1, 1, 2, 2],
[1, 1, 2, 2],
[1, 1, 2, 2]],
dtype=int64_dtype,
)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
terms = {
'vanilla': f.demean(),
'masked': f.demean(mask=m),
'grouped': f.demean(groupby=c),
'grouped_str': f.demean(groupby=str_c),
'grouped_masked': f.demean(mask=m, groupby=c),
'grouped_masked_str': f.demean(mask=m, groupby=str_c),
}
expected = {
'vanilla': array(
[[-1.500, -0.500, 0.500, 1.500],
[-0.625, 0.375, 1.375, -1.125],
[-0.625, 0.375, 1.375, -1.125],
[0.250, 1.250, -1.250, -0.250]],
),
'masked': array(
[[nan, -1.000, 0.000, 1.000],
[-0.500, nan, 1.500, -1.000],
[-0.166, 0.833, nan, -0.666],
[0.166, 1.166, -1.333, nan]],
),
'grouped': array(
[[-0.500, 0.500, -0.500, 0.500],
[-0.500, 0.500, 1.250, -1.250],
[-0.500, 0.500, 1.250, -1.250],
[-0.500, 0.500, -0.500, 0.500]],
),
'grouped_masked': array(
[[nan, 0.000, -0.500, 0.500],
[0.000, nan, 1.250, -1.250],
[-0.500, 0.500, nan, 0.000],
[-0.500, 0.500, 0.000, nan]]
)
}
# Changing the classifier dtype shouldn't affect anything.
expected['grouped_str'] = expected['grouped']
expected['grouped_masked_str'] = expected['grouped_masked']
self.check_terms(
terms,
expected,
initial_workspace={
f: factor_data,
c: classifier_data,
str_c: string_classifier_data,
m: filter_data,
},
mask=self.build_mask(self.ones_mask(shape=factor_data.shape)),
# The hand-computed values aren't very precise (in particular,
# we truncate repeating decimals at 3 places). This is just
# asserting that the example isn't misleading by being totally
# wrong.
check=partial(check_allclose, atol=0.001),
)
def _test_winsorize_hand_computed(self):
"""
Test the hand-computed example in factor.winsorize.
"""
f = self.f
m = Mask()
c = C()
str_c = C(dtype=categorical_dtype, missing_value=None)
factor_data = array([
[1., 2., 3., 4., 5., 6.],
[1., 8., 27., 64., 125., 216.],
[6., 5., 4., 3., 2., 1.]
])
filter_data = array(
[[False, True, True, True, True, True],
[True, False, True, True, True, True],
[True, True, False, True, True, True]],
dtype=bool,
)
classifier_data = array(
[[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2]],
dtype=int64_dtype,
)
string_classifier_data = LabelArray(
classifier_data.astype(str).astype(object),
missing_value=None,
)
terms = {
'winsor_1': f.winsorize(
min_percentile=0.33,
max_percentile=0.67
),
'winsor_2': f.winsorize(
min_percentile=0.49,
max_percentile=1
),
'winsor_3': f.winsorize(
min_percentile=0,
max_percentile=.67
),
'masked': f.winsorize(
min_percentile=0.33,
max_percentile=0.67,
mask=m
),
'grouped': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
groupby=c
),
'grouped_str': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
groupby=str_c
),
'grouped_masked': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
mask=m,
groupby=c
),
'grouped_masked_str': f.winsorize(
min_percentile=0.34,
max_percentile=0.66,
mask=m,
groupby=str_c
),
}
expected = {
'winsor_1': array([
[2., 2., 3., 4., 5., 5.],
[8., 8., 27., 64., 125., 125.],
[5., 5., 4., 3., 2., 2.]
]),
'winsor_2': array([
[3.0, 3., 3., 4., 5., 6.],
[27., 27., 27., 64., 125., 216.],
[6.0, 5., 4., 3., 3., 3.]
]),
'winsor_3': array([
[1., 2., 3., 4., 5., 5.],
[1., 8., 27., 64., 125., 125.],
[5., 5., 4., 3., 2., 1.]
]),
'masked': array([
[nan, 3., 3., 4., 5., 5.],
[27., nan, 27., 64., 125., 125.],
[5.0, 5., nan, 3., 2., 2.]
]),
'grouped': array([
[2., 2., 2., 5., 5., 5.],
[8., 8., 8., 125., 125., 125.],
[5., 5., 5., 2., 2., 2.]
]),
'grouped_masked': array([
[nan, 2., 3., 5., 5., 5.],
[1.0, nan, 27., 125., 125., 125.],
[6.0, 5., nan, 2., 2., 2.]
]),
}
# Changing the classifier dtype shouldn't affect anything.
expected['grouped_str'] = expected['grouped']
expected['grouped_masked_str'] = expected['grouped_masked']
self.check_terms(
terms,
expected,
initial_workspace={
f: factor_data,
c: classifier_data,
str_c: string_classifier_data,
m: filter_data,
},
mask=self.build_mask(self.ones_mask(shape=factor_data.shape)),
check=partial(check_allclose, atol=0.001),
)
def test_winsorize_bad_bounds(self):
"""
Test out of bounds input for factor.winsorize.
"""
f = self.f
bad_percentiles = [
(-.1, 1),
(0, 95),
(5, 95),
(5, 5),
(.6, .4)
]
for min_, max_ in bad_percentiles:
with self.assertRaises(BadPercentileBounds):
f.winsorize(min_percentile=min_, max_percentile=max_)
@parameter_space(
seed_value=range(1, 2),
normalizer_name_and_func=[
('demean', {}, lambda row: row - nanmean(row)),
('zscore', {}, lambda row: (row - nanmean(row)) / nanstd(row)),
(
'winsorize',
{"min_percentile": 0.25, "max_percentile": 0.75},
lambda row: scipy_winsorize(
row,
limits=0.25,
)
),
],
add_nulls_to_factor=(False, True,),
)
def _test_normalizations_randomized(self,
seed_value,
normalizer_name_and_func,
add_nulls_to_factor):
name, kwargs, func = normalizer_name_and_func
shape = (20, 20)
# All Trues.
nomask = self.ones_mask(shape=shape)
# Falses on main diagonal.
eyemask = self.eye_mask(shape=shape)
# Falses on other diagonal.
eyemask90 = rot90(eyemask)
# Falses on both diagonals.
xmask = eyemask & eyemask90
# Block of random data.
factor_data = self.randn_data(seed=seed_value, shape=shape)
if add_nulls_to_factor:
factor_data = where(eyemask, factor_data, nan)
# Cycles of 0, 1, 2, 0, 1, 2, ...
classifier_data = (
(self.arange_data(shape=shape, dtype=int64_dtype) + seed_value) % 3
)
# With -1s on main diagonal.
classifier_data_eyenulls = where(eyemask, classifier_data, -1)
# With -1s on opposite diagonal.
classifier_data_eyenulls90 = where(eyemask90, classifier_data, -1)
# With -1s on both diagonals.
classifier_data_xnulls = where(xmask, classifier_data, -1)
f = self.f
c = C()
c_with_nulls = OtherC()
m = Mask()
method = partial(getattr(f, name), **kwargs)
terms = {
'vanilla': method(),
'masked': method(mask=m),
'grouped': method(groupby=c),
'grouped_with_nulls': method(groupby=c_with_nulls),
'both': method(mask=m, groupby=c),
'both_with_nulls': method(mask=m, groupby=c_with_nulls),
}
expected = {
'vanilla': apply_along_axis(func, 1, factor_data,),
'masked': where(
eyemask,
grouped_apply(factor_data, eyemask, func),
nan,
),
'grouped': grouped_apply(
factor_data,
classifier_data,
func,
),
# If the classifier has nulls, we should get NaNs in the
# corresponding locations in the output.
'grouped_with_nulls': where(
eyemask90,
grouped_apply(factor_data, classifier_data_eyenulls90, func),
nan,
),
# Passing a mask with a classifier should behave as though the
# classifier had nulls where the mask was False.
'both': where(
eyemask,
grouped_apply(
factor_data,
classifier_data_eyenulls,
func,
),
nan,
),
'both_with_nulls': where(
xmask,
grouped_apply(
factor_data,
classifier_data_xnulls,
func,
),
nan,
)
}
self.check_terms(
terms=terms,
expected=expected,
initial_workspace={
f: factor_data,
c: classifier_data,
c_with_nulls: classifier_data_eyenulls90,
Mask(): eyemask,
},
mask=self.build_mask(nomask),
)
@parameter_space(method_name=['demean', 'zscore'])
def test_cant_normalize_non_float(self, method_name):
class DateFactor(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
d = DateFactor()
with self.assertRaises(TypeError) as e:
getattr(d, method_name)()
errmsg = str(e.exception)
expected = (
"{normalizer}() is only defined on Factors of dtype float64,"
" but it was called on a Factor of dtype datetime64[ns]."
).format(normalizer=method_name)
self.assertEqual(errmsg, expected)
@parameter_space(seed=[1, 2, 3])
def _test_quantiles_unmasked(self, seed):
permute = partial(permute_rows, seed)
shape = (6, 6)
# Shuffle the input rows to verify that we don't depend on the order.
# Take the log to ensure that we don't depend on linear scaling or
# integrality of inputs
factor_data = permute(log1p(arange(36, dtype=float).reshape(shape)))
f = self.f
# Apply the same shuffle we applied to the input rows to our
# expectations. Doing it this way makes it obvious that our
# expectation corresponds to our input, while still testing against
# a range of input orderings.
permuted_array = compose(permute, partial(array, dtype=int64_dtype))
self.check_terms(
terms={
'2': f.quantiles(bins=2),
'3': f.quantiles(bins=3),
'6': f.quantiles(bins=6),
},
initial_workspace={
f: factor_data,
},
expected={
# The values in the input are all increasing, so the first half
# of each row should be in the bottom bucket, and the second
# half should be in the top bucket.
'2': permuted_array([[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]]),
# Similar for three buckets.
'3': permuted_array([[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 2, 2]]),
# In the limiting case, we just have every column different.
'6': permuted_array([[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5]]),
},
mask=self.build_mask(self.ones_mask(shape=shape)),
)
@parameter_space(seed=[1, 2, 3])
def _test_quantiles_masked(self, seed):
permute = partial(permute_rows, seed)
# 7 x 7 so that we divide evenly into 2/3/6-tiles after including the
# nan value in each row.
shape = (7, 7)
# Shuffle the input rows to verify that we don't depend on the order.
# Take the log to ensure that we don't depend on linear scaling or
# integrality of inputs
factor_data = permute(log1p(arange(49, dtype=float).reshape(shape)))
factor_data_w_nans = where(
permute(rot90(self.eye_mask(shape=shape))),
factor_data,
nan,
)
mask_data = permute(self.eye_mask(shape=shape))
f = F()
f_nans = OtherF()
m = Mask()
# Apply the same shuffle we applied to the input rows to our
# expectations. Doing it this way makes it obvious that our
# expectation corresponds to our input, while still testing against
# a range of input orderings.
permuted_array = compose(permute, partial(array, dtype=int64_dtype))
self.check_terms(
terms={
'2_masked': f.quantiles(bins=2, mask=m),
'3_masked': f.quantiles(bins=3, mask=m),
'6_masked': f.quantiles(bins=6, mask=m),
'2_nans': f_nans.quantiles(bins=2),
'3_nans': f_nans.quantiles(bins=3),
'6_nans': f_nans.quantiles(bins=6),
},
initial_workspace={
f: factor_data,
f_nans: factor_data_w_nans,
m: mask_data,
},
expected={
# Expected results here are the same as in
# test_quantiles_unmasked, except with diagonals of -1s
# interpolated to match the effects of masking and/or input
# nans.
'2_masked': permuted_array([[-1, 0, 0, 0, 1, 1, 1],
[0, -1, 0, 0, 1, 1, 1],
[0, 0, -1, 0, 1, 1, 1],
[0, 0, 0, -1, 1, 1, 1],
[0, 0, 0, 1, -1, 1, 1],
[0, 0, 0, 1, 1, -1, 1],
[0, 0, 0, 1, 1, 1, -1]]),
'3_masked': permuted_array([[-1, 0, 0, 1, 1, 2, 2],
[0, -1, 0, 1, 1, 2, 2],
[0, 0, -1, 1, 1, 2, 2],
[0, 0, 1, -1, 1, 2, 2],
[0, 0, 1, 1, -1, 2, 2],
[0, 0, 1, 1, 2, -1, 2],
[0, 0, 1, 1, 2, 2, -1]]),
'6_masked': permuted_array([[-1, 0, 1, 2, 3, 4, 5],
[0, -1, 1, 2, 3, 4, 5],
[0, 1, -1, 2, 3, 4, 5],
[0, 1, 2, -1, 3, 4, 5],
[0, 1, 2, 3, -1, 4, 5],
[0, 1, 2, 3, 4, -1, 5],
[0, 1, 2, 3, 4, 5, -1]]),
'2_nans': permuted_array([[0, 0, 0, 1, 1, 1, -1],
[0, 0, 0, 1, 1, -1, 1],
[0, 0, 0, 1, -1, 1, 1],
[0, 0, 0, -1, 1, 1, 1],
[0, 0, -1, 0, 1, 1, 1],
[0, -1, 0, 0, 1, 1, 1],
[-1, 0, 0, 0, 1, 1, 1]]),
'3_nans': permuted_array([[0, 0, 1, 1, 2, 2, -1],
[0, 0, 1, 1, 2, -1, 2],
[0, 0, 1, 1, -1, 2, 2],
[0, 0, 1, -1, 1, 2, 2],
[0, 0, -1, 1, 1, 2, 2],
[0, -1, 0, 1, 1, 2, 2],
[-1, 0, 0, 1, 1, 2, 2]]),
'6_nans': permuted_array([[0, 1, 2, 3, 4, 5, -1],
[0, 1, 2, 3, 4, -1, 5],
[0, 1, 2, 3, -1, 4, 5],
[0, 1, 2, -1, 3, 4, 5],
[0, 1, -1, 2, 3, 4, 5],
[0, -1, 1, 2, 3, 4, 5],
[-1, 0, 1, 2, 3, 4, 5]]),
},
mask=self.build_mask(self.ones_mask(shape=shape)),
)
def _test_quantiles_uneven_buckets(self):
permute = partial(permute_rows, 5)
shape = (5, 5)
factor_data = permute(log1p(arange(25, dtype=float).reshape(shape)))
mask_data = permute(self.eye_mask(shape=shape))
f = F()
m = Mask()
permuted_array = compose(permute, partial(array, dtype=int64_dtype))
self.check_terms(
terms={
'3_masked': f.quantiles(bins=3, mask=m),
'7_masked': f.quantiles(bins=7, mask=m),
},
initial_workspace={
f: factor_data,
m: mask_data,
},
expected={
'3_masked': permuted_array([[-1, 0, 0, 1, 2],
[0, -1, 0, 1, 2],
[0, 0, -1, 1, 2],
[0, 0, 1, -1, 2],
[0, 0, 1, 2, -1]]),
'7_masked': permuted_array([[-1, 0, 2, 4, 6],
[0, -1, 2, 4, 6],
[0, 2, -1, 4, 6],
[0, 2, 4, -1, 6],
[0, 2, 4, 6, -1]]),
},
mask=self.build_mask(self.ones_mask(shape=shape)),
)
def test_quantile_helpers(self):
f = self.f
m = Mask()
self.assertIs(f.quartiles(), f.quantiles(bins=4))
self.assertIs(f.quartiles(mask=m), f.quantiles(bins=4, mask=m))
self.assertIsNot(f.quartiles(), f.quartiles(mask=m))
self.assertIs(f.quintiles(), f.quantiles(bins=5))
self.assertIs(f.quintiles(mask=m), f.quantiles(bins=5, mask=m))
self.assertIsNot(f.quintiles(), f.quintiles(mask=m))
self.assertIs(f.deciles(), f.quantiles(bins=10))
self.assertIs(f.deciles(mask=m), f.quantiles(bins=10, mask=m))
self.assertIsNot(f.deciles(), f.deciles(mask=m))
class ShortReprTestCase(TestCase):
"""
Tests for short_repr methods of Factors.
"""
def test_demean(self):
r = F().demean().short_repr()
self.assertEqual(r, "GroupedRowTransform('demean')")
def test_zscore(self):
r = F().zscore().short_repr()
self.assertEqual(r, "GroupedRowTransform('zscore')")
def test_winsorize(self):
r = F().winsorize(min_percentile=.05, max_percentile=.95).short_repr()
self.assertEqual(r, "GroupedRowTransform('winsorize')")
class TestWindowSafety(TestCase):
def test_zscore_is_window_safe(self):
self.assertTrue(F().zscore().window_safe)
@parameter_space(__fail_fast=True, is_window_safe=[True, False])
def test_window_safety_propagates_to_recarray_fields(self, is_window_safe):
class MultipleOutputs(CustomFactor):
outputs = ['a', 'b']
inputs = ()
window_length = 5
window_safe = is_window_safe
mo = MultipleOutputs()
for attr in mo.a, mo.b:
self.assertEqual(attr.window_safe, mo.window_safe)
def test_demean_is_window_safe_if_input_is_window_safe(self):
self.assertFalse(F().demean().window_safe)
self.assertFalse(F(window_safe=False).demean().window_safe)
self.assertTrue(F(window_safe=True).demean().window_safe)
def test_winsorize_is_window_safe_if_input_is_window_safe(self):
self.assertFalse(
F().winsorize(min_percentile=.05, max_percentile=.95).window_safe
)
self.assertFalse(
F(window_safe=False).winsorize(
min_percentile=.05,
max_percentile=.95
).window_safe
)
self.assertTrue(
F(window_safe=True).winsorize(
min_percentile=.05,
max_percentile=.95
).window_safe
)
class TestPostProcessAndToWorkSpaceValue(CatalystTestCase):
@parameter_space(dtype_=(float64_dtype, datetime64ns_dtype))
def test_reversability(self, dtype_):
class F(Factor):
inputs = ()
dtype = dtype_
window_length = 0
f = F()
column_data = array(
[[0, f.missing_value],
[1, f.missing_value],
[2, 3]],
dtype=dtype_,
)
assert_equal(f.postprocess(column_data.ravel()), column_data.ravel())
# only include the non-missing data
pipeline_output = pd.Series(
data=array([0, 1, 2, 3], dtype=dtype_),
index=pd.MultiIndex.from_arrays([
[pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-03')],
[0, 0, 0, 1],
]),
)
assert_equal(
f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
column_data,
)
|
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import six
import grpc
from grpc import _common
from grpc import _interceptor
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].message()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple('_HandlerCallDetails', (
'method',
'invocation_metadata',
)), grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
self.abortion = None
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(operations,
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].cancelled():
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request,
request_deserializer)
with state.condition:
if request is None:
_abort(state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(self._rpc_event.call_details.deadline - time.time(), 0)
def cancel(self):
self._rpc_event.call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return self._rpc_event.invocation_metadata
def peer(self):
return _common.decode(self._rpc_event.call.peer())
def peer_identities(self):
return cygrpc.peer_identities(self._rpc_event.call)
def peer_identity_key(self):
id_key = cygrpc.peer_identity_key(self._rpc_event.call)
return id_key if id_key is None else _common.decode(id_key)
def auth_context(self):
return {
_common.decode(key): value
for key, value in six.iteritems(
cygrpc.auth_context(self._rpc_event.call))
}
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.SendInitialMetadataOperation(
initial_metadata, _EMPTY_FLAGS)
self._rpc_event.call.start_server_batch(
(operation,), _send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = trailing_metadata
def abort(self, code, details):
# treat OK like other invalid arguments: fail the RPC
if code == grpc.StatusCode.OK:
logging.error(
'abort() called with StatusCode.OK; returning UNKNOWN')
code = grpc.StatusCode.UNKNOWN
details = ''
with self._state.condition:
self._state.code = code
self._state.details = _common.encode(details)
self._state.abortion = Exception()
raise self._state.abortion
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(self._state, self._call,
self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
rpc_event.call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(state, rpc_event.call,
request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.call_details.method)
_abort(state, rpc_event.call,
cygrpc.StatusCode.unimplemented,
_common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if exception is state.abortion:
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception calling application: {}'.format(exception)
logging.exception(details)
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if exception is state.abortion:
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(exception)
logging.exception(details)
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(state, rpc_event.call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_response,
_EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (cygrpc.SendMessageOperation(
serialized_response, _EMPTY_FLAGS),)
token = _SEND_MESSAGE_TOKEN
rpc_event.call.start_server_batch(operations,
_send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(
cygrpc.SendMessageOperation(serialized_response,
_EMPTY_FLAGS))
rpc_event.call.start_server_batch(
operations,
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(rpc_event, state, behavior, argument,
request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state,
serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.unary_unary, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.unary_stream, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
return thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
return thread_pool.submit(
_stream_response_in_pool, rpc_event, state,
method_handler.stream_stream, lambda: request_iterator,
method_handler.request_deserializer, method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
def query_handlers(handler_call_details):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(handler_call_details)
if method_handler is not None:
return method_handler
return None
handler_call_details = _HandlerCallDetails(
_common.decode(rpc_event.call_details.method),
rpc_event.invocation_metadata)
if interceptor_pipeline is not None:
return interceptor_pipeline.execute(query_handlers,
handler_call_details)
else:
return query_handlers(handler_call_details)
def _reject_rpc(rpc_event, status, details):
operations = (
cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(None, status, details,
_EMPTY_FLAGS),
)
rpc_state = _RPCState()
rpc_event.call.start_server_batch(operations,
lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
return state, _handle_stream_stream(rpc_event, state,
method_handler, thread_pool)
else:
return state, _handle_stream_unary(rpc_event, state,
method_handler, thread_pool)
else:
if method_handler.response_streaming:
return state, _handle_unary_stream(rpc_event, state,
method_handler, thread_pool)
else:
return state, _handle_unary_unary(rpc_event, state,
method_handler, thread_pool)
def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
concurrency_exceeded):
if not rpc_event.success:
return None, None
if rpc_event.call_details.method is not None:
try:
method_handler = _find_method_handler(rpc_event, generic_handlers,
interceptor_pipeline)
except Exception as exception: # pylint: disable=broad-except
details = 'Exception servicing handler: {}'.format(exception)
logging.exception(details)
return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
b'Error in service handler!'), None
if method_handler is None:
return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
b'Method not found!'), None
elif concurrency_exceeded:
return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
b'Concurrent RPC limit exceeded!'), None
else:
return _handle_with_method_handler(rpc_event, method_handler,
thread_pool)
else:
return None, None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
# pylint: disable=too-many-arguments
def __init__(self, completion_queue, server, generic_handlers,
interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
self.lock = threading.RLock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.interceptor_pipeline = interceptor_pipeline
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
self.active_rpc_count = 0
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address,
server_credentials._credentials)
def _request_call(state):
state.server.request_call(state.completion_queue, state.completion_queue,
_REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _on_call_completed(state):
with state.lock:
state.active_rpc_count -= 1
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
concurrency_exceeded = (
state.maximum_concurrent_rpcs is not None and
state.active_rpc_count >= state.maximum_concurrent_rpcs)
rpc_state, rpc_future = _handle_call(
event, state.generic_handlers, state.interceptor_pipeline,
state.thread_pool, concurrency_exceeded)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if rpc_future is not None:
state.active_rpc_count += 1
rpc_future.add_done_callback(
lambda unused_future: _on_call_completed(state))
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
# We want to force the deletion of the previous event
# ~before~ we poll again; if the event has a reference
# to a shutdown Call object, this can induce spinlock.
event = None
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
thread = threading.Thread(target=_serve, args=(state,))
thread.daemon = True
thread.start()
class Server(grpc.Server):
# pylint: disable=too-many-arguments
def __init__(self, thread_pool, generic_handlers, interceptors, options,
maximum_concurrent_rpcs):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(options)
server.register_completion_queue(completion_queue)
self._state = _ServerState(completion_queue, server, generic_handlers,
_interceptor.service_pipeline(interceptors),
thread_pool, maximum_concurrent_rpcs)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state, _common.encode(address),
server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
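# Hypothetical usage sketch (not part of this module): Server is normally
# obtained through the public grpc.server() factory rather than constructed
# directly. Assuming a generated add_GreeterServicer_to_server() helper and a
# Greeter servicer implementation exist elsewhere:
#
#   from concurrent import futures
#   import grpc
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_GreeterServicer_to_server(Greeter(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()
#   # stop() returns a threading.Event that is set once shutdown completes.
#   server.stop(grace=5).wait()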
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
AVAILABLE_PROTOCOLS = ('HTTP', 'HTTPS', 'TCP')
AVAILABLE_METHODS = ('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP')
class AddPoolAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
    # provider is optional because some LBaaS implementations do
    # not support the service-type extension.
provider = forms.ChoiceField(label=_("Provider"), required=False)
subnet_id = forms.ChoiceField(label=_("Subnet"))
protocol = forms.ChoiceField(label=_("Protocol"))
lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddPoolAction, self).__init__(request, *args, **kwargs)
tenant_id = request.user.tenant_id
subnet_id_choices = [('', _("Select a Subnet"))]
try:
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve networks list.'))
networks = []
for n in networks:
for s in n['subnets']:
subnet_id_choices.append((s.id, s.cidr))
self.fields['subnet_id'].choices = subnet_id_choices
protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.extend((p, p) for p in AVAILABLE_PROTOCOLS)
self.fields['protocol'].choices = protocol_choices
lb_method_choices = [('', _("Select a Method"))]
        lb_method_choices.extend((m, m) for m in AVAILABLE_METHODS)
self.fields['lb_method'].choices = lb_method_choices
# provider choice
try:
if api.neutron.is_extension_supported(request, 'service-type'):
provider_list = api.neutron.provider_list(request)
providers = [p for p in provider_list
if p['service_type'] == 'LOADBALANCER']
else:
providers = None
except Exception:
exceptions.handle(request,
_('Unable to retrieve providers list.'))
providers = []
if providers:
default_providers = [p for p in providers if p.get('default')]
if default_providers:
default_provider = default_providers[0]['name']
else:
default_provider = None
provider_choices = [(p['name'], p['name']) for p in providers
if p['name'] != default_provider]
if default_provider:
provider_choices.insert(
0, (default_provider,
_("%s (default)") % default_provider))
else:
if providers is None:
msg = _("Provider for Load Balancer is not supported.")
else:
msg = _("No provider is available.")
provider_choices = [('', msg)]
self.fields['provider'].widget.attrs['readonly'] = True
self.fields['provider'].choices = provider_choices
class Meta:
name = _("Add New Pool")
permissions = ('openstack.services.network',)
help_text = _("Create Pool for current project.\n\n"
"Assign a name and description for the pool. "
"Choose one subnet where all members of this "
"pool must be on. "
"Select the protocol and load balancing method "
"for this pool. "
"Admin State is UP (checked) by default.")
class AddPoolStep(workflows.Step):
action_class = AddPoolAction
contributes = ("name", "description", "subnet_id", "provider",
"protocol", "lb_method", "admin_state_up")
def contribute(self, data, context):
context = super(AddPoolStep, self).contribute(data, context)
if data:
return context
class AddPool(workflows.Workflow):
slug = "addpool"
name = _("Add Pool")
finalize_button_name = _("Add")
success_message = _('Added pool "%s".')
failure_message = _('Unable to add pool "%s".')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddPoolStep,)
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def handle(self, request, context):
try:
api.lbaas.pool_create(request, **context)
return True
except Exception:
return False
class AddVipAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
floatip_address = forms.ChoiceField(
label=_("VIP Address from Floating IPs"),
widget=forms.Select(attrs={'disabled': 'disabled'}),
required=False)
other_address = fields.IPField(required=False,
initial="",
version=fields.IPv4,
mask=False)
protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
help_text=_("Enter an integer value "
"between 1 and 65535."),
validators=[validators.validate_port_range])
protocol = forms.ChoiceField(label=_("Protocol"))
session_persistence = forms.ChoiceField(
required=False, initial={}, label=_("Session Persistence"),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'persistence'
}))
cookie_name = forms.CharField(
initial="", required=False,
max_length=80, label=_("Cookie Name"),
help_text=_("Required for APP_COOKIE persistence;"
" Ignored otherwise."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'persistence',
'data-persistence-app_cookie': 'APP_COOKIE',
}))
connection_limit = forms.IntegerField(
required=False, min_value=-1, label=_("Connection Limit"),
help_text=_("Maximum number of connections allowed "
"for the VIP or '-1' if the limit is not set"))
admin_state_up = forms.BooleanField(
label=_("Admin State"), initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddVipAction, self).__init__(request, *args, **kwargs)
self.fields['other_address'].label = _("Specify a free IP address"
" from %s" %
args[0]['subnet'])
protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.extend((p, p) for p in AVAILABLE_PROTOCOLS)
self.fields['protocol'].choices = protocol_choices
session_persistence_choices = [('', _("No Session Persistence"))]
for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'):
session_persistence_choices.append((mode.lower(), mode))
self.fields[
'session_persistence'].choices = session_persistence_choices
floatip_address_choices = [('', _("Currently Not Supported"))]
self.fields['floatip_address'].choices = floatip_address_choices
def clean(self):
cleaned_data = super(AddVipAction, self).clean()
persistence = cleaned_data.get('session_persistence')
if persistence:
cleaned_data['session_persistence'] = persistence.upper()
if (cleaned_data.get('session_persistence') == 'APP_COOKIE' and
not cleaned_data.get('cookie_name')):
msg = _('Cookie name is required for APP_COOKIE persistence.')
self._errors['cookie_name'] = self.error_class([msg])
return cleaned_data
class Meta:
name = _("Specify VIP")
permissions = ('openstack.services.network',)
help_text = _("Create a VIP for this pool. "
"Assign a name and description for the VIP. "
"Specify an IP address and port for the VIP. "
"Choose the protocol and session persistence "
"method for the VIP."
"Specify the max connections allowed. "
"Admin State is UP (checked) by default.")
class AddVipStep(workflows.Step):
action_class = AddVipAction
depends_on = ("pool_id", "subnet")
contributes = ("name", "description", "floatip_address",
"other_address", "protocol_port", "protocol",
"session_persistence", "cookie_name",
"connection_limit", "admin_state_up")
def contribute(self, data, context):
context = super(AddVipStep, self).contribute(data, context)
return context
class AddVip(workflows.Workflow):
slug = "addvip"
name = _("Add VIP")
finalize_button_name = _("Add")
success_message = _('Added VIP "%s".')
failure_message = _('Unable to add VIP "%s".')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddVipStep,)
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def handle(self, request, context):
if context['other_address'] == '':
context['address'] = context['floatip_address']
else:
            if context['floatip_address'] != '':
self.failure_message = _('Only one address can be specified. '
'Unable to add VIP "%s".')
return False
else:
context['address'] = context['other_address']
try:
pool = api.lbaas.pool_get(request, context['pool_id'])
context['subnet_id'] = pool['subnet_id']
except Exception:
context['subnet_id'] = None
self.failure_message = _('Unable to retrieve the specified pool. '
'Unable to add VIP "%s".')
return False
if context['session_persistence']:
stype = context['session_persistence']
if stype == 'APP_COOKIE':
cookie = context['cookie_name']
context['session_persistence'] = {'type': stype,
'cookie_name': cookie}
else:
context['session_persistence'] = {'type': stype}
else:
context['session_persistence'] = {}
try:
api.lbaas.vip_create(request, **context)
return True
except Exception:
return False
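# Worked example (illustrative only) of the context transformation performed
# by AddVip.handle() above: with other_address='10.0.0.5', an empty
# floatip_address, session_persistence='APP_COOKIE' (already upper-cased by
# clean()), and cookie_name='sessionid', api.lbaas.vip_create() receives
#
#   context['address'] == '10.0.0.5'
#   context['session_persistence'] == {'type': 'APP_COOKIE',
#                                      'cookie_name': 'sessionid'}
#
# whereas an empty session_persistence collapses to {}.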
class AddMemberAction(workflows.Action):
pool_id = forms.ChoiceField(label=_("Pool"))
members = forms.MultipleChoiceField(
label=_("Member(s)"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
error_messages={'required':
_('At least one member must be specified')},
help_text=_("Select members for this pool "))
weight = forms.IntegerField(max_value=256, min_value=0, label=_("Weight"),
required=False,
help_text=_("Relative part of requests this "
"pool member serves compared to others"))
protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
help_text=_("Enter an integer value "
"between 1 and 65535."),
validators=[validators.validate_port_range])
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddMemberAction, self).__init__(request, *args, **kwargs)
pool_id_choices = [('', _("Select a Pool"))]
try:
tenant_id = self.request.user.tenant_id
pools = api.lbaas.pool_list(request, tenant_id=tenant_id)
except Exception:
pools = []
exceptions.handle(request,
_('Unable to retrieve pools list.'))
pools = sorted(pools,
key=lambda pool: pool.name)
for p in pools:
pool_id_choices.append((p.id, p.name))
self.fields['pool_id'].choices = pool_id_choices
members_choices = []
try:
servers, has_more = api.nova.server_list(request)
except Exception:
servers = []
exceptions.handle(request,
_('Unable to retrieve instances list.'))
if len(servers) == 0:
self.fields['members'].label = _(
"No servers available. To add a member, you"
"need at least one running instance.")
self.fields['members'].required = True
self.fields['members'].help_text = _("Select members "
"for this pool ")
self.fields['pool_id'].required = False
self.fields['protocol_port'].required = False
return
for m in servers:
members_choices.append((m.id, m.name))
self.fields['members'].choices = sorted(
members_choices,
key=lambda member: member[1])
class Meta:
name = _("Add New Member")
permissions = ('openstack.services.network',)
help_text = _("Add member to selected pool.\n\n"
"Choose one or more listed instances to be "
"added to the pool as member(s). "
"Assign a numeric weight for this member "
"Specify the port number the member(s) "
"operate on; e.g., 80.")
class AddMemberStep(workflows.Step):
action_class = AddMemberAction
contributes = ("pool_id", "members", "protocol_port", "weight",
"admin_state_up")
def contribute(self, data, context):
context = super(AddMemberStep, self).contribute(data, context)
return context
class AddMember(workflows.Workflow):
slug = "addmember"
name = _("Add Member")
finalize_button_name = _("Add")
success_message = _('Added member(s).')
failure_message = _('Unable to add member(s).')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddMemberStep,)
def handle(self, request, context):
for m in context['members']:
params = {'device_id': m}
try:
plist = api.neutron.port_list(request, **params)
except Exception:
return False
if plist:
context['address'] = plist[0].fixed_ips[0]['ip_address']
try:
context['member_id'] = api.lbaas.member_create(
request, **context).id
except Exception:
return False
return True
class AddMonitorAction(workflows.Action):
type = forms.ChoiceField(
label=_("Type"),
choices=[('ping', _('PING')),
('tcp', _('TCP')),
('http', _('HTTP')),
('https', _('HTTPS'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'type'
}))
delay = forms.IntegerField(
min_value=1,
label=_("Delay"),
help_text=_("The minimum time in seconds between regular checks "
"of a member"))
timeout = forms.IntegerField(
min_value=1,
label=_("Timeout"),
help_text=_("The maximum time in seconds for a monitor to wait "
"for a reply"))
max_retries = forms.IntegerField(
max_value=10, min_value=1,
label=_("Max Retries (1~10)"),
help_text=_("Number of permissible failures before changing "
"the status of member to inactive"))
http_method = forms.ChoiceField(
initial="GET",
required=False,
choices=[('GET', _('GET'))],
label=_("HTTP Method"),
help_text=_("HTTP method used to check health status of a member"),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'type',
'data-type-http': _('HTTP Method'),
'data-type-https': _('HTTP Method')
}))
url_path = forms.CharField(
initial="/",
required=False,
max_length=80,
label=_("URL"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'type',
'data-type-http': _('URL'),
'data-type-https': _('URL')
}))
expected_codes = forms.RegexField(
initial="200",
required=False,
max_length=80,
regex=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$',
label=_("Expected HTTP Status Codes"),
help_text=_("Expected code may be a single value (e.g. 200), "
"a list of values (e.g. 200, 202), "
"or range of values (e.g. 200-204)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'type',
'data-type-http': _('Expected HTTP Status Codes'),
'data-type-https': _('Expected HTTP Status Codes')
}))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddMonitorAction, self).__init__(request, *args, **kwargs)
def clean(self):
cleaned_data = super(AddMonitorAction, self).clean()
type_opt = cleaned_data.get('type')
if type_opt in ['http', 'https']:
http_method_opt = cleaned_data.get('http_method')
url_path = cleaned_data.get('url_path')
expected_codes = cleaned_data.get('expected_codes')
if not http_method_opt:
                msg = _('Please choose an HTTP method')
self._errors['http_method'] = self.error_class([msg])
if not url_path:
                msg = _('Please specify a URL')
self._errors['url_path'] = self.error_class([msg])
if not expected_codes:
msg = _('Please enter a single value (e.g. 200), '
'a list of values (e.g. 200, 202), '
'or range of values (e.g. 200-204)')
self._errors['expected_codes'] = self.error_class([msg])
return cleaned_data
class Meta:
name = _("Add New Monitor")
permissions = ('openstack.services.network',)
help_text = _("Create a monitor template.\n\n"
"Select type of monitoring. "
"Specify delay, timeout, and retry limits "
"required by the monitor. "
"Specify method, URL path, and expected "
"HTTP codes upon success.")
class AddMonitorStep(workflows.Step):
action_class = AddMonitorAction
contributes = ("type", "delay", "timeout", "max_retries",
"http_method", "url_path", "expected_codes",
"admin_state_up")
def contribute(self, data, context):
context = super(AddMonitorStep, self).contribute(data, context)
if data:
return context
class AddMonitor(workflows.Workflow):
slug = "addmonitor"
name = _("Add Monitor")
finalize_button_name = _("Add")
success_message = _('Added monitor')
failure_message = _('Unable to add monitor')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddMonitorStep,)
def handle(self, request, context):
try:
context['monitor_id'] = api.lbaas.pool_health_monitor_create(
request, **context).get('id')
return True
except Exception:
exceptions.handle(request, _("Unable to add monitor."))
return False
class MonitorMixin(object):
def _get_monitor_display_name(self, monitor):
fields = ['type', 'delay', 'max_retries', 'timeout']
if monitor.type in ['HTTP', 'HTTPS']:
fields.extend(['url_path', 'expected_codes', 'http_method'])
name = _("%(type)s url:%(url_path)s "
"method:%(http_method)s codes:%(expected_codes)s "
"delay:%(delay)d retries:%(max_retries)d "
"timeout:%(timeout)d")
else:
name = _("%(type)s delay:%(delay)d "
"retries:%(max_retries)d "
"timeout:%(timeout)d")
params = dict((key, getattr(monitor, key)) for key in fields)
return name % params
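# Worked example (illustrative only): for an HTTP monitor with delay=5,
# max_retries=3, timeout=4, url_path='/', http_method='GET' and
# expected_codes='200', _get_monitor_display_name() returns
#
#   "HTTP url:/ method:GET codes:200 delay:5 retries:3 timeout:4"
#
# while a PING monitor with the same timing values yields
#
#   "PING delay:5 retries:3 timeout:4"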
class AddPMAssociationAction(workflows.Action, MonitorMixin):
monitor_id = forms.ChoiceField(label=_("Monitor"))
def __init__(self, request, *args, **kwargs):
super(AddPMAssociationAction, self).__init__(request, *args, **kwargs)
def populate_monitor_id_choices(self, request, context):
self.fields['monitor_id'].label = _("Select a monitor template "
"for %s" % context['pool_name'])
monitor_id_choices = [('', _("Select a Monitor"))]
try:
tenant_id = self.request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in context['pool_monitors']:
display_name = self._get_monitor_display_name(m)
monitor_id_choices.append((m.id, display_name))
except Exception:
exceptions.handle(request,
_('Unable to retrieve monitors list.'))
self.fields['monitor_id'].choices = monitor_id_choices
return monitor_id_choices
class Meta:
name = _("Association Details")
permissions = ('openstack.services.network',)
help_text = _("Associate a health monitor with target pool.")
class AddPMAssociationStep(workflows.Step):
action_class = AddPMAssociationAction
depends_on = ("pool_id", "pool_name", "pool_monitors")
contributes = ("monitor_id",)
def contribute(self, data, context):
context = super(AddPMAssociationStep, self).contribute(data, context)
if data:
return context
class AddPMAssociation(workflows.Workflow):
slug = "addassociation"
name = _("Associate Monitor")
finalize_button_name = _("Associate")
success_message = _('Associated monitor.')
failure_message = _('Unable to associate monitor.')
success_url = "horizon:project:loadbalancers:index"
default_steps = (AddPMAssociationStep,)
def handle(self, request, context):
try:
context['monitor_id'] = api.lbaas.pool_monitor_association_create(
request, **context)
return True
except Exception:
exceptions.handle(request, _("Unable to associate monitor."))
return False
class DeletePMAssociationAction(workflows.Action, MonitorMixin):
monitor_id = forms.ChoiceField(label=_("Monitor"))
def __init__(self, request, *args, **kwargs):
super(DeletePMAssociationAction, self).__init__(
request, *args, **kwargs)
def populate_monitor_id_choices(self, request, context):
self.fields['monitor_id'].label = (_("Select a health monitor of %s") %
context['pool_name'])
monitor_id_choices = [('', _("Select a Monitor"))]
try:
monitors = api.lbaas.pool_health_monitor_list(request)
for m in monitors:
if m.id in context['pool_monitors']:
display_name = self._get_monitor_display_name(m)
monitor_id_choices.append((m.id, display_name))
except Exception:
exceptions.handle(request,
_('Unable to retrieve monitors list.'))
self.fields['monitor_id'].choices = monitor_id_choices
return monitor_id_choices
class Meta:
name = _("Association Details")
permissions = ('openstack.services.network',)
help_text = _("Disassociate a health monitor from target pool. ")
class DeletePMAssociationStep(workflows.Step):
action_class = DeletePMAssociationAction
depends_on = ("pool_id", "pool_name", "pool_monitors")
contributes = ("monitor_id",)
def contribute(self, data, context):
context = super(DeletePMAssociationStep, self).contribute(
data, context)
if data:
return context
class DeletePMAssociation(workflows.Workflow):
slug = "deleteassociation"
name = _("Disassociate Monitor")
finalize_button_name = _("Disassociate")
success_message = _('Disassociated monitor.')
failure_message = _('Unable to disassociate monitor.')
success_url = "horizon:project:loadbalancers:index"
default_steps = (DeletePMAssociationStep,)
def handle(self, request, context):
try:
context['monitor_id'] = api.lbaas.pool_monitor_association_delete(
request, **context)
return True
except Exception:
exceptions.handle(request, _("Unable to disassociate monitor."))
return False
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of tensor_forest (extremely random forests)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.data import data_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
def _assert_float32(tensors):
"""Assert all tensors are float32.
Args:
tensors: `Tensor` or `dict` of `Tensor` objects.
Raises:
TypeError: if any tensor is not float32.
"""
if not isinstance(tensors, dict):
tensors = [tensors]
else:
tensors = tensors.values()
for tensor in tensors:
if tensor.dtype.base_dtype != dtypes.float32:
raise TypeError('Expected dtype=float32, %s.' % tensor)
class TensorForestLossHook(session_run_hook.SessionRunHook):
"""Monitor to request stop when loss stops decreasing."""
def __init__(self, early_stopping_rounds):
self.early_stopping_rounds = early_stopping_rounds
self.min_loss = None
self.last_step = -1
# self.steps records the number of steps for which the loss has been
# non-decreasing
self.steps = 0
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(
{'global_step': contrib_framework.get_global_step(),
'current_loss': run_context.session.graph.get_operation_by_name(
LOSS_NAME).outputs[0]})
def after_run(self, run_context, run_values):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
    # Guard against the global step going backwards, which might happen
    # if we recover from something.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')
self.last_step = current_step
self.steps = 0
self.min_loss = None
return
self.last_step = current_step
if self.min_loss is None or current_loss < self.min_loss:
self.min_loss = current_loss
self.steps = 0
if self.steps > self.early_stopping_rounds:
logging.info('TensorForestLossHook requesting stop.')
run_context.request_stop()
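# Hypothetical usage sketch (not in the original module): the hook can also be
# passed to fit() explicitly instead of relying on the default one created
# there, e.g.
#
#   hook = TensorForestLossHook(early_stopping_rounds=300)
#   classifier.fit(input_fn=train_input_fn, steps=10000, monitors=[hook])
#
# where `classifier` is a TensorForestEstimator and `train_input_fn` is an
# input function assumed to be defined elsewhere.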
def get_model_fn(params, graph_builder_class, device_assigner,
weights_name=None, keys_name=None):
"""Return a model function given a way to construct a graph builder."""
def _model_fn(features, labels):
"""Function that returns predictions, training loss, and training op."""
weights = None
keys = None
if weights_name and weights_name in features:
weights = features.pop(weights_name)
if keys_name and keys_name in features:
keys = features.pop(keys_name)
processed_features, spec = data_ops.ParseDataTensorOrDict(features)
_assert_float32(processed_features)
if labels is not None:
labels = data_ops.ParseLabelTensorOrDict(labels)
_assert_float32(labels)
graph_builder = graph_builder_class(params, device_assigner=device_assigner)
inference = {eval_metrics.INFERENCE_PROB_NAME:
graph_builder.inference_graph(processed_features,
data_spec=spec)}
if not params.regression:
inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    if keys is not None:
inference[KEYS_NAME] = keys
# labels might be None if we're doing prediction (which brings up the
# question of why we force everything to adhere to a single model_fn).
training_loss = None
training_graph = None
if labels is not None:
training_loss = graph_builder.training_loss(processed_features, labels,
data_spec=spec,
name=LOSS_NAME)
training_graph = control_flow_ops.group(
graph_builder.training_graph(
processed_features, labels, data_spec=spec,
input_weights=weights),
state_ops.assign_add(contrib_framework.get_global_step(), 1))
# Put weights back in
if weights is not None:
features[weights_name] = weights
return (inference, training_loss, training_graph)
return _model_fn
class TensorForestEstimator(evaluable.Evaluable, trainable.Trainable):
"""An estimator that can train and evaluate a random forest."""
def __init__(self, params, device_assigner=None, model_dir=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None, weights_name=None, keys_name=None,
feature_engineering_fn=None, early_stopping_rounds=100):
self.params = params.fill()
self.graph_builder_class = graph_builder_class
self.early_stopping_rounds = early_stopping_rounds
self.weights_name = weights_name
self._estimator = estimator.Estimator(
model_fn=get_model_fn(params, graph_builder_class, device_assigner,
weights_name=weights_name, keys_name=keys_name),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
def evaluate(
self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None,
steps=None, metrics=None, name=None):
"""See evaluable.Evaluable."""
return self._estimator.evaluate(
input_fn=input_fn, x=x, y=y, feed_fn=feed_fn,
batch_size=batch_size, steps=steps,
metrics=metrics, name=name)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable."""
if not monitors:
monitors = [TensorForestLossHook(self.early_stopping_rounds)]
self._estimator.fit(input_fn=input_fn, x=x, y=y,
batch_size=batch_size, steps=steps, monitors=monitors,
max_steps=max_steps)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns prediction probabilities for given features (classification).
Args:
x: features.
      input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities (or an iterable of predicted
probabilities if as_iterable is True).
Raises:
ValueError: If both or neither of x and input_fn were given.
"""
results = self._estimator.predict(
x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
as_iterable=as_iterable)
if as_iterable:
return (x[eval_metrics.INFERENCE_PROB_NAME] for x in results)
else:
return results[eval_metrics.INFERENCE_PROB_NAME]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(
self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
axis: Axis on which to argmax (for classification).
Last axis is used by default.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes or regression values (or an iterable of
predictions if as_iterable is True).
"""
results = self._estimator.predict(
x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
as_iterable=as_iterable)
predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
else eval_metrics.INFERENCE_PRED_NAME)
if as_iterable:
return (x[predict_name] for x in results)
else:
return results[predict_name]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_with_keys(
self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
as_iterable=True):
"""Same as predict but also returns the example keys."""
results = self._estimator.predict(
x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
as_iterable=as_iterable)
predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
else eval_metrics.INFERENCE_PRED_NAME)
if as_iterable:
return ((x[predict_name], x.get(KEYS_NAME, None)) for x in results)
else:
return results[predict_name], results.get(KEYS_NAME, None)
def export(self,
export_dir,
input_fn,
signature_fn=None,
default_batch_size=1):
"""See BaseEstimator.export."""
# Reset model function with basic device assigner.
# Servo doesn't support distributed inference
# but it will try to respect device assignments if they're there.
# pylint: disable=protected-access
orig_model_fn = self._estimator._model_fn
self._estimator._model_fn = get_model_fn(
self.params, self.graph_builder_class,
tensor_forest.RandomForestDeviceAssigner(),
weights_name=self.weights_name)
result = self._estimator.export(
export_dir=export_dir,
use_deprecated_input_fn=True,
signature_fn=(signature_fn or
(export.regression_signature_fn
if self.params.regression else
export.classification_signature_fn_with_prob)),
default_batch_size=default_batch_size,
prediction_key=eval_metrics.INFERENCE_PROB_NAME)
self._estimator._model_fn = orig_model_fn
# pylint: enable=protected-access
return result
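# Hypothetical usage sketch (not part of the original module), with
# hyperparameter values chosen purely for illustration:
#
#   hparams = tensor_forest.ForestHParams(
#       num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
#   classifier = TensorForestEstimator(hparams, model_dir='/tmp/forest_model')
#   classifier.fit(x=train_x, y=train_y, batch_size=1000)
#   probabilities = list(classifier.predict_proba(x=test_x))
#
# where `train_x`, `train_y` and `test_x` are assumed to be float32 numpy
# arrays defined elsewhere.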
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
# from activations import *
from activations import LinearActivation, ReluActivation, SoftmaxActivation, \
IdentityActivation, TanhActivation, SequenceSoftmaxActivation
from attrs import ExtraAttr
from default_decorators import wrap_name_default, wrap_act_default, \
wrap_param_default, wrap_bias_attr_default, wrap_param_attr_default
from layers import * # There are too many layers used in network, so import *
from poolings import MaxPooling, SumPooling
from paddle.trainer.config_parser import *
__all__ = [
'sequence_conv_pool', 'simple_lstm', "simple_img_conv_pool",
"img_conv_bn_pool", 'dropout_layer', 'lstmemory_group', 'lstmemory_unit',
'small_vgg', 'img_conv_group', 'vgg_16_network', 'gru_unit', 'gru_group',
'simple_gru', 'simple_attention', 'simple_gru2', 'bidirectional_gru',
'text_conv_pool', 'bidirectional_lstm', 'inputs', 'outputs'
]
######################################################
# Text CNN #
######################################################
@wrap_name_default("sequence_conv_pooling")
def sequence_conv_pool(input,
context_len,
hidden_size,
name=None,
context_start=None,
pool_type=None,
context_proj_layer_name=None,
context_proj_param_attr=False,
fc_layer_name=None,
fc_param_attr=None,
fc_bias_attr=None,
fc_act=None,
pool_bias_attr=None,
fc_attr=None,
context_attr=None,
pool_attr=None):
"""
Text convolution pooling layers helper.
Text input => Context Projection => FC Layer => Pooling => Output.
    :param name: name of the output layer (i.e. the pooling layer name)
:type name: basestring
:param input: name of input layer
:type input: LayerOutput
:param context_len: context projection length. See
context_projection's document.
:type context_len: int
:param hidden_size: FC Layer size.
:type hidden_size: int
    :param context_start: context start position. See
                          context_projection's context_start.
:type context_start: int or None
:param pool_type: pooling layer type. See pooling_layer's document.
:type pool_type: BasePoolingType.
:param context_proj_layer_name: context projection layer name.
None if user don't care.
:type context_proj_layer_name: basestring
:param context_proj_param_attr: context projection parameter attribute.
None if user don't care.
:type context_proj_param_attr: ParameterAttribute or None.
:param fc_layer_name: fc layer name. None if user don't care.
:type fc_layer_name: basestring
:param fc_param_attr: fc layer parameter attribute. None if user don't care.
:type fc_param_attr: ParameterAttribute or None
:param fc_bias_attr: fc bias parameter attribute. False if no bias,
None if user don't care.
:type fc_bias_attr: ParameterAttribute or None
:param fc_act: fc layer activation type. None means tanh
:type fc_act: BaseActivation
:param pool_bias_attr: pooling layer bias attr. None if don't care.
False if no bias.
:type pool_bias_attr: ParameterAttribute or None.
:param fc_attr: fc layer extra attribute.
:type fc_attr: ExtraLayerAttribute
:param context_attr: context projection layer extra attribute.
:type context_attr: ExtraLayerAttribute
:param pool_attr: pooling layer extra attribute.
:type pool_attr: ExtraLayerAttribute
:return: output layer name.
:rtype: LayerOutput
"""
# Set Default Value to param
context_proj_layer_name = "%s_conv_proj" % name \
if context_proj_layer_name is None else context_proj_layer_name
with mixed_layer(
name=context_proj_layer_name,
size=input.size * context_len,
act=LinearActivation(),
layer_attr=context_attr) as m:
m += context_projection(
input,
context_len=context_len,
context_start=context_start,
padding_attr=context_proj_param_attr)
fc_layer_name = "%s_conv_fc" % name \
if fc_layer_name is None else fc_layer_name
fl = fc_layer(
name=fc_layer_name,
input=m,
size=hidden_size,
act=fc_act,
layer_attr=fc_attr,
param_attr=fc_param_attr,
bias_attr=fc_bias_attr)
return pooling_layer(
name=name,
input=fl,
pooling_type=pool_type,
bias_attr=pool_bias_attr,
layer_attr=pool_attr)
text_conv_pool = sequence_conv_pool
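# Hypothetical usage sketch (not part of the original file), assuming `emb` is
# an embedded word sequence (e.g. the output of an embedding projection)
# defined elsewhere:
#
#   conv_pool = sequence_conv_pool(
#       input=emb,
#       context_len=3,
#       hidden_size=128,
#       pool_type=MaxPooling())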
############################################################################
# Images #
############################################################################
@wrap_name_default("conv_pool")
def simple_img_conv_pool(input,
filter_size,
num_filters,
pool_size,
name=None,
pool_type=None,
act=None,
groups=1,
conv_stride=1,
conv_padding=0,
bias_attr=None,
num_channel=None,
param_attr=None,
shared_bias=True,
conv_layer_attr=None,
pool_stride=1,
pool_padding=0,
pool_layer_attr=None):
"""
Simple image convolution and pooling group.
Input => conv => pooling
:param name: group name
:type name: basestring
:param input: input layer name.
:type input: LayerOutput
:param filter_size: see img_conv_layer for details
:type filter_size: int
:param num_filters: see img_conv_layer for details
:type num_filters: int
:param pool_size: see img_pool_layer for details
:type pool_size: int
:param pool_type: see img_pool_layer for details
:type pool_type: BasePoolingType
:param act: see img_conv_layer for details
:type act: BaseActivation
:param groups: see img_conv_layer for details
:type groups: int
:param conv_stride: see img_conv_layer for details
:type conv_stride: int
:param conv_padding: see img_conv_layer for details
:type conv_padding: int
:param bias_attr: see img_conv_layer for details
:type bias_attr: ParameterAttribute
:param num_channel: see img_conv_layer for details
:type num_channel: int
:param param_attr: see img_conv_layer for details
:type param_attr: ParameterAttribute
:param shared_bias: see img_conv_layer for details
:type shared_bias: bool
:param conv_layer_attr: see img_conv_layer for details
:type conv_layer_attr: ExtraLayerAttribute
:param pool_stride: see img_pool_layer for details
:type pool_stride: int
:param pool_padding: see img_pool_layer for details
:type pool_padding: int
:param pool_layer_attr: see img_pool_layer for details
:type pool_layer_attr: ExtraLayerAttribute
:return: Layer's output
:rtype: LayerOutput
"""
_conv_ = img_conv_layer(
name="%s_conv" % name,
input=input,
filter_size=filter_size,
num_filters=num_filters,
num_channels=num_channel,
act=act,
groups=groups,
stride=conv_stride,
padding=conv_padding,
bias_attr=bias_attr,
param_attr=param_attr,
shared_biases=shared_bias,
layer_attr=conv_layer_attr)
return img_pool_layer(
name="%s_pool" % name,
input=_conv_,
pool_size=pool_size,
pool_type=pool_type,
stride=pool_stride,
padding=pool_padding,
layer_attr=pool_layer_attr)
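# Hypothetical usage sketch (not part of the original file), assuming a
# 28x28 single-channel image layer `img` (e.g. from data_layer) is defined
# elsewhere:
#
#   conv_pool = simple_img_conv_pool(
#       input=img,
#       filter_size=5,
#       num_filters=32,
#       num_channel=1,
#       pool_size=2,
#       pool_stride=2,
#       act=ReluActivation())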
@wrap_name_default("conv_bn_pool")
def img_conv_bn_pool(input,
filter_size,
num_filters,
pool_size,
name=None,
pool_type=None,
act=None,
groups=1,
conv_stride=1,
conv_padding=0,
conv_bias_attr=None,
num_channel=None,
conv_param_attr=None,
shared_bias=True,
conv_layer_attr=None,
bn_param_attr=None,
bn_bias_attr=None,
bn_layer_attr=None,
pool_stride=1,
pool_padding=0,
pool_layer_attr=None):
"""
Convolution, batch normalization, pooling group.
:param name: group name
:type name: basestring
:param input: layer's input
:type input: LayerOutput
:param filter_size: see img_conv_layer's document
:type filter_size: int
:param num_filters: see img_conv_layer's document
:type num_filters: int
:param pool_size: see img_pool_layer's document.
:type pool_size: int
:param pool_type: see img_pool_layer's document.
:type pool_type: BasePoolingType
:param act: see batch_norm_layer's document.
:type act: BaseActivation
:param groups: see img_conv_layer's document
:type groups: int
:param conv_stride: see img_conv_layer's document.
:type conv_stride: int
:param conv_padding: see img_conv_layer's document.
:type conv_padding: int
:param conv_bias_attr: see img_conv_layer's document.
:type conv_bias_attr: ParameterAttribute
:param num_channel: see img_conv_layer's document.
:type num_channel: int
:param conv_param_attr: see img_conv_layer's document.
:type conv_param_attr: ParameterAttribute
:param shared_bias: see img_conv_layer's document.
:type shared_bias: bool
:param conv_layer_attr: see img_conv_layer's document.
    :type conv_layer_attr: ExtraLayerAttribute
:param bn_param_attr: see batch_norm_layer's document.
:type bn_param_attr: ParameterAttribute.
    :param bn_bias_attr: see batch_norm_layer's document.
    :type bn_bias_attr: ParameterAttribute
    :param bn_layer_attr: see batch_norm_layer's document.
    :type bn_layer_attr: ExtraLayerAttribute
:param pool_stride: see img_pool_layer's document.
:type pool_stride: int
:param pool_padding: see img_pool_layer's document.
:type pool_padding: int
:param pool_layer_attr: see img_pool_layer's document.
:type pool_layer_attr: ExtraLayerAttribute
:return: Layer groups output
:rtype: LayerOutput
"""
__conv__ = img_conv_layer(
name="%s_conv" % name,
input=input,
filter_size=filter_size,
num_filters=num_filters,
num_channels=num_channel,
act=LinearActivation(),
groups=groups,
stride=conv_stride,
padding=conv_padding,
bias_attr=conv_bias_attr,
param_attr=conv_param_attr,
shared_biases=shared_bias,
layer_attr=conv_layer_attr)
__bn__ = batch_norm_layer(
name="%s_bn" % name,
input=__conv__,
act=act,
bias_attr=bn_bias_attr,
param_attr=bn_param_attr,
layer_attr=bn_layer_attr)
return img_pool_layer(
name="%s_pool" % name,
input=__bn__,
pool_type=pool_type,
pool_size=pool_size,
stride=pool_stride,
padding=pool_padding,
layer_attr=pool_layer_attr)
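# Hypothetical usage sketch (not part of the original file), assuming `img`
# is an image layer defined elsewhere:
#
#   conv_bn_pool = img_conv_bn_pool(
#       input=img,
#       filter_size=3,
#       num_filters=64,
#       num_channel=3,
#       pool_size=2,
#       pool_stride=2,
#       act=ReluActivation())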
@wrap_act_default(param_names=['conv_act'], act=ReluActivation())
@wrap_param_default(
param_names=['pool_type'], default_factory=lambda _: MaxPooling())
def img_conv_group(input,
conv_num_filter,
pool_size,
num_channels=None,
conv_padding=1,
conv_filter_size=3,
conv_act=None,
conv_with_batchnorm=False,
conv_batchnorm_drop_rate=0,
pool_stride=1,
pool_type=None):
"""
    Image Convolution Group, used for the VGG network.
TODO(yuyang18): Complete docs
:param conv_batchnorm_drop_rate:
:param input:
:param conv_num_filter:
:param pool_size:
:param num_channels:
:param conv_padding:
:param conv_filter_size:
:param conv_act:
:param conv_with_batchnorm:
:param pool_stride:
:param pool_type:
:return:
"""
tmp = input
# Type checks
assert isinstance(tmp, LayerOutput)
assert isinstance(conv_num_filter, list) or isinstance(conv_num_filter,
tuple)
for each_num_filter in conv_num_filter:
assert isinstance(each_num_filter, int)
assert isinstance(pool_size, int)
def __extend_list__(obj):
if not hasattr(obj, '__len__'):
return [obj] * len(conv_num_filter)
else:
return obj
conv_padding = __extend_list__(conv_padding)
conv_filter_size = __extend_list__(conv_filter_size)
conv_act = __extend_list__(conv_act)
conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)
for i in xrange(len(conv_num_filter)):
extra_kwargs = dict()
if num_channels is not None:
extra_kwargs['num_channels'] = num_channels
num_channels = None
if conv_with_batchnorm[i]:
extra_kwargs['act'] = LinearActivation()
else:
extra_kwargs['act'] = conv_act[i]
tmp = img_conv_layer(
input=tmp,
padding=conv_padding[i],
filter_size=conv_filter_size[i],
num_filters=conv_num_filter[i],
**extra_kwargs)
# logger.debug("tmp.num_filters = %d" % tmp.num_filters)
if conv_with_batchnorm[i]:
dropout = conv_batchnorm_drop_rate[i]
if dropout == 0 or abs(dropout) < 1e-5: # dropout not set
tmp = batch_norm_layer(input=tmp, act=conv_act[i])
else:
tmp = batch_norm_layer(
input=tmp,
act=conv_act[i],
layer_attr=ExtraAttr(drop_rate=dropout))
return img_pool_layer(
input=tmp, stride=pool_stride, pool_size=pool_size, pool_type=pool_type)
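# The small_vgg() helper below is itself a usage example of img_conv_group();
# a minimal standalone call (illustrative only, assuming an image layer `img`
# defined elsewhere) could look like:
#
#   block = img_conv_group(
#       input=img,
#       num_channels=3,
#       conv_num_filter=[64, 64],
#       conv_filter_size=3,
#       conv_act=ReluActivation(),
#       conv_with_batchnorm=True,
#       conv_batchnorm_drop_rate=[0.3, 0],
#       pool_size=2,
#       pool_stride=2,
#       pool_type=MaxPooling())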
def small_vgg(input_image, num_channels, num_classes):
def __vgg__(ipt, num_filter, times, dropouts, num_channels_=None):
return img_conv_group(
input=ipt,
num_channels=num_channels_,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * times,
conv_filter_size=3,
conv_act=ReluActivation(),
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type=MaxPooling())
tmp = __vgg__(input_image, 64, 2, [0.3, 0], num_channels)
tmp = __vgg__(tmp, 128, 2, [0.4, 0])
tmp = __vgg__(tmp, 256, 3, [0.4, 0.4, 0])
tmp = __vgg__(tmp, 512, 3, [0.4, 0.4, 0])
tmp = img_pool_layer(
input=tmp, stride=2, pool_size=2, pool_type=MaxPooling())
tmp = dropout_layer(input=tmp, dropout_rate=0.5)
tmp = fc_layer(
input=tmp,
size=512,
layer_attr=ExtraAttr(drop_rate=0.5),
act=LinearActivation())
tmp = batch_norm_layer(input=tmp, act=ReluActivation())
return fc_layer(input=tmp, size=num_classes, act=SoftmaxActivation())
def vgg_16_network(input_image, num_channels, num_classes=1000):
"""
Same model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8
:param num_classes:
:param input_image:
:type input_image: LayerOutput
:param num_channels:
:type num_channels: int
:return:
"""
tmp = img_conv_group(
input=input_image,
num_channels=num_channels,
conv_padding=1,
conv_num_filter=[64, 64],
conv_filter_size=3,
conv_act=ReluActivation(),
pool_size=2,
pool_stride=2,
pool_type=MaxPooling())
tmp = img_conv_group(
input=tmp,
conv_num_filter=[128, 128],
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
tmp = img_conv_group(
input=tmp,
conv_num_filter=[256, 256, 256],
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
tmp = img_conv_group(
input=tmp,
conv_num_filter=[512, 512, 512],
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
tmp = img_conv_group(
input=tmp,
conv_num_filter=[512, 512, 512],
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
tmp = fc_layer(
input=tmp,
size=4096,
act=ReluActivation(),
layer_attr=ExtraAttr(drop_rate=0.5))
tmp = fc_layer(
input=tmp,
size=4096,
act=ReluActivation(),
layer_attr=ExtraAttr(drop_rate=0.5))
return fc_layer(input=tmp, size=num_classes, act=SoftmaxActivation())
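# Hypothetical usage sketch (not part of the original file), assuming a
# 224x224 RGB input:
#
#   img = data_layer(name='image', size=224 * 224 * 3)
#   prob = vgg_16_network(img, num_channels=3, num_classes=1000)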
############################################################################
# Recurrent #
############################################################################
@wrap_name_default("lstm")
def simple_lstm(input,
size,
name=None,
reverse=False,
mat_param_attr=None,
bias_param_attr=None,
inner_param_attr=None,
act=None,
gate_act=None,
state_act=None,
mixed_layer_attr=None,
lstm_cell_attr=None):
"""
Simple LSTM Cell.
    It just combines a mixed layer with full_matrix_projection and an lstmemory
    layer. The simple lstm cell is implemented by the following equations.
.. math::
i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)
f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)
c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c)
o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)
h_t & = o_t tanh(c_t)
    Please refer to **Generating Sequences With Recurrent Neural Networks** if
    you want to know what an LSTM is. Link_ is here.
.. _Link: http://arxiv.org/abs/1308.0850
:param name: lstm layer name.
:type name: basestring
:param input: input layer name.
:type input: LayerOutput
:param size: lstm layer size.
:type size: int
:param reverse: whether to process the input data in a reverse order
:type reverse: bool
:param mat_param_attr: mixed layer's matrix projection parameter attribute.
:type mat_param_attr: ParameterAttribute
:param bias_param_attr: bias parameter attribute. False means no bias, None
means default bias.
:type bias_param_attr: ParameterAttribute|False
:param inner_param_attr: lstm cell parameter attribute.
:type inner_param_attr: ParameterAttribute
    :param act: lstm final activation type
    :type act: BaseActivation
    :param gate_act: lstm gate activation type
    :type gate_act: BaseActivation
    :param state_act: lstm state activation type.
:type state_act: BaseActivation
:param mixed_layer_attr: mixed layer's extra attribute.
:type mixed_layer_attr: ExtraLayerAttribute
:param lstm_cell_attr: lstm layer's extra attribute.
:type lstm_cell_attr: ExtraLayerAttribute
:return: lstm layer name.
:rtype: LayerOutput
"""
fc_name = 'lstm_transform_%s' % name
with mixed_layer(
name=fc_name,
size=size * 4,
act=IdentityActivation(),
layer_attr=mixed_layer_attr,
bias_attr=False) as m:
m += full_matrix_projection(input, param_attr=mat_param_attr)
return lstmemory(
name=name,
input=m,
reverse=reverse,
bias_attr=bias_param_attr,
param_attr=inner_param_attr,
act=act,
gate_act=gate_act,
state_act=state_act,
layer_attr=lstm_cell_attr)
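# Hypothetical usage sketch (not part of the original file), assuming `emb` is
# an embedded word sequence defined elsewhere:
#
#   lstm = simple_lstm(input=emb, size=512)
#   reverse_lstm = simple_lstm(input=emb, size=512, reverse=True)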
@wrap_name_default('lstm_unit')
def lstmemory_unit(input,
name=None,
size=None,
param_attr=None,
act=None,
gate_act=None,
state_act=None,
mixed_bias_attr=None,
lstm_bias_attr=None,
mixed_layer_attr=None,
lstm_layer_attr=None,
get_output_layer_attr=None):
"""
    Define calculations that an LSTM unit performs in a single time step.
This function itself is not a recurrent layer, so that it can not be
directly applied to sequence input. This function is always used in
recurrent_group (see layers.py for more details) to implement attention
mechanism.
Please refer to **Generating Sequences With Recurrent Neural Networks**
for more details about LSTM. The link goes as follows:
.. _Link: https://arxiv.org/abs/1308.0850
.. math::
i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)
f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)
c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c)
o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)
h_t & = o_t tanh(c_t)
The example usage is:
.. code-block:: python
lstm_step = lstmemory_unit(input=[layer1],
size=256,
act=TanhActivation(),
gate_act=SigmoidActivation(),
state_act=TanhActivation())
:param input: input layer name.
:type input: LayerOutput
:param name: lstmemory unit name.
:type name: basestring
:param size: lstmemory unit size.
:type size: int
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
    :param act: lstm final activation type
    :type act: BaseActivation
    :param gate_act: lstm gate activation type
    :type gate_act: BaseActivation
    :param state_act: lstm state activation type.
:type state_act: BaseActivation
:param mixed_bias_attr: bias parameter attribute of mixed layer.
False means no bias, None means default bias.
:type mixed_bias_attr: ParameterAttribute|False
:param lstm_bias_attr: bias parameter attribute of lstm layer.
False means no bias, None means default bias.
:type lstm_bias_attr: ParameterAttribute|False
:param mixed_layer_attr: mixed layer's extra attribute.
:type mixed_layer_attr: ExtraLayerAttribute
:param lstm_layer_attr: lstm layer's extra attribute.
:type lstm_layer_attr: ExtraLayerAttribute
:param get_output_layer_attr: get output layer's extra attribute.
:type get_output_layer_attr: ExtraLayerAttribute
:return: lstmemory unit name.
:rtype: LayerOutput
"""
if size is None:
assert input.size % 4 == 0
size = input.size / 4
out_mem = memory(name=name, size=size)
state_mem = memory(name="%s_state" % name, size=size)
with mixed_layer(
name="%s_input_recurrent" % name,
size=size * 4,
bias_attr=mixed_bias_attr,
layer_attr=mixed_layer_attr,
act=IdentityActivation()) as m:
m += identity_projection(input=input)
m += full_matrix_projection(input=out_mem, param_attr=param_attr)
lstm_out = lstm_step_layer(
name=name,
input=m,
state=state_mem,
size=size,
bias_attr=lstm_bias_attr,
act=act,
gate_act=gate_act,
state_act=state_act,
layer_attr=lstm_layer_attr)
get_output_layer(
name='%s_state' % name,
input=lstm_out,
arg_name='state',
layer_attr=get_output_layer_attr)
return lstm_out
@wrap_name_default('lstm_group')
def lstmemory_group(input,
size=None,
name=None,
reverse=False,
param_attr=None,
act=None,
gate_act=None,
state_act=None,
mixed_bias_attr=None,
lstm_bias_attr=None,
mixed_layer_attr=None,
lstm_layer_attr=None,
get_output_layer_attr=None):
"""
    lstm_group is a recurrent layer group version of Long Short Term Memory. It
    does exactly the same calculation as the lstmemory layer (see lstmemory in
    layers.py for the maths). A useful benefit is that the LSTM memory cell
    states, or hidden states, in every time step are accessible to the user.
    This is especially useful in attention models. If you do not need to
    access the internal states of the lstm, but merely use its outputs,
    it is recommended to use the lstmemory, which is relatively faster than
    lstmemory_group.
NOTE: In PaddlePaddle's implementation, the following input-to-hidden
multiplications:
:math:`W_{xi}x_{t}` , :math:`W_{xf}x_{t}`,
:math:`W_{xc}x_t`, :math:`W_{xo}x_{t}` are not done in lstmemory_unit to
speed up the calculations. Consequently, an additional mixed_layer with
full_matrix_projection must be included before lstmemory_unit is called.
The example usage is:
.. code-block:: python
lstm_step = lstmemory_group(input=[layer1],
size=256,
act=TanhActivation(),
gate_act=SigmoidActivation(),
state_act=TanhActivation())
:param input: input layer name.
:type input: LayerOutput
:param name: lstmemory group name.
:type name: basestring
:param size: lstmemory group size.
:type size: int
:param reverse: is lstm reversed
:type reverse: bool
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
    :param act: lstm final activation type
    :type act: BaseActivation
    :param gate_act: lstm gate activation type
    :type gate_act: BaseActivation
    :param state_act: lstm state activation type.
:type state_act: BaseActivation
:param mixed_bias_attr: bias parameter attribute of mixed layer.
False means no bias, None means default bias.
:type mixed_bias_attr: ParameterAttribute|False
:param lstm_bias_attr: bias parameter attribute of lstm layer.
False means no bias, None means default bias.
:type lstm_bias_attr: ParameterAttribute|False
:param mixed_layer_attr: mixed layer's extra attribute.
:type mixed_layer_attr: ExtraLayerAttribute
:param lstm_layer_attr: lstm layer's extra attribute.
:type lstm_layer_attr: ExtraLayerAttribute
:param get_output_layer_attr: get output layer's extra attribute.
:type get_output_layer_attr: ExtraLayerAttribute
:return: the lstmemory group.
:rtype: LayerOutput
"""
def __lstm_step__(ipt):
return lstmemory_unit(
input=ipt,
name=name,
size=size,
mixed_bias_attr=mixed_bias_attr,
mixed_layer_attr=mixed_layer_attr,
param_attr=param_attr,
lstm_bias_attr=lstm_bias_attr,
act=act,
gate_act=gate_act,
state_act=state_act,
lstm_layer_attr=lstm_layer_attr,
get_output_layer_attr=get_output_layer_attr)
return recurrent_group(
name='%s_recurrent_group' % name,
step=__lstm_step__,
reverse=reverse,
input=input)
@wrap_name_default('gru_unit')
def gru_unit(input,
size=None,
name=None,
gru_bias_attr=None,
act=None,
gate_act=None,
gru_layer_attr=None):
"""
Define the calculations that a gated recurrent unit performs in a single
time step. This function itself is not a recurrent layer, so it cannot be
directly applied to sequence input. It is almost always used inside a
recurrent_group (see layers.py for more details) to implement an attention
mechanism.
Please see grumemory in layers.py for the details about the maths.
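The example usage is a minimal sketch; it assumes that gru_unit is called
inside the step function of a recurrent_group and that ``projected`` is an
illustrative layer whose size is already 3 * size:
.. code-block:: python
    gru = gru_unit(input=projected,
                   size=256,
                   act=TanhActivation(),
                   gate_act=SigmoidActivation())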
:param input: input layer name.
:type input: LayerOutput
:param name: name of the gru group.
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param act: type of the activation
:type act: BaseActivation
:param gate_act: type of the gate activation
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:return: the gru output layer.
:rtype: LayerOutput
"""
assert input.size % 3 == 0
if size is None:
size = input.size / 3
out_mem = memory(name=name, size=size)
gru_out = gru_step_layer(
name=name,
input=input,
output_mem=out_mem,
size=size,
bias_attr=gru_bias_attr,
act=act,
gate_act=gate_act,
layer_attr=gru_layer_attr)
return gru_out
@wrap_name_default('gru_group')
def gru_group(input,
size=None,
name=None,
reverse=False,
gru_bias_attr=None,
act=None,
gate_act=None,
gru_layer_attr=None):
"""
gru_group is the recurrent-layer-group version of the Gated Recurrent Unit.
It does exactly the same calculation as the grumemory layer does. A notable
benefit is that the GRU hidden states are accessible to the user. This is
especially useful in attention models. If you do not need to access any
internal state, but merely use the outputs of a GRU, it is recommended to
use grumemory, which is relatively faster.
Please see grumemory in layers.py for more details about the maths.
The example usage is:
.. code-block:: python
gru = gru_group(input=[layer1],
size=256,
act=TanhActivation(),
gate_act=SigmoidActivation())
:param input: input layer name.
:type input: LayerOutput
:param name: name of the gru group.
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param reverse: whether to process the input data in a reverse order
:type reverse: bool
:param act: type of the activation
:type act: BaseActivation
:param gate_act: type of the gate activation
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:return: the gru group.
:rtype: LayerOutput
"""
def __gru_step__(ipt):
return gru_unit(
input=ipt,
name=name,
size=size,
gru_bias_attr=gru_bias_attr,
act=act,
gate_act=gate_act,
gru_layer_attr=gru_layer_attr)
return recurrent_group(
name='%s_recurrent_group' % name,
step=__gru_step__,
reverse=reverse,
input=input)
@wrap_name_default('simple_gru')
def simple_gru(input,
size,
name=None,
reverse=False,
mixed_param_attr=None,
mixed_bias_param_attr=None,
mixed_layer_attr=None,
gru_bias_attr=None,
act=None,
gate_act=None,
gru_layer_attr=None):
"""
You may see gru_step_layer and grumemory in layers.py, and gru_unit,
gru_group and simple_gru in network.py. The reason there are so many
interfaces is that we have two ways to implement a recurrent neural network.
One way is to use one complete layer that implements the rnn (including
simple rnn, gru and lstm) over multiple time steps, such as recurrent_layer,
lstmemory and grumemory. However, the multiplication operation
:math:`W x_t` is not computed in these layers. See the details in their
interfaces in layers.py.
The other implementation is to use a recurrent group, which can assemble a
series of layers to compute the rnn step by step. This way is flexible for
attention mechanisms or other complex connections.
- gru_step_layer: computes the rnn for one step only. It needs a memory as
  input and can be used in a recurrent group.
- gru_unit: a wrapper of gru_step_layer with memory.
- gru_group: a GRU cell implemented by a combination of multiple layers in
  a recurrent group. But :math:`W x_t` is not done in the group.
- grumemory: a GRU cell implemented by one layer, which does the same
  calculation as gru_group and is faster than gru_group.
- simple_gru: a complete GRU implementation including :math:`W x_t` and
  gru_group. :math:`W` contains :math:`W_r`, :math:`W_z` and :math:`W`, see
  the formula in grumemory.
In terms of computational speed, grumemory is relatively faster than
gru_group, and gru_group is relatively faster than simple_gru.
The example usage is:
.. code-block:: python
gru = simple_gru(input=[layer1], size=256)
:param input: input layer name.
:type input: LayerOutput
:param name: name of the gru group.
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param reverse: whether to process the input data in a reverse order
:type reverse: bool
:param act: type of the activation
:type act: BaseActivation
:param gate_act: type of the gate activation
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_layer_attr: Extra parameter attribute of the gru layer.
:type gru_layer_attr: ParameterAttribute|False
:return: the gru group.
:rtype: LayerOutput
"""
with mixed_layer(
name='%s_transform' % name,
size=size * 3,
bias_attr=mixed_bias_param_attr,
layer_attr=mixed_layer_attr) as m:
m += full_matrix_projection(input=input, param_attr=mixed_param_attr)
return gru_group(
name=name,
size=size,
input=m,
reverse=reverse,
gru_bias_attr=gru_bias_attr,
act=act,
gate_act=gate_act,
gru_layer_attr=gru_layer_attr)
@wrap_name_default('simple_gru2')
def simple_gru2(input,
size,
name=None,
reverse=False,
mixed_param_attr=None,
mixed_bias_attr=None,
gru_param_attr=None,
gru_bias_attr=None,
act=None,
gate_act=None,
mixed_layer_attr=None,
gru_cell_attr=None):
"""
simple_gru2 is the same as simple_gru, but it uses grumemory instead of
gru_group. Please see grumemory in layers.py for more details about the
maths. simple_gru2 is faster than simple_gru.
The example usage is:
.. code-block:: python
gru = simple_gru2(input=[layer1], size=256)
:param input: input layer name.
:type input: LayerOutput
:param name: name of the gru group.
:type name: basestring
:param size: hidden size of the gru.
:type size: int
:param reverse: whether to process the input data in a reverse order
:type reverse: bool
:param act: type of the activation
:type act: BaseActivation
:param gate_act: type of the gate activation
:type gate_act: BaseActivation
:param gru_bias_attr: bias. False means no bias, None means default bias.
:type gru_bias_attr: ParameterAttribute|False
:param gru_cell_attr: gru layer's extra attribute.
:type gru_cell_attr: ExtraLayerAttribute
:return: the gru group.
:rtype: LayerOutput
"""
with mixed_layer(
name='%s_transform' % name,
size=size * 3,
bias_attr=mixed_bias_attr,
layer_attr=mixed_layer_attr) as m:
m += full_matrix_projection(input=input, param_attr=mixed_param_attr)
return grumemory(
name=name,
size=size,
input=m,
reverse=reverse,
bias_attr=gru_bias_attr,
param_attr=gru_param_attr,
act=act,
gate_act=gate_act,
layer_attr=gru_cell_attr)
@wrap_name_default("bidirectional_gru")
def bidirectional_gru(input,
size,
name=None,
return_seq=False,
fwd_mixed_param_attr=None,
fwd_mixed_bias_attr=None,
fwd_gru_param_attr=None,
fwd_gru_bias_attr=None,
fwd_act=None,
fwd_gate_act=None,
fwd_mixed_layer_attr=None,
fwd_gru_cell_attr=None,
bwd_mixed_param_attr=None,
bwd_mixed_bias_attr=None,
bwd_gru_param_attr=None,
bwd_gru_bias_attr=None,
bwd_act=None,
bwd_gate_act=None,
bwd_mixed_layer_attr=None,
bwd_gru_cell_attr=None,
last_seq_attr=None,
first_seq_attr=None,
concat_attr=None,
concat_act=None):
"""
A bidirectional_gru is a recurrent unit that iterates over the input
sequence in both forward and backward orders, and then concatenates the two
outputs to form a final output. However, concatenation of the two outputs
is not the only way to form the final output; you could also, for example,
just add them together.
The example usage is:
.. code-block:: python
bi_gru = bidirectional_gru(input=[input1], size=512)
:param name: bidirectional gru layer name.
:type name: basestring
:param input: input layer.
:type input: LayerOutput
:param size: gru layer size.
:type size: int
:param return_seq: If set False, outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences that are
processed in forward and backward directions are
concatenated and returned.
:type return_seq: bool
:return: LayerOutput object.
:rtype: LayerOutput
"""
args = locals()
fw = simple_gru2(
name='%s_fw' % name,
input=input,
size=size,
**dict((k[len('fwd_'):], v) for k, v in args.iteritems()
if k.startswith('fwd_')))
bw = simple_gru2(
name="%s_bw" % name,
input=input,
size=size,
reverse=True,
**dict((k[len('bwd_'):], v) for k, v in args.iteritems()
if k.startswith('bwd_')))
if return_seq:
return concat_layer(
name=name, input=[fw, bw], layer_attr=concat_attr, act=concat_act)
else:
fw_seq = last_seq(
name="%s_fw_last" % name, input=fw, layer_attr=last_seq_attr)
bw_seq = first_seq(
name="%s_bw_last" % name, input=bw, layer_attr=first_seq_attr)
return concat_layer(
name=name,
input=[fw_seq, bw_seq],
layer_attr=concat_attr,
act=concat_act)
@wrap_name_default("bidirectional_lstm")
def bidirectional_lstm(input,
size,
name=None,
return_seq=False,
fwd_mat_param_attr=None,
fwd_bias_param_attr=None,
fwd_inner_param_attr=None,
fwd_act=None,
fwd_gate_act=None,
fwd_state_act=None,
fwd_mixed_layer_attr=None,
fwd_lstm_cell_attr=None,
bwd_mat_param_attr=None,
bwd_bias_param_attr=None,
bwd_inner_param_attr=None,
bwd_act=None,
bwd_gate_act=None,
bwd_state_act=None,
bwd_mixed_layer_attr=None,
bwd_lstm_cell_attr=None,
last_seq_attr=None,
first_seq_attr=None,
concat_attr=None,
concat_act=None):
"""
A bidirectional_lstm is a recurrent unit that iterates over the input
sequence in both forward and backward orders, and then concatenates the two
outputs to form a final output. However, concatenation of the two outputs
is not the only way to form the final output; you could also, for example,
just add them together.
Please refer to **Neural Machine Translation by Jointly Learning to Align
and Translate** for more details about the bidirectional lstm.
The link is as follows:
.. _Link: https://arxiv.org/pdf/1409.0473v3.pdf
The example usage is:
.. code-block:: python
bi_lstm = bidirectional_lstm(input=[input1], size=512)
:param name: bidirectional lstm layer name.
:type name: basestring
:param input: input layer.
:type input: LayerOutput
:param size: lstm layer size.
:type size: int
:param return_seq: If set False, outputs of the last time step are
concatenated and returned.
If set True, the entire output sequences that are
processed in forward and backward directions are
concatenated and returned.
:type return_seq: bool
:return: LayerOutput object according to the return_seq.
:rtype: LayerOutput
"""
args = locals()
fw = simple_lstm(
name='%s_fw' % name,
input=input,
size=size,
**dict((k[len('fwd_'):], v) for k, v in args.iteritems()
if k.startswith('fwd_')))
bw = simple_lstm(
name="%s_bw" % name,
input=input,
size=size,
reverse=True,
**dict((k[len('bwd_'):], v) for k, v in args.iteritems()
if k.startswith('bwd_')))
if return_seq:
return concat_layer(
name=name, input=[fw, bw], layer_attr=concat_attr, act=concat_act)
else:
fw_seq = last_seq(
name="%s_fw_last" % name, input=fw, layer_attr=last_seq_attr)
bw_seq = first_seq(
name="%s_bw_last" % name, input=bw, layer_attr=first_seq_attr)
return concat_layer(
name=name,
input=[fw_seq, bw_seq],
layer_attr=concat_attr,
act=concat_act)
@wrap_name_default()
@wrap_act_default(param_names=['weight_act'], act=TanhActivation())
def simple_attention(encoded_sequence,
encoded_proj,
decoder_state,
transform_param_attr=None,
softmax_param_attr=None,
weight_act=None,
name=None):
"""
Calculate and then return a context vector using an attention mechanism.
The size of the context vector equals the size of the encoded_sequence.
.. math::
a(s_{i-1},h_{j}) & = v_{a}f(W_{a}s_{i-1} + U_{a}h_{j})
e_{i,j} & = a(s_{i-1}, h_{j})
a_{i,j} & = \\frac{exp(e_{i,j})}{\\sum_{k=1}^{T_x}{exp(e_{i,k})}}
c_{i} & = \\sum_{j=1}^{T_{x}}a_{i,j}h_{j}
where :math:`h_{j}` is the jth element of encoded_sequence,
:math:`U_{a}h_{j}` is the jth element of encoded_proj,
:math:`s_{i-1}` is decoder_state, and
:math:`f` is weight_act, which is set to tanh by default.
Please refer to **Neural Machine Translation by Jointly Learning to
Align and Translate** for more details. The link is as follows:
https://arxiv.org/abs/1409.0473.
The example usage is:
.. code-block:: python
context = simple_attention(encoded_sequence=enc_seq,
encoded_proj=enc_proj,
decoder_state=decoder_prev,)
:param name: name of the attention model.
:type name: basestring
:param softmax_param_attr: parameter attribute of sequence softmax
that is used to produce attention weight
:type softmax_param_attr: ParameterAttribute
:param weight_act: activation of the attention model
:type weight_act: Activation
:param encoded_sequence: output of the encoder
:type encoded_sequence: LayerOutput
:param encoded_proj: attention weight is computed by a feed-forward neural
network which has two inputs: the decoder's hidden state
of the previous time step and the encoder's output.
encoded_proj is the output of the feed-forward network for
the encoder's output. Here we pre-compute it outside
simple_attention for speed considerations.
:type encoded_proj: LayerOutput
:param decoder_state: hidden state of decoder in previous time step
:type decoder_state: LayerOutput
:param transform_param_attr: parameter attribute of the feed-forward
network that takes decoder_state as inputs to
compute attention weight.
:type transform_param_attr: ParameterAttribute
:return: a context vector
"""
assert encoded_proj.size == decoder_state.size
proj_size = encoded_proj.size
with mixed_layer(size=proj_size, name="%s_transform" % name) as m:
m += full_matrix_projection(
decoder_state, param_attr=transform_param_attr)
expanded = expand_layer(
input=m, expand_as=encoded_sequence, name='%s_expand' % name)
with mixed_layer(
size=proj_size, act=weight_act, name="%s_combine" % name) as m:
m += identity_projection(expanded)
m += identity_projection(encoded_proj)
# sequence softmax is used to normalize similarities between decoder state
# and encoder outputs into a distribution
attention_weight = fc_layer(
input=m,
size=1,
act=SequenceSoftmaxActivation(),
param_attr=softmax_param_attr,
name="%s_softmax" % name,
bias_attr=False)
scaled = scaling_layer(
weight=attention_weight,
input=encoded_sequence,
name='%s_scaling' % name)
return pooling_layer(
input=scaled, pooling_type=SumPooling(), name="%s_pooling" % name)
############################################################################
# Miscs #
############################################################################
@wrap_name_default("dropout")
def dropout_layer(input, dropout_rate, name=None):
"""
Add a dropout layer on top of the input layer. It is implemented as an
addto layer with a linear activation, no bias, and dropout enabled through
the layer's extra attribute.
:param name: name of the dropout layer.
:type name: basestring
:param input: input layer.
:type input: LayerOutput
:param dropout_rate: probability of dropping out each unit.
:type dropout_rate: float
:return: LayerOutput object.
:rtype: LayerOutput
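The example usage is a minimal sketch; ``hidden`` stands for any layer
defined earlier in the config:
.. code-block:: python
    dropped = dropout_layer(input=hidden, dropout_rate=0.5)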
"""
return addto_layer(
name=name,
input=input,
act=LinearActivation(),
bias_attr=False,
layer_attr=ExtraAttr(drop_rate=dropout_rate))
def inputs(layers, *args):
"""
Declare the inputs of the network. The order of the inputs should be the
same as the data provider's return order.
:param layers: Input layers.
:type layers: list|tuple|LayerOutput
:return:
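The example usage is a minimal sketch; ``img`` and ``label`` are assumed to
be data_layer outputs defined elsewhere in the config:
.. code-block:: python
    inputs(img, label)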
"""
if isinstance(layers, LayerOutput) or isinstance(layers, basestring):
layers = [layers]
if len(args) != 0:
layers.extend(args)
Inputs(*[l.name for l in layers])
def outputs(layers, *args):
"""
Declare the outputs of the network. If the user has not defined the inputs
of the network, this method will calculate the input order by a DFS
traversal.
:param layers: Output layers.
:type layers: list|tuple|LayerOutput
:return:
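The example usage is a minimal sketch; ``cost`` is assumed to be a cost
layer defined elsewhere in the config:
.. code-block:: python
    outputs(cost)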
"""
def __dfs_travel__(layer,
predicate=lambda x: x.layer_type == LayerType.DATA):
"""
Post-order (LRV) DFS traversal starting from an output layer.
The returned list contains the data_layers reachable from this leaf node,
in their definition order.
:param layer:
:type layer: LayerOutput
:return:
"""
assert isinstance(layer, LayerOutput), "layer is %s" % (layer)
retv = []
if layer.parents is not None:
for p in layer.parents:
retv.extend(__dfs_travel__(p, predicate))
if predicate(layer):
retv.append(layer)
return retv
if isinstance(layers, LayerOutput):
layers = [layers]
if len(args) != 0:
layers.extend(args)
assert len(layers) > 0
if HasInputsSet(): # input already set
Outputs(*[l.name for l in layers])
return # just return outputs.
if len(layers) != 1:
logger.warning("The `outputs` routine tries to calculate the network's"
" input and output order. It might not work well."
" Please check the following log carefully.")
inputs = []
outputs_ = []
for each_layer in layers:
assert isinstance(each_layer, LayerOutput)
inputs.extend(__dfs_travel__(each_layer))
outputs_.extend(
__dfs_travel__(each_layer,
lambda x: x.layer_type == LayerType.COST))
# Currently, we got each leaf node's inputs order, output order.
# We merge them together.
final_inputs = []
final_outputs = []
for each_input in inputs:
assert isinstance(each_input, LayerOutput)
if each_input.name not in final_inputs:
final_inputs.append(each_input.name)
for each_output in outputs_:
assert isinstance(each_output, LayerOutput)
if each_output.name not in final_outputs:
final_outputs.append(each_output.name)
logger.info("".join(["The input order is [", ", ".join(final_inputs), "]"]))
if len(final_outputs) == 0:
final_outputs = map(lambda x: x.name, layers)
logger.info("".join(
["The output order is [", ", ".join(final_outputs), "]"]))
Inputs(*final_inputs)
Outputs(*final_outputs)
|
|
"""
Support for a generic MQTT vacuum.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/vacuum.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.vacuum import (
DOMAIN, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT, SUPPORT_FAN_SPEED,
SUPPORT_LOCATE, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_SEND_COMMAND,
SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
VacuumDevice)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, CONF_DEVICE, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.icon import icon_for_battery_level
from . import (
ATTR_DISCOVERY_HASH, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
SERVICE_TO_STRING = {
SUPPORT_TURN_ON: 'turn_on',
SUPPORT_TURN_OFF: 'turn_off',
SUPPORT_PAUSE: 'pause',
SUPPORT_STOP: 'stop',
SUPPORT_RETURN_HOME: 'return_home',
SUPPORT_FAN_SPEED: 'fan_speed',
SUPPORT_BATTERY: 'battery',
SUPPORT_STATUS: 'status',
SUPPORT_SEND_COMMAND: 'send_command',
SUPPORT_LOCATE: 'locate',
SUPPORT_CLEAN_SPOT: 'clean_spot',
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
def services_to_strings(services):
"""Convert SUPPORT_* service bitmask to list of service strings."""
strings = []
for service in SERVICE_TO_STRING:
if service & services:
strings.append(SERVICE_TO_STRING[service])
return strings
def strings_to_services(strings):
"""Convert service strings to SUPPORT_* service bitmask."""
services = 0
for string in strings:
services |= STRING_TO_SERVICE[string]
return services
DEFAULT_SERVICES = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_STOP |\
SUPPORT_RETURN_HOME | SUPPORT_STATUS | SUPPORT_BATTERY |\
SUPPORT_CLEAN_SPOT
ALL_SERVICES = DEFAULT_SERVICES | SUPPORT_PAUSE | SUPPORT_LOCATE |\
SUPPORT_FAN_SPEED | SUPPORT_SEND_COMMAND
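# Illustrative note (not used by the platform logic): services_to_strings and
# strings_to_services are inverses of each other for these single-bit flags,
# e.g. strings_to_services(services_to_strings(DEFAULT_SERVICES)) yields
# DEFAULT_SERVICES again.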
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_PAYLOAD_TURN_ON = 'payload_turn_on'
CONF_PAYLOAD_TURN_OFF = 'payload_turn_off'
CONF_PAYLOAD_RETURN_TO_BASE = 'payload_return_to_base'
CONF_PAYLOAD_STOP = 'payload_stop'
CONF_PAYLOAD_CLEAN_SPOT = 'payload_clean_spot'
CONF_PAYLOAD_LOCATE = 'payload_locate'
CONF_PAYLOAD_START_PAUSE = 'payload_start_pause'
CONF_BATTERY_LEVEL_TOPIC = 'battery_level_topic'
CONF_BATTERY_LEVEL_TEMPLATE = 'battery_level_template'
CONF_CHARGING_TOPIC = 'charging_topic'
CONF_CHARGING_TEMPLATE = 'charging_template'
CONF_CLEANING_TOPIC = 'cleaning_topic'
CONF_CLEANING_TEMPLATE = 'cleaning_template'
CONF_DOCKED_TOPIC = 'docked_topic'
CONF_DOCKED_TEMPLATE = 'docked_template'
CONF_ERROR_TOPIC = 'error_topic'
CONF_ERROR_TEMPLATE = 'error_template'
CONF_STATE_TOPIC = 'state_topic'
CONF_STATE_TEMPLATE = 'state_template'
CONF_FAN_SPEED_TOPIC = 'fan_speed_topic'
CONF_FAN_SPEED_TEMPLATE = 'fan_speed_template'
CONF_SET_FAN_SPEED_TOPIC = 'set_fan_speed_topic'
CONF_FAN_SPEED_LIST = 'fan_speed_list'
CONF_SEND_COMMAND_TOPIC = 'send_command_topic'
DEFAULT_NAME = 'MQTT Vacuum'
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES)
DEFAULT_PAYLOAD_TURN_ON = 'turn_on'
DEFAULT_PAYLOAD_TURN_OFF = 'turn_off'
DEFAULT_PAYLOAD_RETURN_TO_BASE = 'return_to_base'
DEFAULT_PAYLOAD_STOP = 'stop'
DEFAULT_PAYLOAD_CLEAN_SPOT = 'clean_spot'
DEFAULT_PAYLOAD_LOCATE = 'locate'
DEFAULT_PAYLOAD_START_PAUSE = 'start_pause'
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS):
vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(mqtt.CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(mqtt.CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_PAYLOAD_TURN_ON,
default=DEFAULT_PAYLOAD_TURN_ON): cv.string,
vol.Optional(CONF_PAYLOAD_TURN_OFF,
default=DEFAULT_PAYLOAD_TURN_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_RETURN_TO_BASE,
default=DEFAULT_PAYLOAD_RETURN_TO_BASE): cv.string,
vol.Optional(CONF_PAYLOAD_STOP,
default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(CONF_PAYLOAD_CLEAN_SPOT,
default=DEFAULT_PAYLOAD_CLEAN_SPOT): cv.string,
vol.Optional(CONF_PAYLOAD_LOCATE,
default=DEFAULT_PAYLOAD_LOCATE): cv.string,
vol.Optional(CONF_PAYLOAD_START_PAUSE,
default=DEFAULT_PAYLOAD_START_PAUSE): cv.string,
vol.Optional(CONF_BATTERY_LEVEL_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_BATTERY_LEVEL_TEMPLATE): cv.template,
vol.Optional(CONF_CHARGING_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_CHARGING_TEMPLATE): cv.template,
vol.Optional(CONF_CLEANING_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_CLEANING_TEMPLATE): cv.template,
vol.Optional(CONF_DOCKED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_DOCKED_TEMPLATE): cv.template,
vol.Optional(CONF_ERROR_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_ERROR_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_TEMPLATE): cv.template,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_LIST, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up MQTT vacuum through configuration.yaml."""
await _async_setup_entity(config, async_add_entities,
discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT vacuum dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT vacuum."""
try:
discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(DOMAIN, 'mqtt'), async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash=None):
"""Set up the MQTT vacuum."""
async_add_entities([MqttVacuum(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttVacuum(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, VacuumDevice):
"""Representation of a MQTT-controlled vacuum."""
def __init__(self, config, config_entry, discovery_info):
"""Initialize the vacuum."""
self._cleaning = False
self._charging = False
self._docked = False
self._error = None
self._status = 'Unknown'
self._battery_level = 0
self._fan_speed = 'unknown'
self._fan_speed_list = []
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_info,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
def _setup_from_config(self, config):
self._name = config.get(CONF_NAME)
supported_feature_strings = config.get(CONF_SUPPORTED_FEATURES)
self._supported_features = strings_to_services(
supported_feature_strings
)
self._fan_speed_list = config.get(CONF_FAN_SPEED_LIST)
self._qos = config.get(mqtt.CONF_QOS)
self._retain = config.get(mqtt.CONF_RETAIN)
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key) for key in (
CONF_PAYLOAD_TURN_ON,
CONF_PAYLOAD_TURN_OFF,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD_STOP,
CONF_PAYLOAD_CLEAN_SPOT,
CONF_PAYLOAD_LOCATE,
CONF_PAYLOAD_START_PAUSE
)
}
self._state_topics = {
key: config.get(key) for key in (
CONF_BATTERY_LEVEL_TOPIC,
CONF_CHARGING_TOPIC,
CONF_CLEANING_TOPIC,
CONF_DOCKED_TOPIC,
CONF_ERROR_TOPIC,
CONF_FAN_SPEED_TOPIC
)
}
self._templates = {
key: config.get(key) for key in (
CONF_BATTERY_LEVEL_TEMPLATE,
CONF_CHARGING_TEMPLATE,
CONF_CLEANING_TEMPLATE,
CONF_DOCKED_TEMPLATE,
CONF_ERROR_TEMPLATE,
CONF_FAN_SPEED_TEMPLATE
)
}
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
await subscription.async_unsubscribe_topics(self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = self.hass
@callback
def message_received(msg):
"""Handle new MQTT message."""
if msg.topic == self._state_topics[CONF_BATTERY_LEVEL_TOPIC] and \
self._templates[CONF_BATTERY_LEVEL_TEMPLATE]:
battery_level = self._templates[CONF_BATTERY_LEVEL_TEMPLATE]\
.async_render_with_possible_json_value(
msg.payload, error_value=None)
if battery_level is not None:
self._battery_level = int(battery_level)
if msg.topic == self._state_topics[CONF_CHARGING_TOPIC] and \
self._templates[CONF_CHARGING_TEMPLATE]:
charging = self._templates[CONF_CHARGING_TEMPLATE]\
.async_render_with_possible_json_value(
msg.payload, error_value=None)
if charging is not None:
self._charging = cv.boolean(charging)
if msg.topic == self._state_topics[CONF_CLEANING_TOPIC] and \
self._templates[CONF_CLEANING_TEMPLATE]:
cleaning = self._templates[CONF_CLEANING_TEMPLATE]\
.async_render_with_possible_json_value(
msg.payload, error_value=None)
if cleaning is not None:
self._cleaning = cv.boolean(cleaning)
if msg.topic == self._state_topics[CONF_DOCKED_TOPIC] and \
self._templates[CONF_DOCKED_TEMPLATE]:
docked = self._templates[CONF_DOCKED_TEMPLATE]\
.async_render_with_possible_json_value(
msg.payload, error_value=None)
if docked is not None:
self._docked = cv.boolean(docked)
if msg.topic == self._state_topics[CONF_ERROR_TOPIC] and \
self._templates[CONF_ERROR_TEMPLATE]:
error = self._templates[CONF_ERROR_TEMPLATE]\
.async_render_with_possible_json_value(
msg.payload, error_value=None)
if error is not None:
self._error = cv.string(error)
if self._docked:
if self._charging:
self._status = "Docked & Charging"
else:
self._status = "Docked"
elif self._cleaning:
self._status = "Cleaning"
elif self._error:
self._status = "Error: {}".format(self._error)
else:
self._status = "Stopped"
if msg.topic == self._state_topics[CONF_FAN_SPEED_TOPIC] and \
self._templates[CONF_FAN_SPEED_TEMPLATE]:
fan_speed = self._templates[CONF_FAN_SPEED_TEMPLATE]\
.async_render_with_possible_json_value(
msg.payload, error_value=None)
if fan_speed is not None:
self._fan_speed = fan_speed
self.async_write_ha_state()
topics_list = {topic for topic in self._state_topics.values() if topic}
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
{
"topic{}".format(i): {
"topic": topic,
"msg_callback": message_received,
"qos": self._qos
} for i, topic in enumerate(topics_list)
}
)
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for an MQTT vacuum."""
return False
@property
def is_on(self):
"""Return true if vacuum is on."""
return self._cleaning
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def status(self):
"""Return a status string for the vacuum."""
if self.supported_features & SUPPORT_STATUS == 0:
return
return self._status
@property
def fan_speed(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return []
return self._fan_speed_list
@property
def battery_level(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return icon_for_battery_level(
battery_level=self.battery_level, charging=self._charging)
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the vacuum on."""
if self.supported_features & SUPPORT_TURN_ON == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_TURN_ON],
self._qos, self._retain)
self._status = 'Cleaning'
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the vacuum off."""
if self.supported_features & SUPPORT_TURN_OFF == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_TURN_OFF],
self._qos, self._retain)
self._status = 'Turning Off'
self.async_write_ha_state()
async def async_stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_STOP],
self._qos, self._retain)
self._status = 'Stopping the current task'
self.async_write_ha_state()
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_CLEAN_SPOT],
self._qos, self._retain)
self._status = "Cleaning spot"
self.async_write_ha_state()
async def async_locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_LOCATE],
self._qos, self._retain)
self._status = "Hi, I'm over here!"
self.async_write_ha_state()
async def async_start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_START_PAUSE],
self._qos, self._retain)
self._status = 'Pausing/Resuming cleaning...'
self.async_write_ha_state()
async def async_return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return
mqtt.async_publish(self.hass, self._command_topic,
self._payloads[CONF_PAYLOAD_RETURN_TO_BASE],
self._qos, self._retain)
self._status = 'Returning home...'
self.async_write_ha_state()
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return
if not self._fan_speed_list or fan_speed not in self._fan_speed_list:
return
mqtt.async_publish(self.hass, self._set_fan_speed_topic,
fan_speed, self._qos, self._retain)
self._status = "Setting fan to {}...".format(fan_speed)
self.async_write_ha_state()
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return
mqtt.async_publish(self.hass, self._send_command_topic,
command, self._qos, self._retain)
self._status = "Sending command {}...".format(command)
self.async_write_ha_state()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DEPRECATED mix-in handler for bulk loading data into an application.
Please use the new bulkloader.
"""
import Cookie
import StringIO
import csv
import httplib
import os
import traceback
import wsgiref.handlers
from google.appengine.api import datastore
from google.appengine.ext import webapp
from google.appengine.ext.bulkload import constants
def Validate(value, type):
""" Checks that value is non-empty and of the right type.
Raises ValueError if value is None or empty, TypeError if it's not the given
type.
Args:
value: any value
type: a type or tuple of types
"""
if not value:
raise ValueError('Value should not be empty; received %s.' % value)
elif not isinstance(value, type):
raise TypeError('Expected a %s, but received %s (a %s).' %
(type, value, value.__class__))
class Loader(object):
"""A base class for creating datastore entities from input data.
To add a handler for bulk loading a new entity kind into your datastore,
write a subclass of this class that calls Loader.__init__ from your
class's __init__.
If you need to run extra code to convert entities from the input
data, create new properties, or otherwise modify the entities before
they're inserted, override HandleEntity.
See the CreateEntity method for the creation of entities from the
(parsed) input data.
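A minimal sketch of such a subclass (the kind, property names and converter
choices here are purely illustrative):

  class AlbumLoader(Loader):
    def __init__(self):
      Loader.__init__(self, 'Album',
                      [('title', str),
                       ('year', int),
                      ])

  main(AlbumLoader())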
"""
__loaders = {}
__kind = None
__properties = None
def __init__(self, kind, properties):
""" Constructor.
Populates this Loader's kind and properties map. Also registers it with
the bulk loader, so that all you need to do is instantiate your Loader,
and the bulkload handler will automatically use it.
Args:
kind: a string containing the entity kind that this loader handles
properties: list of (name, converter) tuples.
This is used to automatically convert the CSV columns into properties.
The converter should be a function that takes one argument, a string
value from the CSV file, and returns a correctly typed property value
that should be inserted. The tuples in this list should match the
columns in your CSV file, in order.
For example:
[('name', str),
('id_number', int),
('email', datastore_types.Email),
('user', users.User),
('birthdate', lambda x: datetime.datetime.fromtimestamp(float(x))),
('description', datastore_types.Text),
]
"""
Validate(kind, basestring)
self.__kind = kind
Validate(properties, list)
for name, fn in properties:
Validate(name, basestring)
assert callable(fn), (
'Conversion function %s for property %s is not callable.' % (fn, name))
self.__properties = properties
Loader.__loaders[kind] = self
def kind(self):
""" Return the entity kind that this Loader handes.
"""
return self.__kind
def CreateEntity(self, values, key_name=None):
""" Creates an entity from a list of property values.
Args:
values: list/tuple of str
key_name: if provided, the name for the (single) resulting Entity
Returns:
list of datastore.Entity
The returned entities are populated with the property values from the
argument, converted to native types using the properties map given in
the constructor, and passed through HandleEntity. They're ready to be
inserted.
Raises:
AssertionError if the number of values doesn't match the number
of properties in the properties map.
"""
Validate(values, (list, tuple))
assert len(values) == len(self.__properties), (
'Expected %d CSV columns, found %d.' %
(len(self.__properties), len(values)))
entity = datastore.Entity(self.__kind, name=key_name)
for (name, converter), val in zip(self.__properties, values):
if converter is bool and val.lower() in ('0', 'false', 'no'):
val = False
entity[name] = converter(val)
entities = self.HandleEntity(entity)
if entities is not None:
if not isinstance(entities, (list, tuple)):
entities = [entities]
for entity in entities:
if not isinstance(entity, datastore.Entity):
raise TypeError('Expected a datastore.Entity, received %s (a %s).' %
(entity, entity.__class__))
return entities
def HandleEntity(self, entity):
""" Subclasses can override this to add custom entity conversion code.
This is called for each entity, after its properties are populated from
CSV but before it is stored. Subclasses can override this to add custom
entity handling code.
The entity to be inserted should be returned. If multiple entities should
be inserted, return a list of entities. If no entities should be inserted,
return None or [].
Args:
entity: datastore.Entity
Returns:
datastore.Entity or list of datastore.Entity
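For example, a subclass could normalize a property before the entity is
stored (illustrative only; it assumes a 'name' column was loaded):

  def HandleEntity(self, entity):
    entity['name'] = entity['name'].strip().lower()
    return entity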
"""
return entity
@staticmethod
def RegisteredLoaders():
""" Returns a list of the Loader instances that have been created.
"""
return dict(Loader.__loaders)
class BulkLoad(webapp.RequestHandler):
"""A handler for bulk load requests.
This class contains handlers for the bulkloading process. One for
GET to provide cookie information for the upload script, and one
handler for a POST request to upload the entities.
In the POST request, the body contains the data representing the
entities' property values. The original format was a sequence of
lines of comma-separated values (and is handled by the Load
method). The current (version 1) format is a binary format described
in the Tools and Libraries section of the documentation, and is
handled by the LoadV1 method.
"""
def get(self):
""" Handle a GET. Just show an info page.
"""
page = self.InfoPage(self.request.uri)
self.response.out.write(page)
def post(self):
""" Handle a POST. Reads CSV data, converts to entities, and stores them.
"""
self.response.headers['Content-Type'] = 'text/plain'
response, output = self.Load(self.request.get(constants.KIND_PARAM),
self.request.get(constants.CSV_PARAM))
self.response.set_status(response)
self.response.out.write(output)
def InfoPage(self, uri):
""" Renders an information page with the POST endpoint and cookie flag.
Args:
uri: a string containing the request URI
Returns:
A string with the contents of the info page to be displayed
"""
page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html><head>
<title>Bulk Loader</title>
</head><body>"""
page += ('The bulk load endpoint is: <a href="%s">%s</a><br />\n' %
(uri, uri))
cookies = os.environ.get('HTTP_COOKIE', None)
if cookies:
cookie = Cookie.BaseCookie(cookies)
for param in ['ACSID', 'dev_appserver_login']:
value = cookie.get(param)
if value:
page += ("Pass this flag to the client: --cookie='%s=%s'\n" %
(param, value.value))
break
else:
page += 'No cookie found!\n'
page += '</body></html>'
return page
def IterRows(self, reader):
""" Yields a tuple of a line number and row for each row of the CSV data.
Args:
reader: a csv reader for the input data.
"""
line_num = 1
for columns in reader:
yield (line_num, columns)
line_num += 1
def LoadEntities(self, iter, loader, key_format=None):
"""Generates entities and loads them into the datastore. Returns
a tuple of HTTP code and string reply.
Args:
iter: an iterator yielding pairs of a line number and row contents.
loader: the Loader instance used to convert rows into entities.
key_format: a format string to convert a line number into an
entity id. If None, then entity IDs are automatically generated.
"""
entities = []
output = []
for line_num, columns in iter:
key_name = None
if key_format is not None:
key_name = key_format % line_num
if columns:
try:
output.append('\nLoading from line %d...' % line_num)
new_entities = loader.CreateEntity(columns, key_name=key_name)
if new_entities:
entities.extend(new_entities)
output.append('done.')
except:
stacktrace = traceback.format_exc()
output.append('error:\n%s' % stacktrace)
return (httplib.BAD_REQUEST, ''.join(output))
datastore.Put(entities)
return (httplib.OK, ''.join(output))
def Load(self, kind, data):
"""Parses CSV data, uses a Loader to convert to entities, and stores them.
On error, fails fast. Returns a "bad request" HTTP response code and
includes the traceback in the output.
Args:
kind: a string containing the entity kind that this loader handles
data: a string containing the CSV data to load
Returns:
tuple (response code, output) where:
response code: integer HTTP response code to return
output: string containing the HTTP response body
"""
data = data.encode('utf-8')
Validate(kind, basestring)
Validate(data, basestring)
output = []
try:
loader = Loader.RegisteredLoaders()[kind]
except KeyError:
output.append('Error: no Loader defined for kind %s.' % kind)
return (httplib.BAD_REQUEST, ''.join(output))
buffer = StringIO.StringIO(data)
reader = csv.reader(buffer, skipinitialspace=True)
try:
csv.field_size_limit(800000)
except AttributeError:
pass
return self.LoadEntities(self.IterRows(reader), loader)
def main(*loaders):
"""Starts bulk upload.
Raises TypeError if no arguments are given or if any argument is not a
Loader instance.
Args:
loaders: one or more Loader instances.
"""
if not loaders:
raise TypeError('Expected at least one argument.')
for loader in loaders:
if not isinstance(loader, Loader):
raise TypeError('Expected a Loader instance; received %r' % loader)
application = webapp.WSGIApplication([('.*', BulkLoad)])
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
|
|
from dimagi.utils.couch.database import safe_delete
from dimagi.utils.parsing import json_format_datetime
from corehq.util.test_utils import unit_testing_only
def get_latest_case_export_schema(domain, case_type):
from .models import CaseExportDataSchema
key = [domain, 'CaseExportDataSchema', case_type]
return _get_latest_export_schema(CaseExportDataSchema, key)
def get_latest_form_export_schema(domain, app_id, xmlns):
from .models import FormExportDataSchema
key = [domain, 'FormExportDataSchema', app_id, xmlns]
return _get_latest_export_schema(FormExportDataSchema, key)
def _get_latest_export_schema(cls, key):
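# The trailing element of the view key is presumably a creation timestamp,
# so querying in descending order from key + [{}] (a high sentinel) down to
# key with limit=1 returns the most recently created schema for this key.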
result = cls.get_db().view(
'schemas_by_xmlns_or_case_type/view',
startkey=key + [{}],
endkey=key,
include_docs=True,
limit=1,
reduce=False,
descending=True,
).first()
return cls.wrap(result['doc']) if result else None
def get_case_inferred_schema(domain, case_type):
from .models import CaseInferredSchema
key = [domain, 'CaseInferredSchema', case_type]
result = CaseInferredSchema.get_db().view(
'schemas_by_xmlns_or_case_type/view',
startkey=key + [{}],
endkey=key,
include_docs=True,
limit=1,
reduce=False,
descending=True,
).first()
return CaseInferredSchema.wrap(result['doc']) if result else None
def get_form_inferred_schema(domain, app_id, xmlns):
from .models import FormInferredSchema
key = [domain, 'FormInferredSchema', app_id, xmlns]
result = FormInferredSchema.get_db().view(
'schemas_by_xmlns_or_case_type/view',
startkey=key + [{}],
endkey=key,
include_docs=True,
limit=1,
reduce=False,
descending=True,
).first()
return FormInferredSchema.wrap(result['doc']) if result else None
def get_case_exports_by_domain(domain):
from .models import CaseExportInstance
key = [domain, 'CaseExportInstance']
return _get_export_instance(CaseExportInstance, key)
def get_form_exports_by_domain(domain):
from .models import FormExportInstance
key = [domain, 'FormExportInstance']
return _get_export_instance(FormExportInstance, key)
def get_brief_exports(domain, form_or_case=None):
from .models import ExportInstance
if form_or_case == 'form':
key = [domain, 'FormExportInstance']
elif form_or_case == 'case':
key = [domain, 'CaseExportInstance']
else:
key = [domain]
return _get_export_instance(ExportInstance, key, include_docs=False)
def get_brief_deid_exports(domain, form_or_case=None):
from .models import ExportInstance
doc_types = [doc_type for doc_type in [
'FormExportInstance' if form_or_case in ['form', None] else None,
'CaseExportInstance' if form_or_case in ['case', None] else None,
] if doc_type is not None]
results = ExportInstance.get_db().view(
'export_instances_by_domain/view',
keys=[[domain, doc_type, True] for doc_type in doc_types],
include_docs=False,
reduce=False,
).all()
return [result['value'] for result in results]
def get_export_count_by_domain(domain):
from .models import ExportInstance
export_result = ExportInstance.get_db().view(
'export_instances_by_domain/view',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=True,
).one()
return 0 if export_result is None else export_result['value']
def get_deid_export_count(domain):
from .models import ExportInstance
return sum(res['value'] for res in ExportInstance.get_db().view(
'export_instances_by_domain/view',
keys=[[domain, 'FormExportInstance', True],
[domain, 'CaseExportInstance', True]],
include_docs=False,
group=True,
).all())
def _get_export_instance(cls, key, include_docs=True):
results = cls.get_db().view(
'export_instances_by_domain/view',
startkey=key,
endkey=key + [{}],
include_docs=include_docs,
reduce=False,
).all()
if include_docs:
return [cls.wrap(result['doc']) for result in results]
return [result['value'] for result in results]
def get_daily_saved_export_ids_for_auto_rebuild(accessed_after):
"""
get all saved exports accessed after the timestamp
:param accessed_after: datetime to get reports that have been accessed after this timestamp
"""
from .models import ExportInstance
# get exports that have not been accessed yet
new_exports = ExportInstance.get_db().view(
"export_instances_by_is_daily_saved/view",
include_docs=False,
key=[None],
reduce=False,
).all()
export_ids = [export['id'] for export in new_exports]
# get exports that have last_accessed set after the cutoff requested
accessed_reports = ExportInstance.get_db().view(
"export_instances_by_is_daily_saved/view",
include_docs=False,
startkey=[json_format_datetime(accessed_after)],
reduce=False,
).all()
export_ids.extend([result['id'] for result in accessed_reports])
return export_ids
def get_properly_wrapped_export_instance(doc_id):
from .models import ExportInstance
doc = ExportInstance.get_db().get(doc_id)
return _properly_wrap_export_instance(doc)
def _properly_wrap_export_instance(doc):
from .models import FormExportInstance
from .models import CaseExportInstance
from .models import ExportInstance
class_ = {
"FormExportInstance": FormExportInstance,
"CaseExportInstance": CaseExportInstance,
}.get(doc['doc_type'], ExportInstance)
return class_.wrap(doc)
@unit_testing_only
def delete_all_export_data_schemas():
from .models import ExportDataSchema
db = ExportDataSchema.get_db()
for row in db.view('schemas_by_xmlns_or_case_type/view', reduce=False):
doc_id = row['id']
safe_delete(db, doc_id)
@unit_testing_only
def delete_all_export_instances():
from .models import ExportInstance
db = ExportInstance.get_db()
for row in db.view('export_instances_by_domain/view', reduce=False):
doc_id = row['id']
safe_delete(db, doc_id)
|
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Create base map from localization and mobileye lane detection
"""
import argparse
import csv
import math
import numpy as np
import os
import rospy
import sys
from modules.map.proto.map_pb2 import Map
from modules.map.proto.map_lane_pb2 import LaneBoundaryType, Lane
from modules.map.proto.map_road_pb2 import BoundaryEdge, Road
from modules.routing.proto.routing_pb2 import LaneWaypoint
from modules.routing.proto.poi_pb2 import POI, Landmark
class DataPoint:
"""
class of data sample (localization and mobileye lane detection)
"""
def __init__(self):
self.pos_x = 0.0 # localization
self.pos_y = 0.0
self.pos_z = 0.0
self.theta = 0.0 # heading
self.dist_left = 0.0 # distance to left lane marking
self.conf_left = 0 # confidence of left lane marking (0/1: low confidence, -1/-2: high confidence)
self.dist_right = 0.0 # distance to right lane marking
self.conf_right = 0 # confidence of right lane marking (0/1: low confidence, -1/-2: high confidence)
self.width = 0.0 # lane width
self.ratio = 0.0 # relative position within a lane (dist_left / width)
self.center_x = 0.0 # point on the center line of current lane
self.center_y = 0.0
def distance(x1, y1, x2, y2):
"""
l2 distance
"""
return math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))
def interpolate_width(data, default_width):
"""
fill 'width' field of all data samples by interpolation
"""
# Collect a set of consecutive entries with low confidence on left OR right lane detection
intervals = []
interval_begin = -1
interval_end = -1
for (index, entry) in enumerate(data):
if entry.conf_left >= 0 or entry.conf_right >= 0:
if interval_begin < 0:
interval_begin = index
interval_end = index
else:
if interval_begin >= 0:
intervals.append((interval_begin, interval_end))
interval_begin = -1
interval_end = -1
entry.width = entry.dist_left + entry.dist_right
if interval_begin >= 0:
intervals.append((interval_begin, interval_end))
# Iterate through intervals to interpolate width
for interval in intervals:
for index in range(interval[0], interval[1] + 1):
if interval[0] == 0 and interval[1] == len(data) - 1:
data[index].width = default_width
else:
if interval[0] == 0:
data[index].width = data[interval[1] + 1].width
elif interval[1] == len(data) - 1:
data[index].width = data[interval[0] - 1].width
else:
alpha = float(index - interval[0] + 1) / (interval[1] - interval[0] + 2)
data[index].width = (1.0 - alpha) * data[interval[0] - 1].width + alpha * data[interval[1] + 1].width
# Fill in dist_left/right and conf_left/right using interpolated width
for (index, entry) in enumerate(data):
if entry.conf_left >= 0 and entry.conf_right < 0:
entry.dist_left = entry.width - entry.dist_right
entry.conf_left = -1
elif entry.conf_left < 0 and entry.conf_right >= 0:
entry.dist_right = entry.width - entry.dist_left
entry.conf_right = -1
def interpolate_ratio(data, default_ratio):
"""
fill 'ratio' field of all data samples by interpolation
"""
# Collect a set of consecutive entries with low confidence on left AND right lane detection
intervals = []
interval_begin = -1
interval_end = -1
for (index, entry) in enumerate(data):
if entry.conf_left >= 0 and entry.conf_right >= 0:
if interval_begin < 0:
interval_begin = index
interval_end = index
else:
if interval_begin >= 0:
intervals.append((interval_begin, interval_end))
interval_begin = -1
interval_end = -1
entry.ratio = float(entry.dist_left) / entry.width
if interval_begin >= 0:
intervals.append((interval_begin, interval_end))
# Iterate through intervals to interpolate ratio
for interval in intervals:
for index in range(interval[0], interval[1] + 1):
if interval[0] == 0 and interval[1] == len(data) - 1:
data[index].ratio = default_ratio
else:
if interval[0] == 0:
data[index].ratio = data[interval[1] + 1].ratio
elif interval[1] == len(data) - 1:
data[index].ratio = data[interval[0] - 1].ratio
else:
alpha = float(index - interval[0] + 1) / (interval[1] - interval[0] + 2)
data[index].ratio = (1.0 - alpha) * data[interval[0] - 1].ratio + alpha * data[interval[1] + 1].ratio
# Fill in dist_left/right and conf_left/right using interpolated ratio
for (index, entry) in enumerate(data):
if entry.conf_left >= 0 and entry.conf_right >= 0:
entry.dist_left = entry.width * entry.ratio
entry.dist_right = entry.width - entry.dist_left
entry.conf_left = -1
entry.conf_right = -1
def compute_center(data):
"""
fill 'center_x' and 'center_y' fields of all data samples
"""
for entry in data:
pos_x = entry.pos_x
pos_y = entry.pos_y
pos_z = entry.pos_z
theta = entry.theta
dist_left = entry.dist_left
dist_right = entry.dist_right
theta_left = theta + np.pi / 2.0
pos_l_x = pos_x + dist_left * np.cos(theta_left)
pos_l_y = pos_y + dist_left * np.sin(theta_left)
theta_right = theta - np.pi / 2.0
pos_r_x = pos_x + dist_right * np.cos(theta_right)
pos_r_y = pos_y + dist_right * np.sin(theta_right)
entry.center_x = (pos_l_x + pos_r_x) / 2.0
entry.center_y = (pos_l_y + pos_r_y) / 2.0
def sample_data(data, sample_distance):
"""
sample 'data' at the interval of 'sample_distance'
"""
result = []
if len(data) > 0:
last_x = data[0].center_x
last_y = data[0].center_y
result.append(data[0])
for entry in data[1:]:
if distance(last_x, last_y, entry.center_x, entry.center_y) > sample_distance:
result.append(entry)
last_x = entry.center_x
last_y = entry.center_y
return result
def extract_data(data, dim):
"""
extract dimension 'dim' (center_x, center_y or width) of 'data' into a list
"""
result = []
for entry in data:
if dim == 'center_x':
result.append(entry.center_x)
elif dim == 'center_y':
result.append(entry.center_y)
elif dim == 'width':
result.append(entry.width)
return result
def laplacian_operator(data):
"""
apply laplacian operator on data
"""
lap = []
lap.append(0.0)
for index in range(1, len(data) - 1):
lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index])
lap.append(0.0)
return lap
def laplacian_smooth(data, alpha = 0.5, iterations = 3):
"""
apply laplacian smoothing on data
"""
for iteration in range(iterations):
lap = laplacian_operator(data)
for index in range(len(data)):
data[index] += alpha * lap[index]
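# Hedged usage sketch for the smoothing above (defined but never called): each
# iteration moves every interior sample a fraction 'alpha' of the way toward
# the mean of its two neighbors, while the endpoints stay fixed, so isolated
# spikes decay. The numbers below are made up purely for illustration.
def _demo_laplacian_smooth():
    values = [0.0, 0.0, 10.0, 0.0, 0.0]
    laplacian_smooth(values, alpha=0.5, iterations=3)
    return values  # the spike at index 2 is progressively flattened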
def update_data(data, dim, new_data):
"""
copy new_data to dimension 'dim' of 'data'
"""
for entry, new_entry in zip(data, new_data):
if dim == 'center_x':
entry.center_x = new_entry
elif dim == 'center_y':
entry.center_y = new_entry
elif dim == 'width':
entry.width = new_entry
def smooth_dimension(data, dim):
"""
smooth dimension 'dim' of 'data'
"""
extracted_data = extract_data(data, dim)
    # The same smoothing parameters are applied to centers and widths.
    laplacian_smooth(extracted_data, 1.0, 1000)
update_data(data, dim, extracted_data)
def smooth_center_width(data):
"""
smooth centers and widths of data
"""
smooth_dimension(data, 'center_x')
smooth_dimension(data, 'center_y')
smooth_dimension(data, 'width')
def split_data(data, max_lane_length):
"""
split data into multiple lists, each of which is not longer than 'max_lane_length'
"""
result = []
current = []
total_length = 0.0
if len(data) > 0:
last_x = data[0].center_x
last_y = data[0].center_y
current.append(data[0])
for entry in data[1:]:
current.append(entry)
d = distance(last_x, last_y, entry.center_x, entry.center_y)
total_length += d
if total_length > max_lane_length:
result.append(current)
current = []
current.append(entry)
total_length = 0.0
last_x = entry.center_x
last_y = entry.center_y
if total_length > 0.0:
result.append(current)
return result
def create_lane(data, offset, lane_count, left_lanes, right_lanes):
"""
create a lane using 'data' whose lateral index is 'offset'
offset = 0: center lane; offset < 0: left lanes; offset > 0: right lanes
    lane_count: longitudinal index of the lane (used for naming)
left_lanes, right_lanes: number of left/right lanes (used for boundary types)
"""
total_length = 0.0
total_left_length = 0.0
total_right_length = 0.0
lane = Lane()
lane.id.id = "lane_" + str(lane_count) + "_" + str(offset)
lane_central_curve_seg = lane.central_curve.segment.add()
start_heading = data[0].theta
lane_left_boundary_curve_seg = lane.left_boundary.curve.segment.add()
lane_left_boundary_curve_seg.heading = float(start_heading)
lane_left_boundary_curve_seg.s = 0.0
lane_right_boundary_curve_seg = lane.right_boundary.curve.segment.add()
lane_right_boundary_curve_seg.heading = float(start_heading)
lane_right_boundary_curve_seg.s = 0.0
last_l_x = 0.0
last_l_y = 0.0
last_c_x = 0.0
last_c_y = 0.0
last_r_x = 0.0
last_r_y = 0.0
for (index, entry) in enumerate(data):
theta = entry.theta
theta_left = theta + np.pi / 2.0
theta_right = theta - np.pi / 2.0
pos_c_x = entry.center_x
pos_c_y = entry.center_y
pos_l_x = pos_c_x + entry.width * (0.5 - offset) * np.cos(theta_left)
pos_l_y = pos_c_y + entry.width * (0.5 - offset) * np.sin(theta_left)
pos_r_x = pos_c_x + entry.width * (0.5 + offset) * np.cos(theta_right)
pos_r_y = pos_c_y + entry.width * (0.5 + offset) * np.sin(theta_right)
pos_c_x = (pos_l_x + pos_r_x) / 2.0
pos_c_y = (pos_l_y + pos_r_y) / 2.0
if index == 0:
lane_central_curve_seg.start_position.x = pos_c_x
lane_central_curve_seg.start_position.y = pos_c_y
lane_left_boundary_curve_seg.start_position.x = pos_l_x
lane_left_boundary_curve_seg.start_position.y = pos_l_y
lane_right_boundary_curve_seg.start_position.x = pos_r_x
lane_right_boundary_curve_seg.start_position.y = pos_r_y
else:
d = distance(last_c_x, last_c_y, pos_c_x, pos_c_y)
total_length += d
d_left = distance(last_l_x, last_l_y, pos_l_x, pos_l_y)
total_left_length += d_left
d_right = distance(last_r_x, last_r_y, pos_r_x, pos_r_y)
total_right_length += d_right
point = lane_central_curve_seg.line_segment.point.add()
point.x = pos_c_x
point.y = pos_c_y
point = lane_left_boundary_curve_seg.line_segment.point.add()
point.x = pos_l_x
point.y = pos_l_y
point = lane_right_boundary_curve_seg.line_segment.point.add()
point.x = pos_r_x
point.y = pos_r_y
sample = lane.left_sample.add()
sample.s = total_length
sample.width = entry.width / 2.0
sample = lane.right_sample.add()
sample.s = total_length
sample.width = entry.width / 2.0
last_l_x = pos_l_x
last_l_y = pos_l_y
last_r_x = pos_r_x
last_r_y = pos_r_y
last_c_x = pos_c_x
last_c_y = pos_c_y
lane_central_curve_seg.length = total_length
lane_left_boundary_curve_seg.length = total_left_length
lane_right_boundary_curve_seg.length = total_right_length
boundary_type = lane.left_boundary.boundary_type.add()
boundary_type.s = 0.0
if offset == -left_lanes:
boundary_type.types.append(LaneBoundaryType.DOUBLE_YELLOW)
else:
boundary_type.types.append(LaneBoundaryType.DOTTED_WHITE)
lane.left_boundary.length = total_left_length
boundary_type = lane.right_boundary.boundary_type.add()
boundary_type.s = 0.0
if offset == right_lanes:
boundary_type.types.append(LaneBoundaryType.CURB)
else:
boundary_type.types.append(LaneBoundaryType.DOTTED_WHITE)
lane.right_boundary.length = total_right_length
lane.length = total_length
lane.speed_limit = 29.06
lane.type = Lane.CITY_DRIVING
lane.turn = Lane.NO_TURN
return lane
def create_road(data, left_lanes, right_lanes):
"""
create a road using 'data'
left_lanes, right_lanes: number of left/right lanes
"""
road = Road()
road.id.id = "road"
section = road.section.add()
section.id.id = "section"
left_edge = section.boundary.outer_polygon.edge.add()
left_edge.type = BoundaryEdge.LEFT_BOUNDARY
right_edge = section.boundary.outer_polygon.edge.add()
right_edge.type = BoundaryEdge.RIGHT_BOUNDARY
total_left_length = 0.0
total_right_length = 0.0
start_heading = data[0].theta
left_edge_curve_seg = left_edge.curve.segment.add()
left_edge_curve_seg.heading = float(start_heading)
left_edge_curve_seg.s = 0.0
right_edge_curve_seg = right_edge.curve.segment.add()
right_edge_curve_seg.heading = float(start_heading)
right_edge_curve_seg.s = 0.0
last_l_x = 0.0
last_l_y = 0.0
last_r_x = 0.0
last_r_y = 0.0
for (index, entry) in enumerate(data):
theta = entry.theta
theta_left = theta + np.pi / 2.0
theta_right = theta - np.pi / 2.0
pos_l_x = entry.center_x + entry.width * (0.5 + left_lanes) * np.cos(theta_left)
pos_l_y = entry.center_y + entry.width * (0.5 + left_lanes) * np.sin(theta_left)
pos_r_x = entry.center_x + entry.width * (0.5 + right_lanes) * np.cos(theta_right)
pos_r_y = entry.center_y + entry.width * (0.5 + right_lanes) * np.sin(theta_right)
if index == 0:
left_edge_curve_seg.start_position.x = pos_l_x
left_edge_curve_seg.start_position.y = pos_l_y
right_edge_curve_seg.start_position.x = pos_r_x
right_edge_curve_seg.start_position.y = pos_r_y
else:
d_left = distance(last_l_x, last_l_y, pos_l_x, pos_l_y)
total_left_length += d_left
d_right = distance(last_r_x, last_r_y, pos_r_x, pos_r_y)
total_right_length += d_right
point = left_edge_curve_seg.line_segment.point.add()
point.x = pos_l_x
point.y = pos_l_y
point = right_edge_curve_seg.line_segment.point.add()
point.x = pos_r_x
point.y = pos_r_y
last_l_x = pos_l_x
last_l_y = pos_l_y
last_r_x = pos_r_x
last_r_y = pos_r_y
left_edge_curve_seg.length = total_left_length
right_edge_curve_seg.length = total_right_length
return road
def main():
parser = argparse.ArgumentParser(
description='Generate Base Map from Recorded Localization and Mobileye Lane Detection')
parser.add_argument(
'-i',
'--input_file',
help='Recorded localization and mobileye lane detection in CSV format',
type=str,
default='/tmp/lane.csv')
parser.add_argument(
'--debug',
help='Print debugging info in /tmp',
action='store_true')
parser.add_argument(
'-o',
'--output_file',
help='Output file name of generated base map',
type=str,
default='modules/map/data/gen/base_map.txt')
parser.add_argument(
'-e',
'--end_waypoint_file',
help='Output file name of default end waypoint',
type=str,
default='modules/map/data/gen/default_end_way_point.txt')
parser.add_argument(
'--default_width',
help='Default lane width in meters (only effective when mobileye lane detection fails for ALL frames)',
type=float,
default=3.5)
parser.add_argument(
'--sample_distance',
help='minimum distance (in meters) of two adjacent samples of a lane',
type=float,
default=0.2)
parser.add_argument(
'--max_lane_length',
help='maximum length (in meters) of a lane (longer lanes will be split)',
type=float,
default=100.0)
parser.add_argument(
'--left_lanes',
help='Number of lanes on the left',
type=int,
default=0)
parser.add_argument(
'--right_lanes',
help='Number of lanes on the right',
type=int,
default=0)
args = vars(parser.parse_args())
csv_file_name = args['input_file']
map_file_name = args['output_file']
waypoint_file_name = args['end_waypoint_file']
default_width = args['default_width']
debug_option = args['debug']
sample_distance = args['sample_distance']
max_lane_length = args['max_lane_length']
left_lanes = args['left_lanes']
right_lanes = args['right_lanes']
default_ratio = 0.5
temp_csv_file_name = '/tmp/lane_interpolation.csv'
rows = []
with open(csv_file_name, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
rows.append(row)
# Extract data samples
data = []
for row in rows[1:]:
entry = DataPoint()
entry.pos_x = float(row[0])
entry.pos_y = float(row[1])
entry.pos_z = float(row[2])
entry.theta = float(row[3])
entry.dist_left = abs(float(row[4]))
entry.conf_left = int(row[5])
if entry.dist_left < 0.1:
entry.conf_left = 0
entry.dist_right = abs(float(row[6]))
entry.conf_right = int(row[7])
if entry.dist_right < 0.1:
entry.conf_right = 0
entry.width = default_width
entry.ratio = default_ratio
data.append(entry)
# Fill in widths using interpolation
interpolate_width(data, default_width)
# Fill in ratios using interpolation
interpolate_ratio(data, default_ratio)
# Fill in centers
compute_center(data)
# Sample data at the interval of sample_distance
data = sample_data(data, sample_distance)
# Smooth center curves and widths
smooth_center_width(data)
# Output debug info if necessary
if debug_option:
with open(temp_csv_file_name, 'w') as csvfile:
for row in data:
csvfile.write(
"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" %
(row.pos_x, row.pos_y, row.pos_z, row.theta, row.dist_left, row.conf_left, row.dist_right, row.conf_right, row.width, row.ratio, row.center_x, row.center_y))
# Split data samples into lists with maximum length of max_lane_length
list_data = split_data(data, max_lane_length)
# Create individual lanes
lane_sets = []
for (lane_count, lane_data) in enumerate(list_data):
lane_set = []
for offset in range(-left_lanes, right_lanes + 1):
lane_set.append(create_lane(lane_data, offset, lane_count, left_lanes, right_lanes))
lane_sets.append(lane_set)
# Create road
road = create_road(data, left_lanes, right_lanes)
# Create map
mp = Map()
mp.header.version = "1.400000"
mp.header.date = "20170919"
mp.header.district = "101"
# Set up predecessors, successors, left/right neighbors
for lane_count in range(len(lane_sets)):
for lane_offset in range(len(lane_sets[lane_count])):
if lane_count != 0:
lane_sets[lane_count][lane_offset].predecessor_id.add().id = lane_sets[lane_count - 1][lane_offset].id.id
if lane_count != len(lane_sets) - 1:
lane_sets[lane_count][lane_offset].successor_id.add().id = lane_sets[lane_count + 1][lane_offset].id.id
if lane_offset != 0:
lane_sets[lane_count][lane_offset].left_neighbor_forward_lane_id.add().id = lane_sets[lane_count][lane_offset - 1].id.id
if lane_offset != len(lane_sets[lane_count]) - 1:
lane_sets[lane_count][lane_offset].right_neighbor_forward_lane_id.add().id = lane_sets[lane_count][lane_offset + 1].id.id
# Add road/lanes to map and let road contain lanes
mp.road.extend([road])
for lane_set in lane_sets:
for lane in lane_set:
mp.road[0].section[0].lane_id.add().id = lane.id.id
mp.lane.extend([lane])
# Output map
with open(map_file_name, "w") as f:
f.write(mp.__str__())
# Create default end_way_point using the farthest point of last central lane
last_central_lane = lane_sets[-1][left_lanes]
poi = POI()
landmark = poi.landmark.add()
landmark.name = "default"
waypoint = landmark.waypoint.add()
waypoint.id = last_central_lane.id.id
waypoint.s = last_central_lane.length
waypoint.pose.x = last_central_lane.central_curve.segment[0].line_segment.point[-1].x
waypoint.pose.y = last_central_lane.central_curve.segment[0].line_segment.point[-1].y
# Output default end_way_point
with open(waypoint_file_name, "w") as f:
f.write(poi.__str__())
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# This script aims to help developers locate forms and view code that needs to
# use the new CSRF protection in Django 1.2. It tries to find all the code that
# may need the steps described in the CSRF documentation. It does not modify
# any code directly, it merely attempts to locate it. Developers should be
# aware of its limitations, described below.
#
# For each template that contains at least one POST form, the following info is printed:
#
# <Absolute path to template>
# AKA: <Aliases (relative to template directory/directories that contain it)>
# POST forms: <Number of POST forms>
# With token: <Number of POST forms with the CSRF token already added>
# Without token:
# <File name and line number of form without token>
#
# Searching for:
# <Template names that need to be searched for in view code
# (includes templates that 'include' current template)>
#
# Found:
# <File name and line number of any view code found>
#
# The format used allows this script to be used in Emacs grep mode:
# M-x grep
# Run grep (like this): /path/to/my/virtualenv/python /path/to/django/src/extras/csrf_migration_helper.py --settings=mysettings /path/to/my/srcs
# Limitations
# ===========
#
# - All templates must be stored on disk in '.html' or '.htm' files.
# (extensions configurable below)
#
# - All Python code must be stored on disk in '.py' files. (extensions
# configurable below)
#
# - All templates must be accessible from TEMPLATE_DIRS or from the 'templates/'
# directory in apps specified in INSTALLED_APPS. Non-file based template
# loaders are out of the picture, because there is no way to ask them to
# return all templates.
#
# - If you put the {% csrf_token %} tag on the same line as the <form> tag it
# will be detected, otherwise it will be assumed that the form does not have
# the token.
#
# - It's impossible to programmatically determine which forms should and should
# not have the token added. The developer must decide when to do this,
# ensuring that the token is only added to internally targeted forms.
#
# - It's impossible to programmatically work out when a template is used. The
# attempts to trace back to view functions are guesses, and could easily fail
# in the following ways:
#
# * If the 'include' template tag is used with a variable
# i.e. {% include tname %} where tname is a variable containing the actual
# template name, rather than {% include "my_template.html" %}.
#
# * If the template name has been built up by view code instead of as a simple
# string. For example, generic views and the admin both do this. (These
# apps are both contrib and both use RequestContext already, as it happens).
#
# * If the 'ssl' tag (or any template tag other than 'include') is used to
# include the template in another template.
#
# - All templates belonging to apps referenced in INSTALLED_APPS will be
# searched, which may include third party apps or Django contrib. In some
# cases, this will be a good thing, because even if the templates of these
# apps have been fixed by someone else, your own view code may reference the
# same template and may need to be updated.
#
# You may, however, wish to comment out some entries in INSTALLED_APPS or
# TEMPLATE_DIRS before running this script.
# Improvements to this script are welcome!
# Configuration
# =============
TEMPLATE_EXTENSIONS = [
".html",
".htm",
]
PYTHON_SOURCE_EXTENSIONS = [
".py",
]
TEMPLATE_ENCODING = "UTF-8"
PYTHON_ENCODING = "UTF-8"
# Method
# ======
# Find templates:
# - template dirs
# - installed apps
#
# Search for POST forms
# - Work out what the name of the template is, as it would appear in an
# 'include' or get_template() call. This can be done by comparing template
# filename to all template dirs. Some templates can have more than one
# 'name' e.g. if a directory and one of its child directories are both in
# TEMPLATE_DIRS. This is actually a common hack used for
# overriding-and-extending admin templates.
#
# For each POST form,
# - see if it already contains '{% csrf_token %}' immediately after <form>
# - work back to the view function(s):
# - First, see if the form is included in any other templates, then
# recursively compile a list of affected templates.
# - Find any code function that references that template. This is just a
# brute force text search that can easily return false positives
# and fail to find real instances.
import os
import sys
import re
from optparse import OptionParser
USAGE = """
This tool helps to locate forms that need CSRF tokens added and the
corresponding view code. This processing is NOT foolproof, and you should read
the help contained in the script itself. Also, this script may need configuring
(by editing the script) before use.
Usage:
python csrf_migration_helper.py [--settings=path.to.your.settings] /path/to/python/code [more paths...]
Paths can be specified as relative paths.
With no arguments, this help is printed.
"""
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_TOKEN_RE = re.compile('\{% csrf_token')
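# Small illustrative check of the two regexes above (added for clarity, never
# called by the script): a line counts as a POST form if _POST_FORM_RE matches,
# and counts as already protected only when '{% csrf_token %}' appears on that
# same line, which is exactly the single-line limitation noted in the header.
def _classify_form_line(line):
    """Return (is_post_form, has_token_on_same_line) for one template line."""
    is_post_form = _POST_FORM_RE.search(line) is not None
    has_token = is_post_form and _TOKEN_RE.search(line) is not None
    return (is_post_form, has_token)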
def get_template_dirs():
"""
Returns a set of all directories that contain project templates.
"""
from django.conf import settings
dirs = set()
if ('django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.filesystem.Loader' in settings.TEMPLATE_LOADERS):
dirs.update(map(unicode, settings.TEMPLATE_DIRS))
if ('django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.app_directories.Loader' in settings.TEMPLATE_LOADERS):
from django.template.loaders.app_directories import app_template_dirs
dirs.update(app_template_dirs)
return dirs
def make_template_info(filename, root_dirs):
"""
Creates a Template object for a filename, calculating the possible
relative_filenames from the supplied filename and root template directories
"""
return Template(filename,
[filename[len(d)+1:] for d in root_dirs if filename.startswith(d)])
class Template(object):
def __init__(self, absolute_filename, relative_filenames):
self.absolute_filename, self.relative_filenames = absolute_filename, relative_filenames
def content(self):
try:
return self._content
except AttributeError:
fd = open(self.absolute_filename)
try:
content = fd.read().decode(TEMPLATE_ENCODING)
except UnicodeDecodeError, e:
message = '%s in %s' % (
e[4], self.absolute_filename.encode('UTF-8', 'ignore'))
raise UnicodeDecodeError(*(e.args[:4] + (message,)))
fd.close()
self._content = content
return content
content = property(content)
def post_form_info(self):
"""
Get information about any POST forms in the template.
Returns [(linenumber, csrf_token added)]
"""
matches = []
for ln, line in enumerate(self.content.split("\n")):
m = _POST_FORM_RE.search(line)
if m is not None:
matches.append((ln + 1, _TOKEN_RE.search(line) is not None))
return matches
def includes_template(self, t):
"""
Returns true if this template includes template 't' (via {% include %})
"""
for r in t.relative_filenames:
if re.search(r'\{%\s*include\s+(\'|")' + re.escape(r) + r'(\1)\s*%\}', self.content):
return True
return False
def related_templates(self):
"""
Returns all templates that include this one, recursively. (starting
with this one)
"""
try:
return self._related_templates
except AttributeError:
pass
retval = set([self])
for t in self.all_templates:
if t.includes_template(self):
# If two templates mutually include each other, directly or
# indirectly, we have a problem here...
retval = retval.union(t.related_templates())
self._related_templates = retval
return retval
def __repr__(self):
return repr(self.absolute_filename)
def __eq__(self, other):
return self.absolute_filename == other.absolute_filename
def __hash__(self):
return hash(self.absolute_filename)
def get_templates(dirs):
"""
Returns all files in dirs that have template extensions, as Template
objects.
"""
templates = set()
for root in dirs:
for (dirpath, dirnames, filenames) in os.walk(root):
for f in filenames:
if len([True for e in TEMPLATE_EXTENSIONS if f.endswith(e)]) > 0:
t = make_template_info(os.path.join(dirpath, f), dirs)
# templates need to be able to search others:
t.all_templates = templates
templates.add(t)
return templates
def get_python_code(paths):
"""
Returns all Python code, as a list of tuples, each one being:
(filename, list of lines)
"""
retval = []
for p in paths:
if not os.path.isdir(p):
raise Exception("'%s' is not a directory." % p)
for (dirpath, dirnames, filenames) in os.walk(p):
for f in filenames:
if len([True for e in PYTHON_SOURCE_EXTENSIONS if f.endswith(e)]) > 0:
fn = os.path.join(dirpath, f)
fd = open(fn)
content = [l.decode(PYTHON_ENCODING) for l in fd.readlines()]
fd.close()
retval.append((fn, content))
return retval
def search_python_list(python_code, template_names):
"""
Searches python code for a list of template names.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = []
for tn in template_names:
retval.extend(search_python(python_code, tn))
retval = list(set(retval))
retval.sort()
return retval
def search_python(python_code, template_name):
"""
Searches Python code for a template name.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = []
for fn, content in python_code:
for ln, line in enumerate(content):
if ((u'"%s"' % template_name) in line) or \
((u"'%s'" % template_name) in line):
retval.append((fn, ln + 1))
return retval
def main(pythonpaths):
template_dirs = get_template_dirs()
templates = get_templates(template_dirs)
python_code = get_python_code(pythonpaths)
for t in templates:
# Logic
form_matches = t.post_form_info()
num_post_forms = len(form_matches)
form_lines_without_token = [ln for (ln, has_token) in form_matches if not has_token]
if num_post_forms == 0:
continue
to_search = [rf for rt in t.related_templates() for rf in rt.relative_filenames]
found = search_python_list(python_code, to_search)
# Display:
print t.absolute_filename
for r in t.relative_filenames:
print u" AKA %s" % r
print u" POST forms: %s" % num_post_forms
print u" With token: %s" % (num_post_forms - len(form_lines_without_token))
if form_lines_without_token:
print u" Without token:"
for ln in form_lines_without_token:
print "%s:%d:" % (t.absolute_filename, ln)
print
print u" Searching for:"
for r in to_search:
print u" " + r
print
print u" Found:"
if len(found) == 0:
print " Nothing"
else:
for fn, ln in found:
print "%s:%d:" % (fn, ln)
print
print "----"
parser = OptionParser(usage=USAGE)
parser.add_option("", "--settings", action="store", dest="settings", help="Dotted path to settings file")
if __name__ == '__main__':
options, args = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
settings = getattr(options, 'settings', None)
if settings is None:
if os.environ.get("DJANGO_SETTINGS_MODULE", None) is None:
print "You need to set DJANGO_SETTINGS_MODULE or use the '--settings' parameter"
sys.exit(1)
else:
os.environ["DJANGO_SETTINGS_MODULE"] = settings
main(args)
|
|
#!/usr/bin/python
from console.plugins import *
class WLC_Plugin(ConsolePlugin):
def plugin_description(self):
return 'WLC testing related functionality'
def plugin_load(self):
try: # ensure capwap_internal is imported
from scapy.contrib.capwap import CAPWAP_PKTS
except:
del sys.modules['scapy.contrib.capwap']
raise
if 'trex_stl_lib.trex_stl_wlc' in sys.modules:
del sys.modules['trex_stl_lib.trex_stl_wlc']
from trex_stl_lib.trex_stl_wlc import AP_Manager
self.ap_manager = AP_Manager(self.trex_client)
self.add_argument('-p', '--ports', nargs = '+', action = 'merge', type = int, default = None,
dest = 'port_list',
help = 'A list of ports on which to apply the command. Default = all')
self.add_argument('-v', default = 2, type = int,
dest = 'verbose_level',
help = 'Verbosity level, 0 = quiet, 1 = errors (default), 2 = warnings, 3 = info, 4 = debug')
self.add_argument('-c', '--count', default = 1, type = int,
dest = 'count',
help = 'Amount of actions to apply')
self.add_argument('--cert', type = is_valid_file,
dest = 'ap_cert',
help = 'Certificate filename used for DTLS')
self.add_argument('--priv', type = is_valid_file,
dest = 'ap_privkey',
help = 'Private key filename used for DTLS')
self.add_argument('-i', '--ids', nargs = '+', default = [], action = 'merge',
dest = 'ap_ids',
help = 'A list of AP ID(s) - Name or MAC or IP')
self.add_argument('-i', '--ids', nargs = '+', action = 'merge', type = str,
dest = 'client_ids',
help = 'A list of client IDs - MAC or IP')
self.add_argument('-i', '--ids', nargs = '+', action = 'merge', type = str,
dest = 'device_ids',
help = 'A list of AP and/or Client IDs on which to apply the command')
self.add_argument('-f', '--file', required = True, type = is_valid_file,
dest = 'file_path',
help = 'File path to load')
self.add_argument('-m', '--mult', default = '1', type = parsing_opts.match_multiplier_strict,
dest = 'multiplier',
help = parsing_opts.match_multiplier_help)
self.add_argument('-t', metavar = 'T1=VAL,T2=VAL ...', action = 'merge', default = None, type = parsing_opts.decode_tunables,
dest = 'tunables',
help = 'Sets tunables for a profile. Example = -t fsize=100,pg_id=7')
self.add_argument('--total', action = 'store_true',
dest = 'total_mult',
help = 'Traffic will be divided between all clients specified')
self.add_argument('-m', '--mac', type = check_mac_addr,
dest = 'ap_mac',
help = 'Base AP MAC')
self.add_argument('-i', '--ip', type = check_ipv4_addr,
dest = 'ap_ip',
help = 'Base AP IP')
self.add_argument('-u', '--udp', type = int,
dest = 'ap_udp',
help = 'Base AP UDP port')
self.add_argument('-r', '--radio', metavar = 'MAC', type = check_mac_addr,
dest = 'ap_radio',
help = 'Base AP Radio MAC')
self.add_argument('--client-mac', metavar = 'MAC', type = check_mac_addr,
dest = 'client_mac',
help = 'Base client MAC')
self.add_argument('--client-ip', metavar = 'IP', type = check_ipv4_addr,
dest = 'client_ip',
help = 'Base client IP')
self.add_argument('--save', action = 'store_true',
dest = 'base_save',
help = 'Save "next" AP and Client base values. Will be loaded at start of console.')
self.add_argument('--load', action = 'store_true',
dest = 'base_load',
help = 'Load saved AP and Client base values.')
def plugin_unload(self):
try:
self.do_close(None)
except:
import traceback
traceback.print_exc()
raise
def do_close(self, port_list):
'''Closes all wlc-related stuff'''
self.ap_manager.close(port_list)
def show_base(self):
general_table = text_tables.Texttable(max_width = 200)
general_table.set_cols_align(['l', 'l'])
general_table.set_deco(15)
aps = self.ap_manager.get_connected_aps()
if aps:
info_arr = [('IP', aps[0].ip_dst), ('Hostname', aps[0].wlc_name.decode('ascii')), ('Image ver', '.'.join(['%s' % c for c in aps[0].wlc_sw_ver]))]
general_table.add_row([bold('WLC'), ' / '.join(['%s: %s' % (k, v or '?') for k, v in info_arr])])
general_table.add_row([bold('Next AP:'), 'LAN MAC: %s / IP: %s / UDP: %s / Radio MAC: %s' % self.ap_manager._gen_ap_params()])
general_table.add_row([bold('Next Client:'), 'MAC: %s / IP: %s' % self.ap_manager._gen_client_params()])
self.ap_manager.log(general_table.draw())
def do_show(self):
'''Show status of APs'''
self.show_base()
info = self.ap_manager.get_info()
if not info:
return
ap_client_info_table = text_tables.Texttable(max_width = 200)
ap_client_info_table.set_cols_align(['c', 'l', 'l'])
ap_client_info_table.set_deco(15) # full
categories = ['Port', 'AP(s) info', 'Client(s) info']
ap_client_info_table.header([bold(c) for c in categories])
for port_id in sorted(info.keys()):
port_info = '%s\nBG thread: %s' % (port_id, 'alive' if info[port_id]['bg_thread_alive'] else bold('dead'))
ap_arr = []
client_arr = []
name_per_num = {}
for ap_name in sorted(info[port_id]['aps'].keys(), key = natural_sorted_key):
ap = info[port_id]['aps'][ap_name]
ap_info = 'Name: %s' % ap_name
ap_info += '\nIP: %s / MAC: %s' % (ap['ip'], ap['mac'])
is_connected = ap['dtls_established'] and ap['is_connected']
ap_info += '\nConnected: %s / SSID: %s' % ('Yes' if is_connected else bold('No'), ap['ssid'] or bold('-'))
ap_lines = ap_info.count('\n') + 1
ap_info += '\n' * max(0, len(ap['clients']) - ap_lines) # pad to be same size as clients
ap_arr.append(ap_info)
clients_arr = []
for client in ap['clients']:
clients_arr.append('IP: %s / MAC: %s / Assoc: %s' % (client['ip'], client['mac'], 'Yes' if client['is_associated'] else bold('No')))
if clients_arr:
client_info = '\n'.join(clients_arr)
client_info += '\n' * max(0, ap_lines - len(clients_arr)) # pad to be same size as ap
else:
client_info = 'None'
client_info += '\n' * max(0, ap_lines - 1) # pad to be same size as ap
client_arr.append(client_info)
ap_client_info_table.add_row([
port_info,
('\n' + ('- ' * 22) + '\n').join(ap_arr),
('\n' + ('- ' * 25) + '\n').join(client_arr)])
self.ap_manager.log(ap_client_info_table.draw())
self.ap_manager.log('')
def do_create_ap(self, port_list, count, verbose_level, ap_cert, ap_privkey):
'''Create AP(s) on port'''
if count < 1:
            raise Exception('Count should be greater than zero')
if not port_list:
raise Exception('Please specify TRex ports where to add AP(s)')
bu_mac, bu_ip, bu_udp, bu_radio = self.ap_manager._gen_ap_params()
init_ports = [port for port in port_list if port not in self.ap_manager.service_ctx]
ap_names = []
success = False
try:
self.ap_manager.init(init_ports) # implicitly for console
for port in port_list:
for _ in range(count):
ap_params = self.ap_manager._gen_ap_params()
self.ap_manager.create_ap(port, *ap_params, verbose_level = verbose_level, rsa_priv_file = ap_privkey, rsa_cert_file = ap_cert)
ap_names.append(ap_params[0])
assert ap_names
self.ap_manager.join_aps(ap_names)
success = True
finally:
if not success:
for name in ap_names: # rollback
self.ap_manager.remove_ap(name)
self.ap_manager.set_base_values(mac = bu_mac, ip = bu_ip, udp = bu_udp, radio = bu_radio)
close_ports = [port for port in init_ports if port in self.ap_manager.service_ctx]
if close_ports:
self.ap_manager.close(close_ports)
def do_add_client(self, ap_ids, count):
'''Add client(s) to AP(s)'''
if count < 1 or count > 200:
raise Exception('Count of clients should be within range 1-200')
ap_ids = ap_ids or self.ap_manager.aps
bu_mac, bu_ip = self.ap_manager._gen_client_params()
client_ips = []
success = False
try:
for ap_id in ap_ids:
for _ in range(count):
client_params = self.ap_manager._gen_client_params()
self.ap_manager.create_client(*client_params, ap_id = self.ap_manager._get_ap_by_id(ap_id))
client_ips.append(client_params[1])
self.ap_manager.join_clients(client_ips)
success = True
finally:
if not success:
for ip in client_ips: # rollback
self.ap_manager.remove_client(ip)
self.ap_manager.set_base_values(client_mac = bu_mac, client_ip = bu_ip)
def do_reconnect(self, device_ids):
'''Reconnect disconnected AP(s) or Client(s).'''
device_ids = device_ids or ([a.name for a in self.ap_manager.aps] + [c.ip for c in self.ap_manager.clients])
ports = set()
aps = set()
clients = set()
err_ids = set()
for device_id in device_ids:
try:
ap = self.ap_manager._get_ap_by_id(device_id)
aps.add(ap)
clients |= set(ap.clients)
ports.add(ap.port_id)
except:
try:
client = self.ap_manager._get_client_by_id(device_id)
clients.add(client)
aps.add(client.ap)
ports.add(client.ap.port_id)
except:
err_ids.add(device_id)
if err_ids:
raise Exception('Invalid IDs: %s' % ', '.join(sorted(err_ids, key = natural_sorted_key)))
if not self.ap_manager.bg_client.is_connected():
self.ap_manager.bg_client.connect()
for port_id in ports:
if port_id in self.ap_manager.service_ctx:
if not self.ap_manager.service_ctx[port_id]['bg'].is_running():
self.ap_manager.service_ctx[port_id]['bg'].run()
non_init_ports = [p for p in ports if p not in self.ap_manager.service_ctx]
not_joined_aps = [a for a in aps if not (a.is_connected and a.is_dtls_established)]
not_assoc_clients = [c for c in clients if not (c.is_associated and c.seen_arp_reply)]
if not (non_init_ports or not_joined_aps or not_assoc_clients):
self.ap_manager.log(bold('Nothing to reconnect, everything works fine.'))
return
while non_init_ports:
self.ap_manager.init(non_init_ports[:10])
non_init_ports = non_init_ports[10:]
while not_joined_aps:
self.ap_manager.join_aps(not_joined_aps[:10])
not_joined_aps = not_joined_aps[10:]
while not_assoc_clients:
self.ap_manager.join_clients(not_assoc_clients[:20])
not_assoc_clients = not_assoc_clients[20:]
def do_start(self, client_ids, file_path, multiplier, tunables, total_mult):
        '''Start traffic on behalf of client(s).'''
if not client_ids:
clients = self.ap_manager.clients
else:
clients = set([self.ap_manager._get_client_by_id(id) for id in client_ids])
if len(client_ids) != len(clients):
raise Exception('Client IDs should be unique')
if not clients:
            raise Exception('No clients to start traffic on behalf of!')
ports = list(set([client.ap.port_id for client in clients]))
# stop ports if needed
active_ports = list_intersect(self.trex_client.get_active_ports(), ports)
if active_ports:
self.trex_client.stop(active_ports)
# remove all streams
self.trex_client.remove_all_streams(ports)
# pack the profile
try:
tunables = tunables or {}
for client in clients:
profile = STLProfile.load(file_path,
direction = tunables.get('direction', client.ap.port_id % 2),
port_id = client.ap.port_id,
**tunables)
self.ap_manager.add_streams(client, profile.get_streams())
except STLError as e:
msg = bold("\nError loading profile '%s'" % file_path)
self.ap_manager.log(msg + '\n')
self.ap_manager.log(e.brief() + "\n")
self.trex_client.start(ports = ports, mult = multiplier, force = True, total = total_mult)
return RC_OK()
def do_base(self, ap_mac, ap_ip, ap_udp, ap_radio, client_mac, client_ip, base_save, base_load):
'''Set base values of MAC, IP etc. for created AP/Client.\nWill be increased for each new device.'''
self.ap_manager.set_base_values(ap_mac, ap_ip, ap_udp, ap_radio, client_mac, client_ip, base_save, base_load)
self.show_base()
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from alembic import op
from oslo.serialization import jsonutils
import six
import sqlalchemy as sa
from sqlalchemy.sql import text
from nailgun.settings import settings
def upgrade_enum(table, column_name, enum_name, old_options, new_options):
old_type = sa.Enum(*old_options, name=enum_name)
new_type = sa.Enum(*new_options, name=enum_name)
tmp_type = sa.Enum(*new_options, name="_" + enum_name)
# Create a temporary type, convert and drop the "old" type
tmp_type.create(op.get_bind(), checkfirst=False)
op.execute(
u'ALTER TABLE {0} ALTER COLUMN {1} TYPE _{2}'
u' USING {1}::text::_{2}'.format(
table,
column_name,
enum_name
)
)
old_type.drop(op.get_bind(), checkfirst=False)
# Create and convert to the "new" type
new_type.create(op.get_bind(), checkfirst=False)
op.execute(
u'ALTER TABLE {0} ALTER COLUMN {1} TYPE {2}'
u' USING {1}::text::{2}'.format(
table,
column_name,
enum_name
)
)
tmp_type.drop(op.get_bind(), checkfirst=False)
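# Hedged usage sketch for upgrade_enum(); the table, column, type and option
# names below are hypothetical and only illustrate the temporary-type swap
# performed above. Wrapped in a function so nothing runs at import time.
def _example_upgrade_enum():
    upgrade_enum(
        table="nodes",                    # hypothetical table name
        column_name="status",             # hypothetical column name
        enum_name="node_status",          # hypothetical enum type name
        old_options=("ready", "error"),
        new_options=("ready", "error", "removing"))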
def drop_enum(name):
op.execute(
u'DROP TYPE {0}'.format(name)
)
def convert_condition_value(val):
if isinstance(val, six.string_types):
return "'{0}'".format(val)
return str(val).lower()
def negate_condition(condition):
"""Negates condition.
"""
return "not ({0})".format(condition)
def remove_question_operator(expression):
"""Removes '?' operator from expressions, it was deprecated in 6.0
"""
return re.sub(r'(:[\w\.\-]+)\?', '\\1', expression)
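# Tiny illustration of the rewrite performed above (defined only as a sketch,
# never called): "settings:foo.bar? == true" becomes "settings:foo.bar == true".
def _demo_remove_question_operator():
    return remove_question_operator("settings:foo.bar? == true")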
def upgrade_release_attributes_50_to_51(attrs_meta):
if not attrs_meta.get('editable'):
return attrs_meta
def depends_to_restrictions(depends, restrictions):
for cond in depends:
expr = cond.keys()[0]
restrictions.append(
expr + " != " + convert_condition_value(cond[expr]))
def conflicts_to_restrictions(conflicts, restrictions):
for cond in conflicts:
expr = cond.keys()[0]
restrictions.append(
expr + " == " + convert_condition_value(cond[expr]))
for _, group in six.iteritems(attrs_meta.get('editable')):
for _, attr in six.iteritems(group):
restrictions = []
if attr.get('depends'):
depends_to_restrictions(attr['depends'], restrictions)
attr.pop('depends')
if attr.get('conflicts'):
conflicts_to_restrictions(attr['conflicts'], restrictions)
attr.pop('conflicts')
if restrictions:
attr['restrictions'] = restrictions
return attrs_meta
def upgrade_release_attributes_51_to_60(attrs_meta):
"""Remove '?' operator from expressions
"""
if not attrs_meta.get('editable'):
return attrs_meta
def convert_restrictions(restrictions):
result = []
for restriction in restrictions:
if isinstance(restriction, basestring):
restriction = remove_question_operator(restriction)
else:
restriction['condition'] = remove_question_operator(
restriction['condition'])
result.append(restriction)
return result
for _, group in six.iteritems(attrs_meta.get('editable')):
for _, attr in six.iteritems(group):
if 'restrictions' in attr:
attr['restrictions'] = convert_restrictions(
attr['restrictions'])
if 'values' in attr:
for value in attr['values']:
if 'restrictions' in value:
value['restrictions'] = convert_restrictions(
value['restrictions'])
return attrs_meta
def upgrade_release_roles_50_to_51(roles_meta):
for _, role in six.iteritems(roles_meta):
if role.get('depends'):
for depend in role['depends']:
cond = depend.get('condition')
if isinstance(cond, dict):
expr = cond.keys()[0]
depend['condition'] = \
expr + " == " + convert_condition_value(cond[expr])
return roles_meta
def upgrade_release_roles_51_to_60(roles_meta, add_meta=None):
"""Convert all role_metadata.depends values into
roles_metadata.restrictions.
"""
add_meta = add_meta or {}
for role_name, role in six.iteritems(roles_meta):
for depend in role.get('depends', []):
cond = depend.get('condition')
new_restriction = {
'condition': remove_question_operator(negate_condition(cond))
}
if 'warning' in depend:
new_restriction['message'] = depend['warning']
role.setdefault('restrictions', [])
role['restrictions'].append(new_restriction)
if 'depends' in role:
del role['depends']
if role_name in add_meta:
role.update(add_meta[role_name])
return roles_meta
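# Illustrative (made-up) before/after for the conversion above: a 'depends'
# entry is rewritten into a negated, question-operator-free 'restrictions'
# entry, with any 'warning' carried over as 'message'. Inert data, shown for
# clarity only.
_EXAMPLE_ROLE_DEPENDS_BEFORE = {
    'cinder': {'depends': [
        {'condition': 'settings:storage.volumes_lvm.value? == true',
         'warning': 'LVM volumes must be enabled'}]}}
_EXAMPLE_ROLE_DEPENDS_AFTER = {
    'cinder': {'restrictions': [
        {'condition': 'not (settings:storage.volumes_lvm.value == true)',
         'message': 'LVM volumes must be enabled'}]}}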
def upgrade_clusters_replaced_info(connection):
select = text(
"""SELECT id, replaced_provisioning_info, replaced_deployment_info
FROM clusters""")
clusters = connection.execute(select)
for cluster in clusters:
nodes_select = text(
"""SELECT id FROM nodes WHERE cluster_id=:id""")
nodes = connection.execute(
nodes_select,
id=cluster[0])
provisioning_info = jsonutils.loads(cluster[1])
deployment_nodes = jsonutils.loads(cluster[2])
provisioning_nodes = provisioning_info.pop('nodes', [])
for node in nodes:
node_deploy = [d for d in deployment_nodes
if d['uid'] == str(node[0])]
node_provision = next((d for d in provisioning_nodes
if d['uid'] == str(node[0])), {})
update_node = text(
"""UPDATE nodes
SET replaced_deployment_info = :deploy,
replaced_provisioning_info = :provision
WHERE id = :id""")
connection.execute(
update_node,
deploy=jsonutils.dumps(node_deploy),
provision=jsonutils.dumps(node_provision),
id=node[0])
update_cluster = text(
"""UPDATE clusters
SET replaced_deployment_info = :deploy,
replaced_provisioning_info = :provision
WHERE id = :id""")
connection.execute(
update_cluster,
deploy=jsonutils.dumps({}),
provision=jsonutils.dumps(provisioning_info),
id=cluster[0])
def upgrade_release_set_deployable_false(connection, versions):
"""Set deployable=False for a given versions list.
:param connection: a database connection
:param versions: a list of versions to be forbidden
"""
update_query = text(
"UPDATE releases SET is_deployable = 'false' "
" WHERE version IN :versions")
connection.execute(update_query, versions=tuple(versions))
def upgrade_release_fill_orchestrator_data(connection, versions):
"""Fill release_orchestrator_data if it's not filled yet.
:param connection: a database connection
    :param versions: a list of release versions to fill data for
"""
for version in versions:
select_query = text(
"SELECT id, operating_system FROM releases "
" WHERE version LIKE :version AND id NOT IN ("
" SELECT release_id FROM release_orchestrator_data "
" )")
releases = connection.execute(select_query, version=version)
for release in releases:
insert_query = text(
"INSERT INTO release_orchestrator_data ("
" release_id, repo_metadata, puppet_manifests_source, "
" puppet_modules_source)"
" VALUES ("
" :release_id, "
" :repo_metadata, "
" :puppet_manifests_source, "
" :puppet_modules_source)")
            # if release_orchestrator_data isn't filled, the release's repos
            # live in an unversioned directory whose path contains "fuelweb"
repo_path = 'http://{MASTER_IP}:8080/{OS}/fuelweb/x86_64'.format(
MASTER_IP=settings.MASTER_IP, OS=release[1].lower())
# for ubuntu we need to add 'trusty main'
if release[1].lower() == 'ubuntu':
repo_path += ' trusty main'
connection.execute(
insert_query,
release_id=release[0],
repo_metadata=(
'{{ "nailgun": "{0}" }}'.format(repo_path)),
puppet_manifests_source=(
'rsync://{MASTER_IP}:/puppet/manifests/'.format(
MASTER_IP=settings.MASTER_IP)),
puppet_modules_source=(
'rsync://{MASTER_IP}:/puppet/modules/'.format(
MASTER_IP=settings.MASTER_IP)),
)
def move_orchestrator_data_to_attributes(connection):
"""Moving data from orchestrator data db table to cluster attributes
:param connection: a database connection
"""
select_query = text(
"SELECT "
"id, "
"release_id, "
"repo_metadata, "
"puppet_manifests_source, "
"puppet_modules_source "
"FROM release_orchestrator_data")
for odata in connection.execute(select_query):
select_query = text(
"SELECT id, attributes_metadata, operating_system "
" FROM releases WHERE id = :release_id")
for release in connection.execute(select_query, release_id=odata[1]):
repo_setup = {
'metadata': {
# old releases shouldn't be able to edit
# repos
'restrictions': [{
'condition': 'true',
'action': 'hide',
}],
'label': 'Repositories',
'weight': 50,
},
'repos': {
'type': 'custom_repo_configuration',
'value': [],
}}
puppet = {
'manifests': odata[3],
'modules': odata[4],
}
if release[2].lower() == 'ubuntu':
for name, repo in six.iteritems(jsonutils.loads(odata[2])):
uri, suite, section = repo.split()
repo_setup['repos']['value'].append({
'type': 'deb',
'name': name,
'uri': uri,
'suite': suite,
'section': section,
'priority': 1001
})
elif release[2].lower() == 'centos':
for name, repo in six.iteritems(jsonutils.loads(odata[2])):
repo_setup['repos']['value'].append({
'type': 'rpm',
'name': name,
'uri': repo,
'priority': 1
})
# update releases
attributes_metadata = jsonutils.loads(release[1])
attributes_metadata['editable'].update({'repo_setup': repo_setup})
attributes_metadata['generated'].update({'puppet': puppet})
update_query = text(
"UPDATE releases "
" SET attributes_metadata = :attributes_metadata "
" WHERE id = :release_id")
connection.execute(
update_query,
attributes_metadata=jsonutils.dumps(attributes_metadata),
release_id=odata[1])
# update cluster attributes
select_query = text(
"SELECT a.id, a.editable, a.generated "
" FROM attributes as a INNER JOIN clusters as c "
" ON a.cluster_id = c.id "
" WHERE c.release_id = :release_id")
for attr in connection.execute(select_query, release_id=odata[1]):
editable = jsonutils.loads(attr[1])
generated = jsonutils.loads(attr[2])
editable.update({'repo_setup': repo_setup})
generated.update({'puppet': puppet})
connection.execute(
text(
"UPDATE attributes "
" SET editable = :editable, generated = :generated "
" WHERE id = :attr_id"),
editable=jsonutils.dumps(editable),
generated=jsonutils.dumps(generated),
attr_id=attr[0])
def upgrade_attributes_metadata_6_0_to_6_1(attributes_meta):
attributes_meta['editable']['storage']['volumes_lvm']['description'] = \
'It is recommended to have at least one Storage - Cinder LVM node.'
return attributes_meta
def upgrade_master_node_settings_6_0_to_6_1(master_node_settings):
master_node_settings['statistics']['name']['type'] = 'hidden'
master_node_settings['statistics']['email']['type'] = 'hidden'
master_node_settings['statistics']['company']['type'] = 'hidden'
master_node_settings['tracking'] = {
"email": {
"type": "text",
"value": "",
"label": "Mirantis Account Email",
"weight": 10,
"regex": {
"source": "^\\S+@\\S+$",
"error": "Invalid email"
}
},
"password": {
"type": "password",
"value": "",
"label": "Password",
"weight": 20,
"regex": {
"source": "\\S",
"error": "Password cannot be empty"
}
}
}
master_node_settings['statistics']['name']['regex'] = {}
master_node_settings['statistics']['email']['regex'] = {}
master_node_settings['statistics']['company']['regex'] = {}
master_node_settings['statistics']['name']['restrictions'] = {}
master_node_settings['statistics']['email']['restrictions'] = {}
master_node_settings['statistics']['company']['restrictions'] = {}
master_node_settings['statistics']['send_user_info']['restrictions'] = {}
return master_node_settings
def upgrade_role_limits_6_0_to_6_1(roles_meta, _limits_to_update):
for role_name, role_definition in six.iteritems(roles_meta):
if role_name in _limits_to_update:
role_definition['limits'] = _limits_to_update[role_name]
return roles_meta
def upgrade_role_restrictions_6_0_to_6_1(roles_meta, _new_role_restrictions):
for role_name, role_definition in six.iteritems(roles_meta):
if role_name in _new_role_restrictions:
role_definition['restrictions'] = _new_role_restrictions[role_name]
return roles_meta
def upgrade_6_0_to_6_1_plugins_cluster_attrs_use_ids_mapping(connection):
"""In Fuel 6.0 we had plugin version in cluster attributes
to identify which plugin should be enabled or disabled.
    In the 6.1 release we have the plugin updates feature, which means
    that a single plugin can be updated/overwritten with a newer
    version. For example 1.0.0 can be replaced with 1.0.1.
    As a result we cannot rely on versions anymore; here we
convert version mapping to plugin ids.
See blueprint:
https://blueprints.launchpad.net/fuel/+spec/plugins-security-fixes-delivery
"""
select_attrs = text("""SELECT id, editable FROM attributes""")
select_plugins = text(
"""SELECT id FROM plugins
WHERE name = :plugin_name AND
version = :plugin_version""")
update_attrs = text(
"""UPDATE attributes
SET editable = :editable
WHERE id = :id""")
attrs_list = connection.execute(select_attrs)
for raw_attrs in attrs_list:
attr_id = raw_attrs[0]
attrs = jsonutils.loads(raw_attrs[1])
for key, attr in six.iteritems(attrs):
metadata = attr.get('metadata', {})
plugin_version = metadata.get('plugin_version')
if not plugin_version:
continue
plugin_name = key
# If there is no plugin with such version
# and name, it means that something was wrong
# and somebody deleted the plugin from database
# we must not fail migration in this case
plugin_id = None
plugins = list(connection.execute(
select_plugins,
plugin_name=plugin_name,
plugin_version=plugin_version))
if plugins:
plugin_id = plugins[0][0]
del attr['metadata']['plugin_version']
attr['metadata']['plugin_id'] = plugin_id
connection.execute(
update_attrs,
editable=jsonutils.dumps(attrs),
id=attr_id)
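# Illustrative (made-up) shape of the attribute rewrite above: the plugin's
# 'metadata.plugin_version' key is replaced with 'metadata.plugin_id', looked
# up from the plugins table (None if the plugin row no longer exists).
_EXAMPLE_PLUGIN_ATTRS_BEFORE = {'my_plugin': {'metadata': {'plugin_version': '1.0.0'}}}
_EXAMPLE_PLUGIN_ATTRS_AFTER = {'my_plugin': {'metadata': {'plugin_id': 42}}}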
def upgrade_networks_metadata_to_6_1(networks_meta, _bonding_metadata):
networks_meta['bonding'] = _bonding_metadata
return networks_meta
|
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
import numpy as np
import sqlite3
from sqlite3 import Row
import warnings
from logbook import Logger
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
from zipline.errors import (
ConsumeAssetMetaDataError,
InvalidAssetType,
MultipleSymbolsFound,
RootSymbolNotFound,
SidAssignmentError,
SidNotFound,
SymbolNotFound,
    MapAssetIdentifierIndexError,
    NotAssetConvertible,
)
from zipline.assets._assets import (
Asset, Equity, Future
)
log = Logger('assets.py')
# Expected fields for an Asset's metadata
ASSET_FIELDS = [
'sid',
'asset_type',
'symbol',
'root_symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
'notice_date',
'expiration_date',
'contract_multiplier',
# The following fields are for compatibility with other systems
'file_name', # Used as symbol
'company_name', # Used as asset_name
'start_date_nano', # Used as start_date
'end_date_nano', # Used as end_date
]
# Expected fields for an Asset's metadata
ASSET_TABLE_FIELDS = [
'sid',
'symbol',
'asset_name',
'start_date',
'end_date',
'first_traded',
'exchange',
]
# Expected fields for an Asset's metadata
FUTURE_TABLE_FIELDS = ASSET_TABLE_FIELDS + [
'root_symbol',
'notice_date',
'expiration_date',
'contract_multiplier',
]
EQUITY_TABLE_FIELDS = ASSET_TABLE_FIELDS
# Create the query once from the fields, so that the join is not done
# repeatedly.
FUTURE_BY_SID_QUERY = 'select {0} from futures where sid=?'.format(
", ".join(FUTURE_TABLE_FIELDS))
EQUITY_BY_SID_QUERY = 'select {0} from equities where sid=?'.format(
", ".join(EQUITY_TABLE_FIELDS))
class AssetFinder(object):
def __init__(self,
metadata=None,
allow_sid_assignment=True,
fuzzy_char=None,
db_path=':memory:',
create_table=True):
self.fuzzy_char = fuzzy_char
# This flag controls if the AssetFinder is allowed to generate its own
# sids. If False, metadata that does not contain a sid will raise an
# exception when building assets.
self.allow_sid_assignment = allow_sid_assignment
if allow_sid_assignment:
self.end_date_to_assign = normalize_date(
pd.Timestamp('now', tz='UTC'))
self.conn = sqlite3.connect(db_path)
self.conn.text_factory = str
self.cursor = self.conn.cursor()
# The AssetFinder also holds a nested-dict of all metadata for
# reference when building Assets
self.metadata_cache = {}
# Create table and read in metadata.
# Should we use flags like 'r', 'w', instead?
# What we need to support is:
# - A 'throwaway' mode where the metadata is read each run.
# - A 'write' mode where the data is written to the provided db_path
        # - A 'read' mode where the asset finder uses a preexisting db.
if create_table:
self.create_db_tables()
if metadata is not None:
self.consume_metadata(metadata)
        # Cache for lookup of assets by sid; the objects in the asset lookup may
# be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset, _retrieve_equity etc. will populate the cache on
# first retrieval.
self._asset_cache = {}
self._equity_cache = {}
self._future_cache = {}
self._asset_type_cache = {}
def create_db_tables(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE equities(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
fuzzy text
)""")
c.execute('CREATE INDEX equities_sid on equities(sid)')
c.execute('CREATE INDEX equities_symbol on equities(symbol)')
c.execute('CREATE INDEX equities_fuzzy on equities(fuzzy)')
c.execute("""
CREATE TABLE futures(
sid integer,
symbol text,
asset_name text,
start_date integer,
end_date integer,
first_traded integer,
exchange text,
root_symbol text,
notice_date integer,
expiration_date integer,
contract_multiplier real
)""")
c.execute('CREATE INDEX futures_sid on futures(sid)')
        c.execute('CREATE INDEX futures_root_symbol on futures(root_symbol)')
c.execute("""
CREATE TABLE asset_router
(sid integer,
asset_type text)
""")
c.execute('CREATE INDEX asset_router_sid on asset_router(sid)')
self.conn.commit()
def asset_type_by_sid(self, sid):
try:
return self._asset_type_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
# Python 3 compatibility required forcing to int for sid = 0.
t = (int(sid),)
query = 'select asset_type from asset_router where sid=:sid'
c.execute(query, t)
data = c.fetchone()
if data is None:
return
asset_type = data[0]
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
def _retrieve_equity(self, sid):
try:
return self._equity_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
c.row_factory = Row
t = (int(sid),)
c.execute(EQUITY_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
equity = Equity(**data)
else:
equity = None
self._equity_cache[sid] = equity
return equity
def _retrieve_futures_contract(self, sid):
try:
return self._future_cache[sid]
except KeyError:
pass
c = self.conn.cursor()
t = (int(sid),)
c.row_factory = Row
c.execute(FUTURE_BY_SID_QUERY, t)
data = dict(c.fetchone())
if data:
if data['start_date']:
data['start_date'] = pd.Timestamp(data['start_date'], tz='UTC')
if data['end_date']:
data['end_date'] = pd.Timestamp(data['end_date'], tz='UTC')
if data['first_traded']:
data['first_traded'] = pd.Timestamp(
data['first_traded'], tz='UTC')
if data['notice_date']:
data['notice_date'] = pd.Timestamp(
data['notice_date'], tz='UTC')
if data['expiration_date']:
data['expiration_date'] = pd.Timestamp(
data['expiration_date'], tz='UTC')
future = Future(**data)
else:
future = None
self._future_cache[sid] = future
return future
def lookup_symbol_resolve_multiple(self, symbol, as_of_date=None):
"""
Return matching Asset of name symbol in database.
If multiple Assets are found and as_of_date is not set,
raises MultipleSymbolsFound.
        If no Asset was active at as_of_date, raises SymbolNotFound.
"""
if as_of_date is not None:
as_of_date = pd.Timestamp(normalize_date(as_of_date))
c = self.conn.cursor()
if as_of_date:
# If one SID exists for symbol, return that symbol
t = (symbol, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If no SID exists for symbol, return SID with the
# highest-but-not-over end_date
if len(candidates) == 0:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? "
"and start_date<=? "
"order by end_date desc "
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc " +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
raise SymbolNotFound(symbol=symbol)
else:
t = (symbol,)
query = ("select sid from equities where symbol=?")
c.execute(query, t)
data = c.fetchall()
if len(data) == 1:
return self._retrieve_equity(data[0][0])
elif not data:
raise SymbolNotFound(symbol=symbol)
else:
options = []
for row in data:
sid = row[0]
asset = self._retrieve_equity(sid)
options.append(asset)
raise MultipleSymbolsFound(symbol=symbol,
options=options)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
If a fuzzy string is provided, then we try various symbols based on
the provided symbol. This is to facilitate mapping from a broker's
symbol to ours in cases where mapping to the broker's symbol loses
information. For example, if we have CMCS_A, but a broker has CMCSA,
when the broker provides CMCSA, it can also provide fuzzy='_',
so we can find a match by inserting an underscore.
"""
symbol = symbol.upper()
as_of_date = normalize_date(as_of_date)
if not fuzzy:
try:
return self.lookup_symbol_resolve_multiple(symbol, as_of_date)
except SymbolNotFound:
return None
else:
c = self.conn.cursor()
fuzzy = symbol.replace(self.fuzzy_char, '')
t = (fuzzy, as_of_date.value, as_of_date.value)
query = ("select sid from equities "
"where fuzzy=? " +
"and start_date<=? " +
"and end_date>=?")
c.execute(query, t)
candidates = c.fetchall()
# If one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0][0])
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if len(candidates) > 1:
t = (symbol, as_of_date.value)
query = ("select sid from equities "
"where symbol=? " +
"and start_date<=? " +
"order by start_date desc, end_date desc" +
"limit 1")
c.execute(query, t)
data = c.fetchone()
if data:
return self._retrieve_equity(data[0])
def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this
date is the primary contract, etc.
knowledge_date : pd.Timestamp
Date for determining which contracts exist for inclusion in
this chain. Contracts exist only if they have a start_date
on or before this date.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
"""
c = self.conn.cursor()
t = {'root_symbol': root_symbol,
'as_of_date': as_of_date.value,
'knowledge_date': knowledge_date.value}
c.execute("""
select sid from futures
where root_symbol=:root_symbol
and :as_of_date < notice_date
and start_date <= :knowledge_date
order by notice_date asc
""", t)
sids = [r[0] for r in c.fetchall()]
if not sids:
# Check if root symbol exists.
c.execute("""
select count(sid) from futures where root_symbol=:root_symbol
""", t)
count = c.fetchone()[0]
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
else:
# If symbol exists, return empty future chain.
return []
return [self._retrieve_futures_contract(sid) for sid in sids]
@property
def sids(self):
c = self.conn.cursor()
query = 'select sid from asset_router'
c.execute(query)
return [r[0] for r in c.fetchall()]
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
try:
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
result = self.retrieve_asset(int(asset_convertible))
if result is None:
raise SymbolNotFound(symbol=asset_convertible)
matches.append(result)
elif isinstance(asset_convertible, string_types):
# Throws SymbolNotFound on failure to match.
matches.append(
self.lookup_symbol_resolve_multiple(
asset_convertible,
as_of_date,
)
)
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
except SymbolNotFound:
missing.append(asset_convertible)
return None
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
        Convert an AssetConvertible or an iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidNotFound(sid=asset_convertible_or_iterable)
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
        ----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
        -------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# If symbols or Assets are provided, construction and mapping is
# necessary
self.consume_identifiers(index)
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: " + missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _insert_metadata(self, identifier, **kwargs):
"""
Inserts the given metadata kwargs to the entry for the given
identifier. Matching fields in the existing entry will be overwritten.
:param identifier: The identifier for which to insert metadata
:param kwargs: The keyed metadata to insert
"""
if identifier in self.metadata_cache:
# Multiple pass insertion no longer supported.
# This could and probably should raise an Exception, but is
# currently just a short-circuit for compatibility with existing
# testing structure in the test_algorithm module which creates
# multiple sources which all insert redundant metadata.
return
entry = {}
for key, value in kwargs.items():
# Do not accept invalid fields
if key not in ASSET_FIELDS:
continue
# Do not accept Nones
if value is None:
continue
# Do not accept empty strings
if value == '':
continue
# Do not accept nans from dataframes
if isinstance(value, float) and np.isnan(value):
continue
entry[key] = value
# Check if the sid is declared
try:
entry['sid']
except KeyError:
# If the identifier is not a sid, assign one
if hasattr(identifier, '__int__'):
entry['sid'] = identifier.__int__()
else:
if self.allow_sid_assignment:
# Assign the sid the value of its insertion order.
# This assumes that we are assigning values to all assets.
entry['sid'] = len(self.metadata_cache)
else:
raise SidAssignmentError(identifier=identifier)
# If the file_name is in the kwargs, it will be used as the symbol
try:
entry['symbol'] = entry.pop('file_name')
except KeyError:
pass
# If the identifier coming in was a string and there is no defined
# symbol yet, set the symbol to the incoming identifier
try:
            entry['symbol']
except KeyError:
if isinstance(identifier, string_types):
entry['symbol'] = identifier
# If the company_name is in the kwargs, it may be the asset_name
try:
company_name = entry.pop('company_name')
try:
entry['asset_name']
except KeyError:
entry['asset_name'] = company_name
except KeyError:
pass
# If dates are given as nanos, pop them
try:
entry['start_date'] = entry.pop('start_date_nano')
except KeyError:
pass
try:
entry['end_date'] = entry.pop('end_date_nano')
except KeyError:
pass
try:
entry['notice_date'] = entry.pop('notice_date_nano')
except KeyError:
pass
try:
entry['expiration_date'] = entry.pop('expiration_date_nano')
except KeyError:
pass
# Process dates to Timestamps
try:
entry['start_date'] = pd.Timestamp(entry['start_date'], tz='UTC')
except KeyError:
# Set a default start_date of the EPOCH, so that all date queries
# work when a start date is not provided.
entry['start_date'] = pd.Timestamp(0, tz='UTC')
try:
            entry['end_date'] = pd.Timestamp(entry['end_date'], tz='UTC')
        except KeyError:
            # Set a default end_date of 'now', so that all date queries
            # work when an end date is not provided.
            entry['end_date'] = self.end_date_to_assign
try:
entry['notice_date'] = pd.Timestamp(entry['notice_date'],
tz='UTC')
except KeyError:
pass
try:
entry['expiration_date'] = pd.Timestamp(entry['expiration_date'],
tz='UTC')
except KeyError:
pass
# Build an Asset of the appropriate type, default to Equity
asset_type = entry.pop('asset_type', 'equity')
if asset_type.lower() == 'equity':
try:
fuzzy = entry['symbol'].replace(self.fuzzy_char, '') \
if self.fuzzy_char else None
except KeyError:
fuzzy = None
asset = Equity(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
fuzzy)
c.execute("""INSERT INTO equities(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
fuzzy)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'equity')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
elif asset_type.lower() == 'future':
asset = Future(**entry)
c = self.conn.cursor()
t = (asset.sid,
asset.symbol,
asset.asset_name,
asset.start_date.value if asset.start_date else None,
asset.end_date.value if asset.end_date else None,
asset.first_traded.value if asset.first_traded else None,
asset.exchange,
asset.root_symbol,
asset.notice_date.value if asset.notice_date else None,
asset.expiration_date.value
if asset.expiration_date else None,
asset.contract_multiplier)
c.execute("""INSERT INTO futures(
sid,
symbol,
asset_name,
start_date,
end_date,
first_traded,
exchange,
root_symbol,
notice_date,
expiration_date,
contract_multiplier)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", t)
t = (asset.sid,
'future')
c.execute("""INSERT INTO asset_router(sid, asset_type)
VALUES(?, ?)""", t)
else:
raise InvalidAssetType(asset_type=asset_type)
self.metadata_cache[identifier] = entry
def consume_identifiers(self, identifiers):
"""
        Consumes the given identifiers into the metadata cache of this
AssetFinder.
"""
for identifier in identifiers:
# Handle case where full Assets are passed in
# For example, in the creation of a DataFrameSource, the source's
# 'sid' args may be full Assets
if isinstance(identifier, Asset):
sid = identifier.sid
metadata = identifier.to_dict()
metadata['asset_type'] = identifier.__class__.__name__
self.insert_metadata(identifier=sid, **metadata)
else:
self.insert_metadata(identifier)
def consume_metadata(self, metadata):
"""
        Consumes the provided metadata into the metadata cache. The
existing values in the cache will be overwritten when there
is a conflict.
:param metadata: The metadata to be consumed
"""
# Handle dicts
if isinstance(metadata, dict):
self._insert_metadata_dict(metadata)
# Handle DataFrames
elif isinstance(metadata, pd.DataFrame):
self._insert_metadata_dataframe(metadata)
# Handle readables
elif hasattr(metadata, 'read'):
self._insert_metadata_readable(metadata)
else:
raise ConsumeAssetMetaDataError(obj=metadata)
def clear_metadata(self):
"""
Used for testing.
"""
self.metadata_cache = {}
self.conn = sqlite3.connect(':memory:')
self.create_db_tables()
def insert_metadata(self, identifier, **kwargs):
self._insert_metadata(identifier, **kwargs)
self.conn.commit()
def _insert_metadata_dataframe(self, dataframe):
for identifier, row in dataframe.iterrows():
self._insert_metadata(identifier, **row)
self.conn.commit()
def _insert_metadata_dict(self, dict):
for identifier, entry in dict.items():
self._insert_metadata(identifier, **entry)
self.conn.commit()
def _insert_metadata_readable(self, readable):
for row in readable.read():
# Parse out the row of the readable object
metadata_dict = {}
for field in ASSET_FIELDS:
try:
row_value = row[field]
# Avoid passing placeholders
if row_value and (row_value != 'None'):
metadata_dict[field] = row[field]
except KeyError:
continue
except IndexError:
continue
# Locate the identifier, fail if not found
if 'sid' in metadata_dict:
identifier = metadata_dict['sid']
elif 'symbol' in metadata_dict:
identifier = metadata_dict['symbol']
else:
raise ConsumeAssetMetaDataError(obj=row)
self._insert_metadata(identifier, **metadata_dict)
self.conn.commit()
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
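# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original module): how an already
# constructed AssetFinder might be exercised. The sid, symbol, and dates are
# made-up illustration values; only methods defined above are used.
def _example_asset_lookup(finder):
    # Register metadata; _insert_metadata assigns the integer identifier as
    # the sid when no 'sid' field is given and routes the entry to the
    # equities table because of asset_type='equity'.
    finder.consume_metadata({
        0: {'symbol': 'AAPL', 'asset_type': 'equity',
            'start_date': '2010-01-01', 'end_date': '2015-01-01'},
    })
    # Resolve a symbol as of a date, then a mixed batch of convertibles.
    asset = finder.lookup_symbol('AAPL', pd.Timestamp('2012-06-01', tz='UTC'))
    matches, missing = finder.lookup_generic(['AAPL', 0], as_of_date=None)
    return asset, matches, missing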
|
|
import ptypes
from ptypes import *
from . import Ntddk, rtltypes
from .datatypes import *
class DISPATCHER_HEADER(pstruct.type):
_fields_ = [
(LONG, 'Lock'),
(LONG, 'SignalState'),
(LIST_ENTRY, 'WaitListHead'),
]
class KEVENT(pstruct.type):
_fields_ = [
(DISPATCHER_HEADER, 'Header'),
]
class KSEMAPHORE(pstruct.type):
_fields_ = [
(DISPATCHER_HEADER, 'Header'),
(LONG, 'Limit'),
]
class KGATE(KEVENT): pass
class KSPIN_LOCK(ULONG_PTR): pass
class KDPC(pstruct.type, versioned):
_fields_ = [
(UCHAR, 'Type'),
(UCHAR, 'Importance'),
(USHORT, 'Number'),
(lambda self: dyn.clone(LIST_ENTRY, _path_=['DpcListEntry'], _object_=P(KDPC)), 'DpcListEntry'),
(PVOID, 'DeferredRoutine'),
(PVOID, 'DeferredContext'),
(PVOID, 'SystemArgument1'),
(PVOID, 'SystemArgument2'),
(PVOID, 'DpcData'),
]
class KSCB(pstruct.type):
class _Spare1(pbinary.flags):
_fields_ = [
(4, 'Spare1'),
(1, 'RankBias'),
(1, 'HardCap'),
(1, 'OverQuota'),
(1, 'Inserted'),
]
def __init__(self, **attrs):
        super(KSCB, self).__init__(**attrs)
# circular import
from . import rtltypes
self._fields_ = [
(ULONGLONG, 'GenerationCycles'),
(ULONGLONG, 'UnderQuotaCycleTarget'),
(ULONGLONG, 'RankCycleTarget'),
(ULONGLONG, 'LongTermCycles'),
(ULONGLONG, 'LastReportedCycles'),
(ULONGLONG, 'OverQuotaHistory'),
(LIST_ENTRY, 'PerProcessorList'),
(rtltypes.RTL_BALANCED_NODE, 'QueueNode'),
(_Spare1, 'Spare1'),
(UCHAR, 'Spare2'),
(USHORT, 'ReadySummary'),
(ULONG, 'Rank'),
(dyn.array(LIST_ENTRY, 16), 'ReadyListHead'),
]
class KSCHEDULING_GROUP(pstruct.type, versioned):
_fields_ = [
(USHORT, 'Value'),
(UCHAR, 'Type'),
(UCHAR, 'HardCap'),
(ULONG, 'RelativeWeight'),
(ULONGLONG, 'QueryHistoryTimeStamp'),
(LONGLONG, 'NotificationCycles'),
(lambda self: dyn.clone(LIST_ENTRY, _path_=['SchedulingGroupList'], _object_=P(KSCHEDULING_GROUP)), 'SchedulingGroupList'),
(P(KDPC), 'NotificationDpc'),
(P(KSCB), 'PerProcessor'),
]
class KWAIT_STATUS_REGISTER(UCHAR):
pass
class KAPC_STATE(pstruct.type):
def __init__(self, **attrs):
super(KAPC_STATE, self).__init__(**attrs)
self._fields_ = F = []
F.extend([
(dyn.array(LIST_ENTRY, 2), 'ApcListHead'),
(P(KPROCESS), 'Process'),
(UCHAR, 'KernelApcInProgress'),
(UCHAR, 'KernelApcPending'),
(UCHAR, 'UserApcPending'),
])
class KAPC(pstruct.type):
def __init__(self, **attrs):
super(KAPC, self).__init__(**attrs)
self._fields_ = F = []
F.extend([
(UCHAR, 'Type'),
(UCHAR, 'SpareByte0'),
(UCHAR, 'Size'),
(UCHAR, 'SpareByte1'),
(ULONG, 'SpareLong0'),
(P(KTHREAD), 'Thread'),
(LIST_ENTRY, 'ApcListEntry'),
(PVOID, 'KernelRoutine'),
(PVOID, 'RundownRoutine'),
(PVOID, 'NormalRoutine'),
(PVOID, 'NormalContext'),
(PVOID, 'SystemArgument1'),
(PVOID, 'SystemArgument2'),
(CHAR, 'ApcStateIndex'),
(CHAR, 'ApcMode'),
(UCHAR, 'Inserted'),
])
class KWAIT_BLOCK(pstruct.type):
def __init__(self, **attrs):
super(KWAIT_BLOCK, self).__init__(**attrs)
self._fields_ = F = []
F.extend([
(LIST_ENTRY, 'WaitListEntry'),
(P(KTHREAD), 'Thread'),
(PVOID, 'Object'),
(P(KWAIT_BLOCK), 'NextWaitBlock'),
(WORD, 'WaitKey'),
(UCHAR, 'WaitType'),
(UCHAR, 'SpareByte'),
])
class KQUEUE(pstruct.type):
_fields_ = [
(DISPATCHER_HEADER, 'Header'),
(LIST_ENTRY, 'EntryListHead'),
(ULONG, 'CurrentCount'),
(ULONG, 'MaximumCount'),
(LIST_ENTRY, 'ThreadListHead'),
]
class KTIMER(pstruct.type):
_fields_ = [
(DISPATCHER_HEADER, 'Header'),
(ULARGE_INTEGER, 'DueTime'),
(LIST_ENTRY, 'TimerListEntry'),
(P(KDPC), 'Dpc'),
(ULONG, 'Period'),
]
class GROUP_AFFINITY(pstruct.type):
_fields_ = [
(lambda self: ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'Mask'),
(USHORT, 'Group'),
#(dyn.array(USHORT, 3), 'Reserved'),
]
class KAFFINITY_EX(pstruct.type):
_fields_ = [
(USHORT, 'Count'),
(USHORT, 'Size'),
(ULONG, 'Reserved'),
(lambda self: dyn.array(ULONGLONG, 20) if getattr(self, 'WIN64', False) else dyn.array(ULONG, 1), 'Bitmap'),
]
class KLOCK_ENTRY_LOCK_STATE(pstruct.type):
_fields_ = [
(PVOID, 'LockState'),
(PVOID, 'SessionState'),
]
class KLOCK_ENTRY(pstruct.type, versioned):
def __init__(self, **attrs):
super(KLOCK_ENTRY, self).__init__(**attrs)
# circular import
from . import rtltypes
self._fields_ = f = [
(rtltypes.RTL_BALANCED_NODE, 'TreeNode'),
]
if getattr(self, 'WIN64', False):
f.extend([
(ULONG, 'EntryFlags'),
(ULONG, 'SpareFlags'),
(KLOCK_ENTRY_LOCK_STATE, 'LockState'),
(rtltypes.RTL_RB_TREE, 'OwnerTree'),
(rtltypes.RTL_RB_TREE, 'WaiterTree'),
(ULONGLONG, 'EntryLock'),
(USHORT, 'AllBoosts'),
(USHORT, 'IoNormalPriorityWaiterCount'),
(USHORT, 'SparePad'),
(dyn.block(2 if getattr(self, 'WIN64', False) else 0), 'padding(ParentValue)'),
])
else:
f.extend([
(PVOID, 'ThreadUnsafe'),
(KLOCK_ENTRY_LOCK_STATE, 'LockState'),
(rtltypes.RTL_RB_TREE, 'OwnerTree'),
(rtltypes.RTL_RB_TREE, 'WaiterTree'),
(ULONG, 'EntryCount'),
(USHORT, 'AllBoosts'),
(USHORT, 'IoNormalPriorityWaiterCount'),
])
return
class KTHREAD(pstruct.type, versioned):
def __init__(self, **attrs):
super(KTHREAD, self).__init__(**attrs)
self._fields_ = f = []
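        # The field list is assembled at runtime because several members are
        # present only on one of the 32-bit/64-bit layouts (keyed off WIN64).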
f.extend([
(DISPATCHER_HEADER, 'Header'),
(PVOID, 'SListFaultAddress'),
(ULONGLONG, 'QuantumTarget'),
(PVOID, 'InitialStack'),
(PVOID, 'StackLimit'),
(PVOID, 'StackBase'),
(KSPIN_LOCK, 'ThreadLock'),
(ULONGLONG, 'CycleTime'),
])
if not getattr(self, 'WIN64', False):
f.extend([
(ULONG, 'HighCycleTime'),
(PVOID, 'ServiceTable'),
])
f.extend([
(ULONG, 'CurrentRunTime'),
(ULONG, 'ExpectedRunTime'),
(PVOID, 'KernelStack'),
(P(XSAVE_FORMAT), 'StateSaveArea'),
(P(KSCHEDULING_GROUP), 'SchedulingGroup'),
(KWAIT_STATUS_REGISTER, 'WaitRegister'),
(BOOLEAN, 'Running'),
(dyn.array(BOOLEAN, 2), 'Alerted'),
(ULONG, 'MiscFlags'),
(ULONG, 'ThreadFlags'),
(UCHAR, 'Tag'),
(UCHAR, 'SystemHeteroCpuPolicy'),
(UCHAR, 'UserHeteroCpuPolicy'),
(UCHAR, 'SpecCtrl'),
(ULONG, 'SystemCallNumber'),
(ULONG, 'ReadyTime'),
(PVOID, 'FirstArgument'),
(P(Ntddk.KTRAP_FRAME), 'TrapFrame'),
(KAPC_STATE, 'ApcState'), # (unaligned)
(CHAR, 'Priority'),
(ULONG, 'UserIdealProcessor'),
])
if not getattr(self, 'WIN64', False):
f.extend([
(ULONG, 'ContextSwitches'),
(UCHAR, 'State'),
(CHAR, 'Spare12'),
(KIRQL, 'WaitIrql'),
(KPROCESSOR_MODE, 'WaitMode'),
])
f.extend([
(LONG_PTR, 'WaitStatus'),
(P(KWAIT_BLOCK), 'WaitBlockList'),
(LIST_ENTRY, 'WaitListEntry'),
(P(KQUEUE), 'Queue'),
(PVOID, 'Teb'),
(dyn.align(8), 'align(RelativeTimerBias)'),
(ULONGLONG, 'RelativeTimerBias'),
(KTIMER, 'Timer'),
])
if getattr(self, 'WIN64', False):
f.extend([
(dyn.array(KWAIT_BLOCK, 4), 'WaitBlock'), # (unaligned)
(dyn.block(4), 'padding(WaitBlock)'),
#(P(UMS_CONTROL_BLOCK), 'Ucb'),
(PVOID, 'Ucb'),
#(P(KUMS_CONTEXT_HEADER), 'Uch'),
(PVOID, 'Uch'),
(PVOID, 'TebMappedLowVa'),
(LIST_ENTRY, 'QueueListEntry'),
(ULONG, 'NextProcessor'),
(LONG, 'QueuePriority'),
(P(KPROCESS), 'Process'),
])
else:
f.extend([
(dyn.array(KWAIT_BLOCK, 4), 'WaitBlock'), # (unaligned)
(LIST_ENTRY, 'QueueListEntry'),
(ULONG, 'NextProcessor'),
(LONG, 'QueuePriority'),
(P(KPROCESS), 'Process'),
])
f.extend([
(GROUP_AFFINITY, 'UserAffinity'), # (unaligned)
(CHAR, 'PreviousMode'),
(CHAR, 'BasePriority'),
(CHAR, 'PriorityDecrement'),
(UCHAR, 'Preempted'),
(UCHAR, 'AdjustReason'),
(CHAR, 'AdjustIncrement'),
])
f.extend([
(GROUP_AFFINITY, 'Affinity'), # (unaligned)
(UCHAR, 'ApcStateIndex'),
(UCHAR, 'WaitBlockCount'),
(ULONG, 'IdealProcessor'),
(dyn.array(P(KAPC_STATE), 2), 'ApcStatePointer'),
(KAPC_STATE, 'SavedApcState'), # (unaligned)
(UCHAR, 'WaitReason'),
(CHAR, 'SuspendCount'),
(CHAR, 'Saturation'),
(USHORT, 'SListFaultCount'),
(KAPC, 'SchedulerApc'), # (unaligned)
(UCHAR, 'CallbackNestingLevel'),
(ULONG, 'UserTime'),
(KEVENT, 'SuspendEvent'),
(LIST_ENTRY, 'ThreadListEntry'),
(LIST_ENTRY, 'MutantListHead'),
(SINGLE_LIST_ENTRY, 'LockEntriesFreeList'),
(dyn.array(KLOCK_ENTRY, 6), 'LockEntries'),
(SINGLE_LIST_ENTRY, 'PropagateBoostsEntry'),
(SINGLE_LIST_ENTRY, 'IoSelfBoostsEntry'),
(dyn.array(UCHAR, 16), 'PriorityFloorsCount'),
(ULONG, 'PriorityFloorSummary'),
(LONG, 'AbCompletedIoBoostCount'),
(SHORT, 'AbReferenceCount'),
(UCHAR, 'AbFreeEntryCount'),
(UCHAR, 'AbWaitEntryCount'),
(ULONG, 'ForegroundLossTime'),
(LIST_ENTRY, 'GlobalForegroundListEntry'),
])
if getattr(self, 'WIN64', False):
f.extend([
(LONGLONG, 'ReadOperationCount'),
(LONGLONG, 'WriteOperationCount'),
(LONGLONG, 'OtherOperationCount'),
(LONGLONG, 'ReadTransferCount'),
(LONGLONG, 'WriteTransferCount'),
(LONGLONG, 'OtherTransferCount'),
])
return
class KGDTENTRY(pstruct.type):
@pbinary.bigendian
class _Bits(pbinary.flags):
_fields_ = [
(8, 'BaseMid'),
(5, 'Type'),
            (2, 'Dpl'),
(1, 'Pres'),
(4, 'LimitHi'),
(1, 'Sys'),
(1, 'Reserved_0'),
(1, 'Default_Big'),
(1, 'Granularity'),
(8, 'BaseHi'),
]
_fields_ = [
(USHORT, 'LimitLow'),
(USHORT, 'BaseLow'),
(_Bits, 'Bits'),
]
class KGDTENTRY64(pstruct.type):
@pbinary.bigendian
class _Bits(pbinary.flags):
_fields_ = [
(8, 'BaseMiddle'),
(5, 'Type'),
(2, 'Dpl'),
(1, 'Present'),
(4, 'LimitHigh'),
(1, 'System'),
(1, 'LongMode'),
(1, 'DefaultBig'),
(1, 'Granularity'),
(8, 'BaseHigh'),
]
_fields_ = [
(USHORT, 'LimitLow'),
(USHORT, 'BaseLow'),
(_Bits, 'Bits'),
(ULONG, 'BaseUpper'),
(ULONG, 'MustBeZero'),
]
class KIDTENTRY(pstruct.type):
_fields_ = [
(USHORT, 'Offset'),
(USHORT, 'Selector'),
(USHORT, 'Access'),
(USHORT, 'ExtendedOffset'),
]
class KEXECUTE_OPTIONS(UCHAR):
pass
class KSTACK_COUNT(pstruct.type):
_fields_ = [
(LONG, 'Value'),
]
class KPROCESS(pstruct.type, versioned):
def DirectoryTableBase(self):
return self['DirectoryTableBase'].int()
def __init__(self, **attrs):
super(KPROCESS, self).__init__(**attrs)
self._fields_ = f = []
f.extend([
(DISPATCHER_HEADER, 'Header'),
(LIST_ENTRY, 'ProfileListHead'),
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'DirectoryTableBase'),
])
if not getattr(self, 'WIN64', False):
f.extend([
(KGDTENTRY, 'LdtDescriptor'),
(KIDTENTRY, 'Int21Descriptor'),
])
f.extend([
(LIST_ENTRY, 'ThreadListHead'),
(ULONG, 'ProcessLock'),
])
if getattr(self, 'WIN64', False):
f.extend([
(ULONG, 'Spare0'),
(ULONGLONG, 'DeepFreezeStartTime'),
])
f.extend([
(KAFFINITY_EX, 'Affinity'),
(LIST_ENTRY, 'ReadyListHead'),
(SINGLE_LIST_ENTRY, 'SwapListEntry'),
(KAFFINITY_EX, 'ActiveProcessors'),
(LONG, 'ProcessFlags'),
(CHAR, 'BasePriority'),
(CHAR, 'QuantumReset'),
(UCHAR, 'Visited'),
(KEXECUTE_OPTIONS, 'Flags'),
(dyn.array(ULONG, 20 if getattr(self, 'WIN64', False) else 1), 'ThreadSeed'),
(dyn.array(USHORT, 20 if getattr(self, 'WIN64', False) else 1), 'IdealNode'),
(USHORT, 'IdealGlobalNode'),
(USHORT, 'Spare1'),
])
if getattr(self, 'WIN64', False):
f.extend([
(KSTACK_COUNT, 'StackCount'),
(LIST_ENTRY, 'ProcessListEntry'),
(ULONGLONG, 'CycleTime'),
(ULONGLONG, 'ContextSwitches'),
(P(KSCHEDULING_GROUP), 'SchedulingGroup'),
])
else:
f.extend([
(USHORT, 'IopmOffset'),
(P(KSCHEDULING_GROUP), 'SchedulingGroup'),
(KSTACK_COUNT, 'StackCount'),
(LIST_ENTRY, 'ProcessListEntry'),
(ULONGLONG, 'CycleTime'),
(ULONGLONG, 'ContextSwitches'),
])
f.extend([
(ULONG, 'FreezeCount'),
(ULONG, 'KernelTime'),
(ULONG, 'UserTime'),
])
if getattr(self, 'WIN64', False):
f.extend([
(USHORT, 'LdtFreeSelectorHint'),
(USHORT, 'LdtTableLength'),
(KGDTENTRY64, 'LdtSystemDescriptor'),
(PVOID, 'LdtBaseAddress'),
(Ntddk.FAST_MUTEX, 'LdtProcessLock'),
(PVOID, 'InstrumentationCallback'),
(ULONGLONG, 'SecurePid'),
])
else:
f.extend([
(PVOID, 'VdmTrapClear'),
])
return
|
|
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('USERMETA',
doc="""
The :class:`USERMETA` class handles user style metadata tied to real
data.
""")
gx_defines = [
Define('USERMETA_FORMAT',
doc=":class:`USERMETA` Format Types",
constants=[
Constant('USERMETA_FORMAT_DEFAULT', value='-1', type=Type.INT32_T,
doc="Use the standard type for the system"),
Constant('USERMETA_FORMAT_ISO', value='0', type=Type.INT32_T,
doc="ISO 19139 standard"),
Constant('USERMETA_FORMAT_FGDC', value='1', type=Type.INT32_T,
doc="FGDC Metadata Standard")
])]
gx_methods = {
'Miscellaneous': [
Method('Create_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Creates an empty :class:`USERMETA` object",
return_type="USERMETA",
return_doc=":class:`USERMETA` Object",
parameters = [
Parameter('format', type=Type.INT32_T,
doc=":def:`USERMETA_FORMAT` Type of Meta to create")
]),
Method('CreateS_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Create a :class:`USERMETA` from a file",
return_type="USERMETA",
return_doc=":class:`USERMETA` Object",
parameters = [
Parameter('file', type=Type.STRING,
doc="File Name")
]),
Method('Destroy_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Destroyes the :class:`USERMETA` object",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA",
doc="Projection to Destroy")
]),
Method('GetDataCreationDate_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the Data Creation Date",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('date', type=Type.DOUBLE, is_ref=True,
doc="Date")
]),
Method('GetExtents2d_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the 2d Extents",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('min_x', type=Type.DOUBLE, is_ref=True,
doc="MinX"),
Parameter('min_y', type=Type.DOUBLE, is_ref=True,
doc="MinY"),
Parameter('max_x', type=Type.DOUBLE, is_ref=True,
doc="MaxX"),
Parameter('max_y', type=Type.DOUBLE, is_ref=True,
doc="MaxY")
]),
Method('GetExtents3d_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the 3d Extents",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('min_x', type=Type.DOUBLE, is_ref=True,
doc="MinX"),
Parameter('min_y', type=Type.DOUBLE, is_ref=True,
doc="MinY"),
Parameter('min_z', type=Type.DOUBLE, is_ref=True,
doc="MinZ"),
Parameter('max_x', type=Type.DOUBLE, is_ref=True,
doc="MaxX"),
Parameter('max_y', type=Type.DOUBLE, is_ref=True,
doc="MaxY"),
Parameter('max_z', type=Type.DOUBLE, is_ref=True,
doc="MaxZ")
]),
Method('GetIPJ_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the :class:`IPJ`",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('ipj', type="IPJ",
doc="Date")
]),
Method('GetMetaCreationDate_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the Meta Creation Date",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('date', type=Type.DOUBLE, is_ref=True,
doc="Date")
]),
Method('GetXMLFormat_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the XML Format",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('format', type=Type.INT32_T, is_ref=True,
doc=":def:`USERMETA_FORMAT`")
]),
Method('SetXMLFormat_USERMETA', module='geoengine.core', version='9.6.0',
availability=Availability.PUBLIC,
doc="Get the XML Format",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('format', type=Type.INT32_T, doc=":def:`USERMETA_FORMAT`")
]),
Method('iCompare_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Compare 2 :class:`USERMETA`'s",
return_type=Type.INT32_T,
return_doc="""
0 - No
1 - Yes
""",
parameters = [
Parameter('usermeta1', type="USERMETA",
doc="First :class:`USERMETA`"),
Parameter('usermeta2', type="USERMETA",
doc="Second UERMETA")
]),
Method('IGetDataCreator_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the Data Creator",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('data_creator', type=Type.STRING, is_ref=True, size_of_param='size',
doc="DataCreator returned"),
Parameter('size', type=Type.INT32_T, default_length='STR_DEFAULT_LONG',
doc="Maximum name size")
]),
Method('IGetFormat_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the File Format",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('format', type=Type.STRING, is_ref=True, size_of_param='size',
doc="Title returned"),
Parameter('size', type=Type.INT32_T, default_length='STR_DEFAULT_LONG',
doc="Maximum name size")
]),
Method('IGetMetaCreator_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the Meta Creator",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('meta_creator', type=Type.STRING, is_ref=True, size_of_param='size',
doc="MetaCreator returned"),
Parameter('size', type=Type.INT32_T, default_length='STR_DEFAULT_LONG',
doc="Maximum name size")
]),
Method('IGetProject_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the File Project",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('project', type=Type.STRING, is_ref=True, size_of_param='size',
doc="Title returned"),
Parameter('size', type=Type.INT32_T, default_length='STR_DEFAULT_LONG',
doc="Maximum name size")
]),
Method('IGetTitle_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Get the Title",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('title', type=Type.STRING, is_ref=True, size_of_param='size',
doc="Title returned"),
Parameter('size', type=Type.INT32_T, default_length='STR_DEFAULT_LONG',
doc="Maximum name size")
]),
Method('Serial_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Serialize :class:`USERMETA` to a :class:`BF`.",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('save_geo', type=Type.BOOL,
doc="Output Geosoft Metadata?"),
Parameter('file', type=Type.STRING,
doc="File name to save to")
]),
Method('SetDataCreationDate_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the Data Creation Date",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('date', type=Type.DOUBLE,
doc="Date")
]),
Method('SetDataCreator_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the Data Creator",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('data_creator', type=Type.STRING,
doc="DataCreator")
]),
Method('SetExtents2d_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the 2d Extents",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('min_x', type=Type.DOUBLE,
doc="MinX"),
Parameter('min_y', type=Type.DOUBLE,
doc="MinY"),
Parameter('max_x', type=Type.DOUBLE,
doc="MaxX"),
Parameter('max_y', type=Type.DOUBLE,
doc="MaxY")
]),
Method('SetExtents3d_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the 3d Extents",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('min_x', type=Type.DOUBLE,
doc="MinX"),
Parameter('min_y', type=Type.DOUBLE,
doc="MinY"),
Parameter('min_z', type=Type.DOUBLE,
doc="MinZ"),
Parameter('max_x', type=Type.DOUBLE,
doc="MaxX"),
Parameter('max_y', type=Type.DOUBLE,
doc="MaxY"),
Parameter('max_z', type=Type.DOUBLE,
doc="MaxZ")
]),
Method('SetFormat_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the File Format",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('format', type=Type.STRING,
doc="Format")
]),
Method('SetIPJ_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the :class:`IPJ`",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('ipj', type="IPJ",
doc="Date")
]),
Method('SetMetaCreationDate_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the Meta Creation Date",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('date', type=Type.DOUBLE,
doc="Date")
]),
Method('SetMetaCreator_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the Meta Creator",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('meta_creator', type=Type.STRING,
doc="MetaCreator")
]),
Method('SetProject_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the File Project",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('project', type=Type.STRING,
doc="Project")
]),
Method('SetTitle_USERMETA', module='geoengine.core', version='7.0.0',
availability=Availability.PUBLIC,
doc="Set the Title",
return_type=Type.VOID,
parameters = [
Parameter('usermeta', type="USERMETA"),
Parameter('title', type=Type.STRING,
doc="Title")
]),
Method('UpdateExtents2D_USERMETA', module='geoengine.core', version='7.0.1',
availability=Availability.PUBLIC,
doc="""
Edit an existing XML metadata file by
changing the extents and projection data
""",
return_type=Type.VOID,
parameters = [
Parameter('filename', type=Type.STRING,
doc="Filename of existing metadata to update"),
Parameter('ipj', type="IPJ",
doc="New projection"),
Parameter('min_x', type=Type.DOUBLE,
doc="New MinX value"),
Parameter('min_y', type=Type.DOUBLE,
doc="New MinY value"),
Parameter('max_x', type=Type.DOUBLE,
doc="New MaxX value"),
Parameter('max_y', type=Type.DOUBLE,
doc="New MaxY value")
]),
Method('UpdateFileType_USERMETA', module='geoengine.core', version='7.2.0',
availability=Availability.PUBLIC,
doc="""
Edit an existing XML metadata file by
changing the file type
""",
return_type=Type.VOID,
parameters = [
Parameter('file_name', type=Type.STRING,
doc="Filename of existing metadata to update"),
Parameter('new_file_type', type=Type.STRING,
doc="New file type")
]),
Method('SaveFileLineage_USERMETA', module='geoengine.core', version='8.2.0',
availability=Availability.PUBLIC,
doc="Add lineage to XML",
return_type=Type.VOID,
parameters = [
Parameter('file_name', type=Type.STRING,
doc="Filename of existing metadata to update"),
Parameter('save_geo', type=Type.BOOL,
doc="Output Geosoft Metadata?")
])
]
}
|
|
from django.shortcuts import render_to_response
from django import forms
from django import VERSION as DJANGO_VERSION
from django.template import RequestContext
from django.db.models import signals as signalmodule
from django.http import HttpResponse
import exceptions
# Prefer the stdlib json module; fall back to django.utils.simplejson on old Django versions.
try:
import json
except ImportError:
from django.utils import simplejson as json
import datetime
import os
__all__ = ['render_to', 'signals', 'ajax_request', 'autostrip']
try:
from functools import wraps
except ImportError:
def wraps(wrapped, assigned=('__module__', '__name__', '__doc__'),
updated=('__dict__',)):
def inner(wrapper):
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
return wrapper
return inner
def render_to(template=None, content_type=None, mimetype=None):
"""
    Decorator for Django views that sends the returned dict to the
    render_to_response function.
    The template name can be given as a decorator parameter or as the TEMPLATE
    item in the returned dictionary. A RequestContext is always added as the
    context instance.
    If the view doesn't return a dict, the decorator simply returns its output.
Parameters:
- template: template name to use
- content_type: content type to send in response headers
- mimetype: content type to send in response headers (deprecated)
Examples:
# 1. Template name in decorator parameters
@render_to('template.html')
def foo(request):
bar = Bar.object.all()
return {'bar': bar}
# equals to
def foo(request):
bar = Bar.object.all()
return render_to_response('template.html',
{'bar': bar},
context_instance=RequestContext(request))
# 2. Template name as TEMPLATE item value in return dictionary.
    If TEMPLATE is given, its value takes priority over the
    render_to argument.
@render_to()
def foo(request, category):
template_name = '%s.html' % category
return {'bar': bar, 'TEMPLATE': template_name}
#equals to
def foo(request, category):
template_name = '%s.html' % category
return render_to_response(template_name,
{'bar': bar},
context_instance=RequestContext(request))
"""
def renderer(function):
@wraps(function)
def wrapper(request, *args, **kwargs):
output = function(request, *args, **kwargs)
if not isinstance(output, dict):
return output
tmpl = output.pop('TEMPLATE', template)
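            # No template name supplied anywhere: fall back to
            # "<package path of the view's module>/<view function name>.html".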
if tmpl is None:
template_dir = os.path.join(*function.__module__.split('.')[:-1])
tmpl = os.path.join(template_dir, function.func_name + ".html")
# Explicit version check to avoid swallowing other exceptions
            if DJANGO_VERSION[:2] >= (1, 5):
return render_to_response(tmpl, output, \
context_instance=RequestContext(request),
content_type=content_type or mimetype)
else:
return render_to_response(tmpl, output, \
context_instance=RequestContext(request),
mimetype=content_type or mimetype)
return wrapper
return renderer
class Signals(object):
'''
Convenient wrapper for working with Django's signals (or any other
implementation using same API).
Example of usage::
# connect to registered signal
@signals.post_save(sender=YourModel)
def sighandler(instance, **kwargs):
pass
# connect to any signal
signals.register_signal(siginstance, signame) # and then as in example above
or
@signals(siginstance, sender=YourModel)
def sighandler(instance, **kwargs):
pass
    In any case the decorated function is left as is, without any changes.
(c) 2008 Alexander Solovyov, new BSD License
'''
def __init__(self):
self._signals = {}
# register all Django's default signals
for k, v in signalmodule.__dict__.items():
            # This is hard-coded, but arguably better than an isinstance check.
if not k.startswith('__') and k != 'Signal':
self.register_signal(v, k)
def __getattr__(self, name):
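        # Attribute access (e.g. ``signals.post_save``) returns a connector,
        # so it can be used directly as ``@signals.post_save(sender=Model)``.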
return self._connect(self._signals[name])
def __call__(self, signal, **kwargs):
def inner(func):
signal.connect(func, **kwargs)
return func
return inner
def _connect(self, signal):
def wrapper(**kwargs):
return self(signal, **kwargs)
return wrapper
def register_signal(self, signal, name):
self._signals[name] = signal
signals = Signals()
def date_time_handler(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
else:
raise TypeError("%r is not JSON serializable" % obj)
FORMAT_TYPES = {
'application/json': lambda response: json.dumps(response, default=date_time_handler),
'text/json': lambda response: json.dumps(response, default=date_time_handler),
}
try:
import yaml
except ImportError:
pass
else:
FORMAT_TYPES.update({
'application/yaml': yaml.dump,
'text/yaml': yaml.dump,
})
def ajax_request(func):
"""
    If the view returns a serializable dict, returns the response in the
    format requested by the HTTP_ACCEPT header. Defaults to JSON if no
    format is requested or matched.
Currently supports JSON or YAML (if installed), but can easily be extended.
example:
@ajax_request
def my_view(request):
news = News.objects.all()
news_titles = [entry.title for entry in news]
return {'news_titles': news_titles}
"""
@wraps(func)
def wrapper(request, *args, **kwargs):
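        # Pick the first Accept entry we know how to serialize; the for/else
        # falls back to JSON when nothing matches.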
for accepted_type in request.META.get('HTTP_ACCEPT', '').split(','):
if accepted_type in FORMAT_TYPES.keys():
format_type = accepted_type
break
else:
format_type = 'application/json'
response = func(request, *args, **kwargs)
if not isinstance(response, HttpResponse):
#response = jsonobj.serialize_models(response, request)
if isinstance(response, dict):
pass
#response.update(ajax_response.STATUS_SUCCESS)
data = FORMAT_TYPES[format_type](response)
response = HttpResponse(data, content_type=format_type)
response['content-length'] = len(data)
return response
return wrapper
# Dispatch on the HTTP method: render a template for GET, return an AJAX response for POST.
def ajax_by_method(template=None, content_type=None, mimetype=None):
def renderer(function):
@wraps(function)
def wrapper(request, *args, **kw_args):
if request.method == 'GET':
                return render_to(template, content_type, mimetype)(function)(request, *args, **kw_args)
elif request.method == 'POST':
return ajax_request(function)(request, *args, **kw_args)
else:
raise exceptions.MethodError
return wrapper
return renderer
def autostrip(cls):
"""
    Strip text fields before validation.
example:
class PersonForm(forms.Form):
name = forms.CharField(min_length=2, max_length=10)
email = forms.EmailField()
PersonForm = autostrip(PersonForm)
#or you can use @autostrip in python >= 2.6
Author: nail.xx
"""
fields = [(key, value) for key, value in cls.base_fields.iteritems() if isinstance(value, forms.CharField)]
for field_name, field_object in fields:
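        # A factory function captures each field's original ``clean`` so the
        # loop variable isn't late-bound inside the returned lambda.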
def get_clean_func(original_clean):
return lambda value: original_clean(value and value.strip())
clean_func = get_clean_func(getattr(field_object, 'clean'))
setattr(field_object, 'clean', clean_func)
return cls
def method(method_name):
def renderer(function):
@wraps(function)
def wrapper(request, *args, **kw_args):
if method_name.upper() != request.method:
raise exceptions.MethodError, 'This method(%s) should not be used in this view.' % request.method
return function(request, *args, **kw_args)
return wrapper
return renderer
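# A minimal usage sketch (hypothetical view, model, and template names, not
# part of this module): GET requests render "news/index.html" through
# render_to, POST requests get a JSON/YAML response negotiated from the
# Accept header, and @method('POST') rejects anything but POST.
#
# @ajax_by_method('news/index.html')
# def news_index(request):
#     return {'news_titles': [n.title for n in News.objects.all()]}
#
# @method('POST')
# @ajax_request
# def create_news(request):
#     return {'ok': True}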
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logical Link Control (LLC, IEEE 802.2) parser/serializer
http://standards.ieee.org/getieee802/download/802.2-1998.pdf
LLC format
+-----------------+--------------+
| DSAP address | 8 bits |
+-----------------+--------------+
| SSAP address | 8 bits |
+-----------------+--------------+
| Control | 8 or 16 bits |
+-----------------+--------------+
DSAP address field
LSB
+-----+---+---+---+---+---+---+---+
| I/G | D | D | D | D | D | D | D |
+-----+---+---+---+---+---+---+---+
I/G bit = 0 : Individual DSAP
   I/G bit = 1 : Group DSAP
D : DSAP address
SSAP address field
LSB
+-----+---+---+---+---+---+---+---+
| C/R | S | S | S | S | S | S | S |
+-----+---+---+---+---+---+---+---+
C/R bit = 0 : Command
C/R bit = 1 : Response
S : SSAP address
Control field
Information transfer
command/response
(I-format PDU)
1 2 3 4 5 6 7 8 9 10-16
+---+---+---+---+---+---+---+---+-----+------+
| 0 | N(S) | P/F | N(R) |
+---+---+---+---+---+---+---+---+-----+------+
Supervisory
commands/responses
(S-format PDUs)
1 2 3 4 5 6 7 8 9 10-16
+---+---+---+---+---+---+---+---+-----+------+
| 1 0 | S S | 0 0 0 0 | P/F | N(R) |
+---+---+---+---+---+---+---+---+-----+------+
Unnumbered
commands/responses
(U-format PDUs)
1 2 3 4 5 6 7 8
+---+---+----+---+-----+---+----+---+
| 1 1 | M1 M1 | P/F | M2 M2 M2 |
+---+---+----+---+-----+---+----+---+
N(S) : sender send sequence number (Bit 2=lower-order-bit)
N(R) : sender receive sequence number (Bit 10=lower-order-bit)
S : supervisory function bit
M1/M2: modifier function bit
P/F : poll bit - command LLC PDUs
final bit - response LLC PDUs
"""
import struct
from . import bpdu
from . import packet_base
from ryu.lib import stringify
SAP_BPDU = 0x42
class llc(packet_base.PacketBase):
"""LLC(IEEE 802.2) header encoder/decoder class.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ===============================================
Attribute Description
=============== ===============================================
    dsap_addr       Destination service access point address field \
                    includes the I/G bit as the least significant bit.
    ssap_addr       Source service access point address field \
                    includes the C/R bit as the least significant bit.
control Control field \
[16 bits for formats that include sequence \
numbering, and 8 bits for formats that do not]. \
Either ryu.lib.packet.llc.ControlFormatI or \
ryu.lib.packet.llc.ControlFormatS or \
ryu.lib.packet.llc.ControlFormatU object.
=============== ===============================================
"""
_PACK_STR = '!BB'
_PACK_LEN = struct.calcsize(_PACK_STR)
_CTR_TYPES = {}
_CTR_PACK_STR = '!2xB'
_MIN_LEN = _PACK_LEN
@staticmethod
def register_control_type(register_cls):
llc._CTR_TYPES[register_cls.TYPE] = register_cls
return register_cls
def __init__(self, dsap_addr, ssap_addr, control):
super(llc, self).__init__()
assert getattr(control, 'TYPE', None) in self._CTR_TYPES
self.dsap_addr = dsap_addr
self.ssap_addr = ssap_addr
self.control = control
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(dsap_addr, ssap_addr) = struct.unpack_from(cls._PACK_STR, buf)
(control,) = struct.unpack_from(cls._CTR_PACK_STR, buf)
ctrl = cls._get_control(control)
control, information = ctrl.parser(buf[cls._PACK_LEN:])
return (cls(dsap_addr, ssap_addr, control),
cls.get_packet_type(dsap_addr), information)
def serialize(self, payload, prev):
addr = struct.pack(self._PACK_STR, self.dsap_addr, self.ssap_addr)
control = self.control.serialize()
return addr + control
@classmethod
def _get_control(cls, buf):
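        # The low-order bit of the control byte distinguishes I-format
        # (bit 0 == 0) from the S/U formats; for the latter the two
        # low-order bits select S (0b01) or U (0b11).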
key = buf & 0b1 if buf & 0b1 == ControlFormatI.TYPE else buf & 0b11
return cls._CTR_TYPES[key]
@llc.register_control_type
class ControlFormatI(stringify.StringifyMixin):
"""LLC sub encoder/decoder class for control I-format field.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
======================== ===============================
Attribute Description
======================== ===============================
send_sequence_number sender send sequence number
pf_bit poll/final bit
receive_sequence_number sender receive sequence number
======================== ===============================
"""
TYPE = 0b0
_PACK_STR = '!H'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, send_sequence_number=0, pf_bit=0,
receive_sequence_number=0):
super(ControlFormatI, self).__init__()
self.send_sequence_number = send_sequence_number
self.pf_bit = pf_bit
self.receive_sequence_number = receive_sequence_number
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(control,) = struct.unpack_from(cls._PACK_STR, buf)
assert (control >> 8) & 0b1 == cls.TYPE
send_sequence_number = (control >> 9) & 0b1111111
pf_bit = (control >> 8) & 0b1
receive_sequence_number = (control >> 1) & 0b1111111
return cls(send_sequence_number, pf_bit,
receive_sequence_number), buf[cls._PACK_LEN:]
def serialize(self):
control = (self.send_sequence_number << 9 |
self.TYPE << 8 |
self.receive_sequence_number << 1 |
self.pf_bit)
return struct.pack(self._PACK_STR, control)
@llc.register_control_type
class ControlFormatS(stringify.StringifyMixin):
"""LLC sub encoder/decoder class for control S-format field.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
======================== ===============================
Attribute Description
======================== ===============================
supervisory_function supervisory function bit
pf_bit poll/final bit
receive_sequence_number sender receive sequence number
======================== ===============================
"""
TYPE = 0b01
_PACK_STR = '!H'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, supervisory_function=0, pf_bit=0,
receive_sequence_number=0):
super(ControlFormatS, self).__init__()
self.supervisory_function = supervisory_function
self.pf_bit = pf_bit
self.receive_sequence_number = receive_sequence_number
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(control,) = struct.unpack_from(cls._PACK_STR, buf)
assert (control >> 8) & 0b11 == cls.TYPE
assert (control >> 12) & 0b1111 == 0
supervisory_function = (control >> 10) & 0b11
pf_bit = (control >> 8) & 0b1
receive_sequence_number = (control >> 1) & 0b1111111
return cls(supervisory_function, pf_bit,
receive_sequence_number), buf[cls._PACK_LEN:]
def serialize(self):
control = (self.supervisory_function << 10 |
self.TYPE << 8 |
self.receive_sequence_number << 1 |
self.pf_bit)
return struct.pack(self._PACK_STR, control)
@llc.register_control_type
class ControlFormatU(stringify.StringifyMixin):
"""LLC sub encoder/decoder class for control U-format field.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts but in host byte
order.
__init__ takes the corresponding args in this order.
======================== ===============================
Attribute Description
======================== ===============================
modifier_function1 modifier function bit
pf_bit poll/final bit
modifier_function2 modifier function bit
======================== ===============================
"""
TYPE = 0b11
_PACK_STR = '!B'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, modifier_function1=0, pf_bit=0, modifier_function2=0):
super(ControlFormatU, self).__init__()
self.modifier_function1 = modifier_function1
self.pf_bit = pf_bit
self.modifier_function2 = modifier_function2
@classmethod
def parser(cls, buf):
assert len(buf) >= cls._PACK_LEN
(control,) = struct.unpack_from(cls._PACK_STR, buf)
assert control & 0b11 == cls.TYPE
modifier_function1 = (control >> 2) & 0b11
pf_bit = (control >> 4) & 0b1
modifier_function2 = (control >> 5) & 0b111
return cls(modifier_function1, pf_bit,
modifier_function2), buf[cls._PACK_LEN:]
def serialize(self):
control = (self.modifier_function2 << 5 |
self.pf_bit << 4 |
self.modifier_function1 << 2 |
self.TYPE)
return struct.pack(self._PACK_STR, control)
llc.register_packet_type(bpdu.bpdu, SAP_BPDU)
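# A minimal round-trip sketch (not part of the original module): build an LLC
# header addressed to the BPDU SAP with a U-format control field, then parse
# the serialized bytes back. The field values are illustration only.
def _example_llc_roundtrip():
    ctrl = ControlFormatU(modifier_function1=0, pf_bit=0, modifier_function2=0)
    hdr = llc(dsap_addr=SAP_BPDU, ssap_addr=SAP_BPDU, control=ctrl)
    wire = hdr.serialize(payload=b'', prev=None)  # 2 address bytes + 1 control byte
    parsed, next_cls, rest = llc.parser(wire)
    return parsed, next_cls, rest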
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from datetime import timedelta
from mock import MagicMock
from nose_parameterized import parameterized
from six.moves import range
from textwrap import dedent
from unittest import TestCase
import numpy as np
import pandas as pd
from zipline.assets import AssetFinder
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.control_flow import nullctx
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
import zipline.utils.factory as factory
import zipline.utils.simfactory as simfactory
from zipline.errors import (
OrderDuringInitialize,
RegisterTradingControlPostInit,
TradingControlViolation,
AccountControlViolation,
SymbolNotFound,
RootSymbolNotFound,
UnsupportedDatetimeFormat,
)
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
AmbitiousStopLimitAlgorithm,
EmptyPositionsAlgorithm,
InvalidOrderAlgorithm,
RecordAlgorithm,
TestAlgorithm,
TestOrderAlgorithm,
TestOrderInstantAlgorithm,
TestOrderPercentAlgorithm,
TestOrderStyleForwardingAlgorithm,
TestOrderValueAlgorithm,
TestRegisterTransformAlgorithm,
TestTargetAlgorithm,
TestTargetPercentAlgorithm,
TestTargetValueAlgorithm,
SetLongOnlyAlgorithm,
SetAssetDateBoundsAlgorithm,
SetMaxPositionSizeAlgorithm,
SetMaxOrderCountAlgorithm,
SetMaxOrderSizeAlgorithm,
SetDoNotOrderListAlgorithm,
SetMaxLeverageAlgorithm,
api_algo,
api_get_environment_algo,
api_symbol_algo,
call_all_order_methods,
call_order_in_init,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
)
import zipline.utils.events
from zipline.utils.test_utils import (
assert_single_position,
drain_zipline,
to_utc,
)
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.assets import Equity
from zipline.finance.execution import LimitOrder
from zipline.finance.trading import SimulationParameters
from zipline.utils.api_support import set_algo_instance
from zipline.utils.events import DateRuleFactory, TimeRuleFactory
from zipline.algorithm import TradingAlgorithm
from zipline.protocol import DATASOURCE_TYPE
from zipline.finance.trading import TradingEnvironment
from zipline.finance.commission import PerShare
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecordAlgorithm(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(num_days=4)
trade_history = factory.create_trade_history(
133,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
def test_record_incr(self):
algo = RecordAlgorithm(
sim_params=self.sim_params)
output = algo.run(self.source)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(TestCase):
def setUp(self):
setup_logger(self)
sids = [1, 2]
self.sim_params = factory.create_simulation_parameters(
num_days=2,
data_frequency='minute',
emission_rate='minute',
)
self.source = factory.create_minutely_trade_source(
sids,
sim_params=self.sim_params,
concurrent=True,
)
def tearDown(self):
teardown_logger(self)
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = TradingAlgorithm(
initialize=lambda context: None,
handle_data=lambda context, data: None,
sim_params=self.sim_params,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-03 14:31:00+0000', tz='UTC'),
'end': pd.Timestamp('2006-01-04 21:00:00+0000', tz='UTC'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data,
sim_params=self.sim_params)
algo.run(self.source)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
algo.order(algo.sid(2), 1, style=LimitOrder(0.01))
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order in orders_2:
algo.cancel_order(order)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data,
sim_params=self.sim_params)
algo.run(self.source)
def test_schedule_function(self):
date_rules = DateRuleFactory
time_rules = TimeRuleFactory
def incrementer(algo, data):
algo.func_called += 1
self.assertEqual(
algo.get_datetime().time(),
datetime.time(hour=14, minute=31),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
sim_params=self.sim_params,
)
algo.run(self.source)
self.assertEqual(algo.func_called, algo.days)
@parameterized.expand([
('daily',),
('minute',),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = TradingAlgorithm(
initialize=nop, handle_data=nop, sim_params=self.sim_params,
)
# Schedule something for NOT Always.
algo.schedule_function(nop, time_rule=zipline.utils.events.Never())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, zipline.utils.events.OncePerDay)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, zipline.utils.events.ComposedRule)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, zipline.utils.events.Always)
if mode == 'daily':
self.assertIsInstance(second, zipline.utils.events.Always)
else:
self.assertIsInstance(second, zipline.utils.events.Never)
self.assertIs(composer, zipline.utils.events.ComposedRule.lazy_and)
def test_asset_lookup(self):
metadata = {0: {'symbol': 'PLAY',
'asset_type': 'equity',
'start_date': '2002-01-01',
'end_date': '2004-01-01'},
1: {'symbol': 'PLAY',
'asset_type': 'equity',
'start_date': '2005-01-01',
'end_date': '2006-01-01'}}
algo = TradingAlgorithm(asset_metadata=metadata)
# Test before either PLAY existed
algo.sim_params.period_end = pd.Timestamp('2001-12-01', tz='UTC')
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params.period_end = pd.Timestamp('2002-12-01', tz='UTC')
list_result = algo.symbols('PLAY')
self.assertEqual(0, list_result[0])
# Test after first PLAY ends
algo.sim_params.period_end = pd.Timestamp('2004-12-01', tz='UTC')
self.assertEqual(0, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params.period_end = pd.Timestamp('2005-12-01', tz='UTC')
self.assertEqual(1, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params.period_end = pd.Timestamp('2006-12-01', tz='UTC')
self.assertEqual(1, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(1, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(0), Equity)
self.assertIsInstance(algo.sid(1), Equity)
def test_future_chain(self):
""" Tests the future_chain API function.
"""
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
algo = TradingAlgorithm(asset_metadata=metadata)
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that the fields of the FutureChain object are set correctly
cl = algo.future_chain('CL')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.as_of_date, algo.datetime)
# Check the fields are set correctly if an as_of_date is supplied
as_of_date = pd.Timestamp('1952-08-11', tz='UTC')
cl = algo.future_chain('CL', as_of_date=as_of_date)
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.as_of_date, as_of_date)
cl = algo.future_chain('CL', as_of_date='1952-08-11')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.as_of_date, as_of_date)
# Check that weird capitalization is corrected
cl = algo.future_chain('cL')
self.assertEqual(cl.root_symbol, 'CL')
cl = algo.future_chain('cl')
self.assertEqual(cl.root_symbol, 'CL')
# Check that invalid root symbols raise RootSymbolNotFound
with self.assertRaises(RootSymbolNotFound):
algo.future_chain('CLZ')
with self.assertRaises(RootSymbolNotFound):
algo.future_chain('')
# Check that invalid dates raise UnsupportedDatetimeFormat
with self.assertRaises(UnsupportedDatetimeFormat):
algo.future_chain('CL', 'my_finger_slipped')
with self.assertRaises(UnsupportedDatetimeFormat):
algo.future_chain('CL', '2015-09-')
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
# Note we start sid enumeration at i+3 so as not to
# collide with sids [1, 2] added in the setUp() method.
dates = pd.date_range('2013-01-01', freq='2D', periods=2, tz='UTC')
# Create two assets with the same symbol but different
# non-overlapping date ranges.
metadata = pd.DataFrame.from_records(
[
{
'sid': i + 3,
'file_name': 'DUP',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
}
for i, date in enumerate(dates)
]
)
algo = TradingAlgorithm(asset_metadata=metadata)
# Set the period end to a date after the period end
# dates for our assets.
algo.sim_params.period_end = pd.Timestamp('2015-01-01', tz='UTC')
# With no symbol lookup date set, we will use the period end date
# for the as_of_date, resulting here in the asset with the earlier
# start date being returned.
result = algo.symbol('DUP')
self.assertEqual(result.symbol, 'DUP')
# By first calling set_symbol_lookup_date, the relevant asset
# should be returned by lookup_symbol_resolve_multiple
for i, date in enumerate(dates):
algo.set_symbol_lookup_date(date)
result = algo.symbol('DUP')
self.assertEqual(result.symbol, 'DUP')
self.assertEqual(result.sid, i + 3)
with self.assertRaises(UnsupportedDatetimeFormat):
algo.set_symbol_lookup_date('foobar')
class TestTransformAlgorithm(TestCase):
def setUp(self):
setup_logger(self)
self.sim_params = factory.create_simulation_parameters(num_days=4)
trade_history = factory.create_trade_history(
133,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
self.panel_source, self.panel = \
factory.create_test_panel_source(self.sim_params)
def tearDown(self):
teardown_logger(self)
def test_source_as_input(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[133]
)
algo.run(self.source)
self.assertEqual(len(algo.sources), 1)
assert isinstance(algo.sources[0], SpecificEquityTrades)
def test_invalid_order_parameters(self):
algo = InvalidOrderAlgorithm(
sids=[133],
sim_params=self.sim_params
)
algo.run(self.source)
def test_multi_source_as_input(self):
sim_params = SimulationParameters(
self.df.index[0],
self.df.index[-1]
)
algo = TestRegisterTransformAlgorithm(
sim_params=sim_params,
sids=[0, 1, 133]
)
algo.run([self.source, self.df_source], overwrite_sim_params=False)
self.assertEqual(len(algo.sources), 2)
def test_df_as_input(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[0, 1]
)
algo.run(self.df)
assert isinstance(algo.sources[0], DataFrameSource)
def test_panel_as_input(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[0, 1])
algo.run(self.panel)
assert isinstance(algo.sources[0], DataPanelSource)
def test_run_twice(self):
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
sids=[0, 1]
)
res1 = algo.run(self.df)
res2 = algo.run(self.df)
np.testing.assert_array_equal(res1, res2)
def test_data_frequency_setting(self):
self.sim_params.data_frequency = 'daily'
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
)
self.assertEqual(algo.sim_params.data_frequency, 'daily')
self.sim_params.data_frequency = 'minute'
algo = TestRegisterTransformAlgorithm(
sim_params=self.sim_params,
)
self.assertEqual(algo.sim_params.data_frequency, 'minute')
@parameterized.expand([
(TestOrderAlgorithm,),
(TestOrderValueAlgorithm,),
(TestTargetAlgorithm,),
(TestOrderPercentAlgorithm,),
(TestTargetPercentAlgorithm,),
(TestTargetValueAlgorithm,),
])
def test_order_methods(self, algo_class):
algo = algo_class(
sim_params=self.sim_params,
)
algo.run(self.df)
@parameterized.expand([
(TestOrderAlgorithm,),
(TestOrderValueAlgorithm,),
(TestTargetAlgorithm,),
(TestOrderPercentAlgorithm,),
(TestTargetValueAlgorithm,),
])
def test_order_methods_for_future(self, algo_class):
metadata = {0: {'asset_type': 'future',
'contract_multiplier': 10}}
algo = algo_class(
sim_params=self.sim_params,
asset_metadata=metadata
)
algo.run(self.df)
def test_order_method_style_forwarding(self):
method_names_to_test = ['order',
'order_value',
'order_percent',
'order_target',
'order_target_percent',
'order_target_value']
for name in method_names_to_test:
algo = TestOrderStyleForwardingAlgorithm(
sim_params=self.sim_params,
instant_fill=False,
method_name=name
)
algo.run(self.df)
def test_order_instant(self):
algo = TestOrderInstantAlgorithm(sim_params=self.sim_params,
instant_fill=True)
algo.run(self.df)
def test_minute_data(self):
source = RandomWalkSource(freq='minute',
start=pd.Timestamp('2000-1-3',
tz='UTC'),
end=pd.Timestamp('2000-1-4',
tz='UTC'))
self.sim_params.data_frequency = 'minute'
algo = TestOrderInstantAlgorithm(sim_params=self.sim_params,
instant_fill=True)
algo.run(source)
class TestPositions(TestCase):
def setUp(self):
setup_logger(self)
self.sim_params = factory.create_simulation_parameters(num_days=4)
trade_history = factory.create_trade_history(
1,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
def tearDown(self):
teardown_logger(self)
def test_empty_portfolio(self):
algo = EmptyPositionsAlgorithm(sim_params=self.sim_params)
daily_stats = algo.run(self.df)
expected_position_count = [
0, # Before entering the first position
1, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(daily_stats.ix[i]['num_positions'],
expected)
def test_noop_orders(self):
algo = AmbitiousStopLimitAlgorithm(sid=1)
daily_stats = algo.run(self.source)
# Verify that positions are empty for all dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.all())
class TestAlgoScript(TestCase):
def setUp(self):
days = 251
self.sim_params = factory.create_simulation_parameters(num_days=days)
setup_logger(self)
trade_history = factory.create_trade_history(
133,
[10.0] * days,
[100] * days,
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(sids=[133],
event_list=trade_history)
self.df_source, self.df = \
factory.create_test_df_source(self.sim_params)
self.zipline_test_config = {
'sid': 0,
}
def tearDown(self):
teardown_logger(self)
def test_noop(self):
algo = TradingAlgorithm(initialize=initialize_noop,
handle_data=handle_data_noop)
algo.run(self.df)
def test_noop_string(self):
algo = TradingAlgorithm(script=noop_algo)
algo.run(self.df)
def test_api_calls(self):
algo = TradingAlgorithm(initialize=initialize_api,
handle_data=handle_data_api)
algo.run(self.df)
def test_api_calls_string(self):
algo = TradingAlgorithm(script=api_algo)
algo.run(self.df)
def test_api_get_environment(self):
platform = 'zipline'
metadata = {0: {'symbol': 'TEST',
'asset_type': 'equity'}}
algo = TradingAlgorithm(script=api_get_environment_algo,
asset_metadata=metadata,
platform=platform)
algo.run(self.df)
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
metadata = {0: {'symbol': 'TEST',
'asset_type': 'equity'}}
algo = TradingAlgorithm(script=api_symbol_algo,
asset_metadata=metadata)
algo.run(self.df)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = TradingAlgorithm(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data[0].price)
context.incr += 1""",
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
# this matches the value in the algotext initialize
# method, and will be used inside assert_single_position
# to confirm we have as many transactions as orders we
# placed.
self.zipline_test_config['order_count'] = 1
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = assert_single_position(self, zipline)
# confirm the slippage and commission on a sample
# transaction
recorded_price = output[1]['daily_perf']['recorded_vars']['price']
transaction = output[1]['daily_perf']['transactions'][0]
self.assertEqual(100.0, transaction['commission'])
expected_spread = 0.05
expected_commish = 0.10
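# The $100.00 per-trade commission spread across the 1000-share order works
# out to $0.10/share, and FixedSlippage(spread=0.10) contributes half the
# spread ($0.05) to the fill price, which is where the two values above come from.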
expected_price = recorded_price - expected_spread - expected_commish
self.assertEqual(expected_price, transaction['price'])
def test_volshare_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = TradingAlgorithm(
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
set_commission(commission.PerShare(0.02))
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data[0].price)
record(volume=data[0].volume)
record(incr=context.incr)
context.incr += 1
""",
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 100
# 67 will be used inside assert_single_position
# to confirm we have as many transactions as expected.
# The algo places 2 trades of 5000 shares each. The trade
# events have volume ranging from 100 to 950. The volume cap
# of 0.3 limits the trade volume to a range of 30 - 316 shares.
# The spreadsheet linked below calculates the total position
# size over each bar, and predicts 67 txns will be required
# to fill the two orders. The number of bars and transactions
# differ because some bars result in multiple txns. See
# spreadsheet for details:
# https://www.dropbox.com/s/ulrk2qt0nrtrigb/Volume%20Share%20Worksheet.xlsx
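# (The 0.3 volume cap applies per bar, e.g. a 100-share bar can fill at most
# 30 shares of the outstanding order.)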
self.zipline_test_config['expected_transactions'] = 67
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = assert_single_position(self, zipline)
# confirm the slippage and commission on a sample
# transaction
per_share_commish = 0.02
perf = output[1]
transaction = perf['daily_perf']['transactions'][0]
commish = transaction['amount'] * per_share_commish
self.assertEqual(commish, transaction['commission'])
self.assertEqual(2.029, transaction['price'])
def test_algo_record_vars(self):
test_algo = TradingAlgorithm(
script=record_variables,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
self.assertEqual(len(output), 252)
incr = []
for o in output[:200]:
incr.append(o['daily_perf']['recorded_vars']['incr'])
np.testing.assert_array_equal(incr, range(1, 201))
def test_algo_record_allow_mock(self):
"""
Test that values from "MagicMock"ed methods can be passed to record.
Relevant for our basic/validation and methods like history, which
will end up returning a MagicMock instead of a DataFrame.
"""
test_algo = TradingAlgorithm(
script=record_variables,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
test_algo.record(foo=MagicMock())
def _algo_record_float_magic_should_pass(self, var_type):
test_algo = TradingAlgorithm(
script=record_float_magic % var_type,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
self.assertEqual(len(output), 252)
incr = []
for o in output[:200]:
incr.append(o['daily_perf']['recorded_vars']['data'])
np.testing.assert_array_equal(incr, [np.nan] * 200)
def test_algo_record_nan(self):
self._algo_record_float_magic_should_pass('nan')
def test_order_methods(self):
"""
Only test that order methods can be called without error.
Correct filling of orders is tested in zipline.
"""
test_algo = TradingAlgorithm(
script=call_all_order_methods,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 200
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
def test_order_in_init(self):
"""
Test that calling order in initialize
will raise an error.
"""
with self.assertRaises(OrderDuringInitialize):
test_algo = TradingAlgorithm(
script=call_order_in_init,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
test_algo.run(self.source)
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
test_algo = TradingAlgorithm(
script=access_portfolio_in_init,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 1
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
test_algo = TradingAlgorithm(
script=access_account_in_init,
sim_params=self.sim_params,
)
set_algo_instance(test_algo)
self.zipline_test_config['algorithm'] = test_algo
self.zipline_test_config['trade_count'] = 1
zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
output, _ = drain_zipline(self, zipline)
class TestHistory(TestCase):
@classmethod
def setUpClass(cls):
cls._start = pd.Timestamp('1991-01-01', tz='UTC')
cls._end = pd.Timestamp('1991-01-15', tz='UTC')
cls.sim_params = factory.create_simulation_parameters(
data_frequency='minute',
)
@property
def source(self):
return RandomWalkSource(start=self._start, end=self._end)
def test_history(self):
history_algo = """
from zipline.api import history, add_history
def initialize(context):
add_history(10, '1d', 'price')
def handle_data(context, data):
df = history(10, '1d', 'price')
"""
algo = TradingAlgorithm(
script=history_algo,
sim_params=self.sim_params,
)
output = algo.run(self.source)
self.assertIsNot(output, None)
def test_history_without_add(self):
def handle_data(algo, data):
algo.history(1, '1m', 'price')
algo = TradingAlgorithm(
initialize=lambda _: None,
handle_data=handle_data,
sim_params=self.sim_params,
)
algo.run(self.source)
self.assertIsNotNone(algo.history_container)
self.assertEqual(algo.history_container.buffer_panel.window_length, 1)
def test_add_history_in_handle_data(self):
def handle_data(algo, data):
algo.add_history(1, '1m', 'price')
algo = TradingAlgorithm(
initialize=lambda _: None,
handle_data=handle_data,
sim_params=self.sim_params,
)
algo.run(self.source)
self.assertIsNotNone(algo.history_container)
self.assertEqual(algo.history_container.buffer_panel.window_length, 1)
class TestGetDatetime(TestCase):
@parameterized.expand(
[
('default', None,),
('utc', 'UTC',),
('us_east', 'US/Eastern',),
]
)
def test_get_datetime(self, name, tz):
algo = dedent(
"""
import pandas as pd
from zipline.api import get_datetime
def initialize(context):
context.tz = {tz} or 'UTC'
context.first_bar = True
def handle_data(context, data):
if context.first_bar:
dt = get_datetime({tz})
if dt.tz.zone != context.tz:
raise ValueError("Mismatched Zone")
elif dt.tz_convert("US/Eastern").hour != 9:
raise ValueError("Mismatched Hour")
elif dt.tz_convert("US/Eastern").minute != 31:
raise ValueError("Mismatched Minute")
context.first_bar = False
""".format(tz=repr(tz))
)
start = to_utc('2014-01-02 9:31')
end = to_utc('2014-01-03 9:31')
source = RandomWalkSource(
start=start,
end=end,
)
sim_params = factory.create_simulation_parameters(
data_frequency='minute'
)
algo = TradingAlgorithm(
script=algo,
sim_params=sim_params,
identifiers=[1]
)
algo.run(source)
self.assertFalse(algo.first_bar)
class TestTradingControls(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(num_days=4)
self.sid = 133
self.trade_history = factory.create_trade_history(
self.sid,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=self.trade_history)
def _check_algo(self,
algo,
handle_data,
expected_order_count,
expected_exc):
algo._handle_data = handle_data
with self.assertRaises(expected_exc) if expected_exc else nullctx():
algo.run(self.source)
self.assertEqual(algo.order_count, expected_order_count)
self.source.rewind()
def check_algo_succeeds(self, algo, handle_data, order_count=4):
# Default for order_count assumes one order per handle_data call.
self._check_algo(algo, handle_data, order_count, None)
def check_algo_fails(self, algo, handle_data, order_count):
self._check_algo(algo,
handle_data,
order_count,
TradingControlViolation)
def test_set_max_position_size(self):
# Buy one share four times. Should be fine.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=500.0)
self.check_algo_succeeds(algo, handle_data)
# Buy three shares four times. Should bail on the fourth before it's
# placed.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=500.0)
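# Would-be positions grow 3, 6, 9, then 12 shares; the fourth order breaches
# max_shares=10, so only three orders are placed.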
self.check_algo_fails(algo, handle_data, 3)
# Buy three shares four times. Should bail due to max_notional on the
# third attempt.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 3)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=61.0)
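# At trade prices of $10, $10, $11 the position notional grows to $30, $60,
# then $99, breaching max_notional=61.0 on the third order.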
self.check_algo_fails(algo, handle_data, 2)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(sid=self.sid + 1,
max_shares=10,
max_notional=61.0)
self.check_algo_succeeds(algo, handle_data)
# Set the trading control sid to None, then BUY ALL THE THINGS!. Should
# fail because setting sid to None makes the control apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxPositionSizeAlgorithm(max_shares=10, max_notional=61.0)
self.check_algo_fails(algo, handle_data, 0)
def test_set_do_not_order_list(self):
# set the restricted list to be the sid, and fail.
algo = SetDoNotOrderListAlgorithm(
sid=self.sid,
restricted_list=[self.sid])
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
self.check_algo_fails(algo, handle_data, 0)
# set the restricted list to exclude the sid, and succeed
algo = SetDoNotOrderListAlgorithm(
sid=self.sid,
restricted_list=[134, 135, 136])
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 100)
algo.order_count += 1
self.check_algo_succeeds(algo, handle_data)
def test_set_max_order_size(self):
# Buy one share.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=500.0)
self.check_algo_succeeds(algo, handle_data)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed shares.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid,
max_shares=3,
max_notional=500.0)
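# Order amounts grow 1, 2, 3, then 4 shares; the fourth exceeds max_shares=3.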
self.check_algo_fails(algo, handle_data, 3)
# Buy 1, then 2, then 3, then 4 shares. Bail on the last attempt
# because we exceed notional.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), algo.order_count + 1)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid,
max_shares=10,
max_notional=40.0)
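# At trade prices of $10, $10, $11, $11 the order notionals are $10, $20, $33,
# then $44, exceeding max_notional=40.0 on the fourth order.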
self.check_algo_fails(algo, handle_data, 3)
# Set the trading control to a different sid, then BUY ALL THE THINGS!.
# Should continue normally.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(sid=self.sid + 1,
max_shares=1,
max_notional=1.0)
self.check_algo_succeeds(algo, handle_data)
# Set the trading control sid to None, then BUY ALL THE THINGS!.
# Should fail because not specifying a sid makes the trading control
# apply to all sids.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), 10000)
algo.order_count += 1
algo = SetMaxOrderSizeAlgorithm(max_shares=1,
max_notional=1.0)
self.check_algo_fails(algo, handle_data, 0)
def test_set_max_order_count(self):
# Override the default setUp to use six-hour intervals instead of full
# days so we can exercise trading-session rollover logic.
trade_history = factory.create_trade_history(
self.sid,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(hours=6),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=trade_history)
def handle_data(algo, data):
for i in range(5):
algo.order(algo.sid(self.sid), 1)
algo.order_count += 1
algo = SetMaxOrderCountAlgorithm(3)
self.check_algo_fails(algo, handle_data, 3)
# Second call to handle_data is the same day as the first, so the last
# order of the second call should fail.
algo = SetMaxOrderCountAlgorithm(9)
self.check_algo_fails(algo, handle_data, 9)
# Only ten orders are placed per day, so this should pass even though
# in total more than 20 orders are placed.
algo = SetMaxOrderCountAlgorithm(10)
self.check_algo_succeeds(algo, handle_data, order_count=20)
def test_long_only(self):
# Sell immediately -> fail immediately.
def handle_data(algo, data):
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_fails(algo, handle_data, 0)
# Buy on even days, sell on odd days. Never takes a short position, so
# should succeed.
def handle_data(algo, data):
if (algo.order_count % 2) == 0:
algo.order(algo.sid(self.sid), 1)
else:
algo.order(algo.sid(self.sid), -1)
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_succeeds(algo, handle_data)
# Buy on first three days, then sell off holdings. Should succeed.
def handle_data(algo, data):
amounts = [1, 1, 1, -3]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_succeeds(algo, handle_data)
# Buy on first three days, then sell off holdings plus an extra share.
# Should fail on the last sale.
def handle_data(algo, data):
amounts = [1, 1, 1, -4]
algo.order(algo.sid(self.sid), amounts[algo.order_count])
algo.order_count += 1
algo = SetLongOnlyAlgorithm()
self.check_algo_fails(algo, handle_data, 3)
def test_register_post_init(self):
def initialize(algo):
algo.initialized = True
def handle_data(algo, data):
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_position_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_size(self.sid, 1, 1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_max_order_count(1)
with self.assertRaises(RegisterTradingControlPostInit):
algo.set_long_only()
algo = TradingAlgorithm(initialize=initialize,
handle_data=handle_data)
algo.run(self.source)
self.source.rewind()
def test_asset_date_bounds(self):
# Run the algorithm with a sid that ends far in the future
df_source, _ = factory.create_test_df_source(self.sim_params)
metadata = {0: {'start_date': '1990-01-01',
'end_date': '2020-01-01'}}
asset_finder = AssetFinder()
algo = SetAssetDateBoundsAlgorithm(
asset_finder=asset_finder,
asset_metadata=metadata,
sim_params=self.sim_params,)
algo.run(df_source)
# Run the algorithm with a sid that has already ended
df_source, _ = factory.create_test_df_source(self.sim_params)
metadata = {0: {'start_date': '1989-01-01',
'end_date': '1990-01-01'}}
asset_finder = AssetFinder()
algo = SetAssetDateBoundsAlgorithm(
asset_finder=asset_finder,
asset_metadata=metadata,
sim_params=self.sim_params,)
with self.assertRaises(TradingControlViolation):
algo.run(df_source)
# Run the algorithm with a sid that has not started
df_source, _ = factory.create_test_df_source(self.sim_params)
metadata = {0: {'start_date': '2020-01-01',
'end_date': '2021-01-01'}}
algo = SetAssetDateBoundsAlgorithm(
asset_finder=asset_finder,
asset_metadata=metadata,
sim_params=self.sim_params,)
with self.assertRaises(TradingControlViolation):
algo.run(df_source)
class TestAccountControls(TestCase):
def setUp(self):
self.sim_params = factory.create_simulation_parameters(num_days=4)
self.sidint = 133
self.trade_history = factory.create_trade_history(
self.sidint,
[10.0, 10.0, 11.0, 11.0],
[100, 100, 100, 300],
timedelta(days=1),
self.sim_params
)
self.source = SpecificEquityTrades(event_list=self.trade_history)
def _check_algo(self,
algo,
handle_data,
expected_exc):
algo._handle_data = handle_data
with self.assertRaises(expected_exc) if expected_exc else nullctx():
algo.run(self.source)
self.source.rewind()
def check_algo_succeeds(self, algo, handle_data):
# Succeeds as long as no AccountControlViolation is raised.
self._check_algo(algo, handle_data, None)
def check_algo_fails(self, algo, handle_data):
self._check_algo(algo,
handle_data,
AccountControlViolation)
def test_set_max_leverage(self):
# Set max leverage to 0 so buying one share fails.
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = SetMaxLeverageAlgorithm(0)
self.check_algo_fails(algo, handle_data)
# Set max leverage to 1 so buying one share passes
def handle_data(algo, data):
algo.order(algo.sid(self.sidint), 1)
algo = SetMaxLeverageAlgorithm(1)
self.check_algo_succeeds(algo, handle_data)
class TestClosePosAlgo(TestCase):
def setUp(self):
self.days = TradingEnvironment().trading_days
self.index = [self.days[0], self.days[1], self.days[2]]
self.panel = pd.Panel({1: pd.DataFrame({
'price': [1, 2, 4], 'volume': [1e9, 0, 0],
'type': [DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CLOSE_POSITION]},
index=self.index)
})
self.no_close_panel = pd.Panel({1: pd.DataFrame({
'price': [1, 2, 4], 'volume': [1e9, 0, 0],
'type': [DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.TRADE]},
index=self.index)
})
def test_close_position_equity(self):
metadata = {1: {'symbol': 'TEST',
'asset_type': 'equity',
'end_date': self.days[3]}}
self.algo = TestAlgorithm(sid=1, amount=1, order_count=1,
instant_fill=True, commission=PerShare(0),
asset_metadata=metadata)
self.data = DataPanelSource(self.panel)
# Check results
expected_positions = [1, 1, 0]
expected_pnl = [0, 1, 2]
results = self.run_algo()
self.check_algo_pnl(results, expected_pnl)
self.check_algo_positions(results, expected_positions)
def test_close_position_future(self):
metadata = {1: {'symbol': 'TEST',
'asset_type': 'future',
}}
self.algo = TestAlgorithm(sid=1, amount=1, order_count=1,
instant_fill=True, commission=PerShare(0),
asset_metadata=metadata)
self.data = DataPanelSource(self.panel)
# Check results
expected_positions = [1, 1, 0]
expected_pnl = [0, 1, 2]
results = self.run_algo()
self.check_algo_pnl(results, expected_pnl)
self.check_algo_positions(results, expected_positions)
def test_auto_close_future(self):
metadata = {1: {'symbol': 'TEST',
'asset_type': 'future',
'notice_date': self.days[3],
'expiration_date': self.days[4]}}
self.algo = TestAlgorithm(sid=1, amount=1, order_count=1,
instant_fill=True, commission=PerShare(0),
asset_metadata=metadata)
self.data = DataPanelSource(self.no_close_panel)
# Check results
results = self.run_algo()
expected_pnl = [0, 1, 2]
self.check_algo_pnl(results, expected_pnl)
expected_positions = [1, 1, 0]
self.check_algo_positions(results, expected_positions)
def run_algo(self):
results = self.algo.run(self.data)
return results
def check_algo_pnl(self, results, expected_pnl):
for i, pnl in enumerate(results.pnl):
self.assertEqual(pnl, expected_pnl[i])
def check_algo_positions(self, results, expected_positions):
for i, amount in enumerate(results.positions):
if amount:
actual_position = amount[0]['amount']
else:
actual_position = 0
self.assertEqual(actual_position, expected_positions[i])
from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util
from benchmark.test_types import *
import importlib
import os
import subprocess
import time
import re
from pprint import pprint
import sys
import traceback
import json
import logging
import csv
import shlex
import math
from collections import OrderedDict
from threading import Thread
from threading import Event
from utils import header
# Cross-platform colored text
from colorama import Fore, Back, Style
from datetime import datetime
from datetime import timedelta
class FrameworkTest:
headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
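# Illustrative expansion (the accept value here is hypothetical):
# headers_template.format(accept='application/json') ->
#   "-H 'Host: localhost' -H 'application/json' -H 'Connection: keep-alive'"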
# Used for test types that require no pipelining or query string params.
concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {levels}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} --latency -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
{wrk} {headers} --latency -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""
# Used for test types that require pipelining.
pipeline_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {levels}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} --latency -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
{wrk} {headers} --latency -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""
# Used for test types that require a database -
# These tests run at a static concurrency level and vary the size of
# the query sent with each request
query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {levels}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""
############################################################
# start(benchmarker)
# Start the test using its setup file
############################################################
def start(self, out, err):
# Setup environment variables
logDir = os.path.join(self.fwroot, self.benchmarker.latest_results_directory, 'logs', self.name.lower())
bash_functions_path = os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
setup_util.replace_environ(config='$FWROOT/config/benchmark_profile',
command='''\
export TROOT=%s && \
export IROOT=%s && \
export DBHOST=%s && \
export LOGDIR=%s && \
export MAX_THREADS=%s && \
export MAX_CONCURRENCY=%s \
''' % (
self.directory,
self.install_root,
self.database_host,
logDir,
self.benchmarker.threads,
max(self.benchmarker.concurrency_levels)))
# Always ensure that IROOT belongs to the runner_user
chown = "sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_root))
subprocess.check_call(chown, shell=True, cwd=self.fwroot, executable='/bin/bash')
# Run the module start inside parent of TROOT
# - we use the parent for historical reasons; a number of tests
# still refer to their TROOT manually
previousDir = os.getcwd()
os.chdir(os.path.dirname(self.troot))
logging.info("Running setup module start (cwd=%s)", self.directory)
# Run the start script for the test as the "testrunner" user
#
# `sudo` - Switching user requires superuser privs
# -u [username] The username
# -E Preserves the current environment variables
# -H Forces the home var (~) to be reset to the user specified
# `stdbuf` - Disable buffering, send output to python ASAP
# -o0 zero-sized buffer for stdout
# -e0 zero-sized buffer for stderr
# `bash` - Run the setup.sh script using bash
# -e Force bash to exit on first error
# -x Turn on bash tracing e.g. print commands before running
#
# Most servers do not output to stdout/stderr while serving
# requests so there is no performance hit from disabling
# output buffering. This disabling is necessary to
# a) allow TFB to show output in real time and b) avoid losing
# output in the buffer when the testrunner processes are forcibly
# killed
#
# See http://www.pixelbeat.org/programming/stdio_buffering/
# See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/
# See http://eyalarubas.com/python-subproc-nonblock.html
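# Illustrative only (the user and paths below are placeholders; the real
# values come from the benchmarker config), the composed command looks
# roughly like:
#   sudo -u testrunner -E -H stdbuf -o0 -e0 bash -exc \
#       "source .../bash_functions.sh && source .../setup.sh"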
command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % (
self.benchmarker.runner_user,
bash_functions_path,
os.path.join(self.troot, self.setup_file))
debug_command = '''\
export FWROOT=%s && \\
export TROOT=%s && \\
export IROOT=%s && \\
export DBHOST=%s && \\
export LOGDIR=%s && \\
export MAX_THREADS=%s && \\
export MAX_CONCURRENCY=%s && \\
cd %s && \\
%s''' % (self.fwroot,
self.directory,
self.install_root,
self.database_host,
logDir,
self.benchmarker.threads,
max(self.benchmarker.concurrency_levels),
self.directory,
command)
logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)
def tee_output(prefix, line):
# Needs to be one atomic write
# Explicitly use UTF-8 as it's the most common framework output
# TODO improve encoding handling
line = prefix.encode('utf-8') + line
# Log to current terminal
sys.stdout.write(line)
sys.stdout.flush()
# logging.error("".join([prefix, line]))
out.write(line)
out.flush()
# Start the setup.sh command
p = subprocess.Popen(command, cwd=self.directory,
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
nbsr = setup_util.NonBlockingStreamReader(p.stdout,
"%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))
# Set a limit on total execution time of setup.sh
timeout = datetime.now() + timedelta(minutes = 105)
time_remaining = timeout - datetime.now()
# Travis-CI aborts after 10 minutes without output, so print a status line at least every 5 minutes
travis_timeout = datetime.now() + timedelta(minutes = 5)
# Flush output until setup.sh work is finished. This is
# either a) when setup.sh exits b) when the port is bound
# c) when we run out of time. Note that 'finished' doesn't
# guarantee setup.sh process is dead - the OS may choose to make
# setup.sh a zombie process if it still has living children
#
# Note: child processes forked (using &) will remain alive
# after setup.sh has exited. They will have inherited the
# stdout/stderr descriptors and will be directing their
# output to the pipes.
#
prefix = "Setup %s: " % self.name
while (p.poll() is None
and not self.benchmarker.is_port_bound(self.port)
and not time_remaining.total_seconds() < 0):
# The conditions above are slow to check, so
# we will delay output substantially if we only
# print one line per condition check.
# Adding a tight loop here mitigates the effect,
# ensuring that most of the output directly from
# setup.sh is sent to tee_output before the outer
# loop exits and prints things like "setup.sh exited"
#
for i in xrange(10):
try:
line = nbsr.readline(0.05)
if line:
tee_output(prefix, line)
# Reset Travis-CI timer
travis_timeout = datetime.now() + timedelta(minutes = 5)
except setup_util.EndOfStream:
tee_output(prefix, "Setup has terminated\n")
break
time_remaining = timeout - datetime.now()
if (travis_timeout - datetime.now()).total_seconds() < 0:
sys.stdout.write(prefix + 'Printing so Travis-CI does not time out\n')
sys.stdout.write(prefix + "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
sys.stdout.flush()
travis_timeout = datetime.now() + timedelta(minutes = 5)
# Did we time out?
if time_remaining.total_seconds() < 0:
tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file)
p.kill()
return 1
# What's our return code?
# If setup.sh has terminated, use that code
# Otherwise, detect if the port was bound
tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1)
if p.poll() is not None:
tee_output(prefix, "%s.sh process exited naturally with %s\n" % (self.setup_file, p.poll()))
elif self.benchmarker.is_port_bound(self.port):
tee_output(prefix, "Bound port detected on %s\n" % self.port)
# Before we return control to the benchmarker, spin up a
# thread to keep an eye on the pipes in case the running
# framework uses stdout/stderr. Once all processes accessing
# the subprocess.PIPEs are dead, this thread will terminate.
# Use a different prefix to indicate this is the framework
# speaking
prefix = "Server %s: " % self.name
def watch_child_pipes(nbsr, prefix):
while True:
try:
line = nbsr.readline(60)
if line:
tee_output(prefix, line)
except setup_util.EndOfStream:
tee_output(prefix, "Framework processes have terminated\n")
return
watch_thread = Thread(target = watch_child_pipes,
args = (nbsr, prefix))
watch_thread.daemon = True
watch_thread.start()
logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
os.chdir(previousDir)
return retcode
############################################################
# End start
############################################################
############################################################
# verify_urls
# Verifies each of the URLs for this test. This will simply
# curl the URL and check its return status.
# For each URL, a flag will be set on this object indicating
# whether or not it passed
# Returns True if all verifications succeeded
############################################################
def verify_urls(self, out, err):
result = True
def verify_type(test_type):
test = self.runTests[test_type]
test.setup_out_err(out, err)
out.write(header("VERIFYING %s" % test_type.upper()))
base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)
try:
results = test.verify(base_url)
except Exception as e:
results = [('fail',"""Caused Exception in TFB
This almost certainly means your return value is incorrect,
but also that you have found a bug. Please submit an issue
including this message: %s\n%s""" % (e, traceback.format_exc()),
base_url)]
logging.warning("Verifying test %s for %s caused an exception: %s\n%s",
test_type, self.name, e, traceback.format_exc())
test.failed = any(result == 'fail' for (result, reason, url) in results)
test.warned = any(result == 'warn' for (result, reason, url) in results)
test.passed = all(result == 'pass' for (result, reason, url) in results)
def output_result(result, reason, url):
specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
color = Fore.GREEN
if result.upper() == "WARN":
color = Fore.YELLOW
elif result.upper() == "FAIL":
color = Fore.RED
out.write((" " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
print (" " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url)
if reason is not None and len(reason) != 0:
for line in reason.splitlines():
out.write(" " + line + '\n')
print " " + line
if not test.passed:
out.write(" See %s\n" % specific_rules_url)
print " See %s\n" % specific_rules_url
[output_result(r1,r2,url) for (r1, r2, url) in results]
if test.failed:
self.benchmarker.report_verify_results(self, test_type, 'fail')
elif test.warned:
self.benchmarker.report_verify_results(self, test_type, 'warn')
elif test.passed:
self.benchmarker.report_verify_results(self, test_type, 'pass')
else:
raise Exception("Unknown error - test did not pass, warn, or fail")
result = True
for test_type in self.runTests:
verify_type(test_type)
if self.runTests[test_type].failed:
result = False
return result
############################################################
# End verify_urls
############################################################
############################################################
# benchmark
# Runs the benchmark for each type of test that it implements
# JSON/DB/Query.
############################################################
def benchmark(self, out, err):
def benchmark_type(test_type):
out.write("BENCHMARKING %s ... " % test_type.upper())
test = self.runTests[test_type]
test.setup_out_err(out, err)
output_file = self.benchmarker.output_file(self.name, test_type)
if not os.path.exists(output_file):
# Open to create the empty file
with open(output_file, 'w'):
pass
if not test.failed:
if test_type == 'plaintext': # One special case
remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
elif test_type == 'query' or test_type == 'update':
remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
else:
remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)
# Begin resource usage metrics collection
self.__begin_logging(test_type)
# Run the benchmark
with open(output_file, 'w') as raw_file:
p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
p.communicate(remote_script)
err.flush()
# End resource usage metrics collection
self.__end_logging()
results = self.__parse_test(test_type)
print "Benchmark results:"
pprint(results)
self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
out.write( "Complete\n" )
out.flush()
for test_type in self.runTests:
benchmark_type(test_type)
############################################################
# End benchmark
############################################################
############################################################
# parse_all
# Method meant to be run for a given timestamp
############################################################
def parse_all(self):
for test_type in self.runTests:
if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
results = self.__parse_test(test_type)
self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
##########################################################################################
# Private Methods
##########################################################################################
############################################################
# __parse_test(test_type)
############################################################
def __parse_test(self, test_type):
try:
results = dict()
results['results'] = []
stats = []
if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
is_warmup = True
rawData = None
for line in raw_data:
if "Queries:" in line or "Concurrency:" in line:
is_warmup = False
rawData = None
continue
if "Warmup" in line or "Primer" in line:
is_warmup = True
continue
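# Everything between a Primer/Warmup banner and the next Concurrency:/Queries:
# banner is ignored; each Concurrency/Queries banner starts a fresh rawData dict.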
if not is_warmup:
if rawData is None:
rawData = dict()
results['results'].append(rawData)
#if "Requests/sec:" in line:
# m = re.search("Requests/sec:\s+([0-9]+)", line)
# rawData['reportedResults'] = m.group(1)
# search for weighttp data such as succeeded and failed.
if "Latency" in line:
m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
if len(m) == 4:
rawData['latencyAvg'] = m[0]
rawData['latencyStdev'] = m[1]
rawData['latencyMax'] = m[2]
# rawData['latencyStdevPercent'] = m[3]
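# e.g. a typical wrk latency line looks like (values are illustrative):
#   Latency   631.86us  641.92us  22.43ms   98.91%
# giving avg, stdev, max and the +/- stdev percentage, in that order.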
#if "Req/Sec" in line:
# m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
# if len(m) == 4:
# rawData['requestsAvg'] = m[0]
# rawData['requestsStdev'] = m[1]
# rawData['requestsMax'] = m[2]
# rawData['requestsStdevPercent'] = m[3]
#if "requests in" in line:
# m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
# if m != None:
# # parse out the raw time, which may be in minutes or seconds
# raw_time = m.group(1)
# if "ms" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
# elif "s" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-1])
# elif "m" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
# elif "h" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
if "requests in" in line:
m = re.search("([0-9]+) requests in", line)
if m is not None:
rawData['totalRequests'] = int(m.group(1))
if "Socket errors" in line:
if "connect" in line:
m = re.search("connect ([0-9]+)", line)
rawData['connect'] = int(m.group(1))
if "read" in line:
m = re.search("read ([0-9]+)", line)
rawData['read'] = int(m.group(1))
if "write" in line:
m = re.search("write ([0-9]+)", line)
rawData['write'] = int(m.group(1))
if "timeout" in line:
m = re.search("timeout ([0-9]+)", line)
rawData['timeout'] = int(m.group(1))
if "Non-2xx" in line:
m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
if m is not None:
rawData['5xx'] = int(m.group(1))
if "STARTTIME" in line:
m = re.search("[0-9]+", line)
rawData["startTime"] = int(m.group(0))
if "ENDTIME" in line:
m = re.search("[0-9]+", line)
rawData["endTime"] = int(m.group(0))
test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
# rawData["averageStats"] = self.__calculate_average_stats(test_stats)
stats.append(test_stats)
with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
json.dump(stats, stats_file, indent=2)
return results
except IOError:
return None
############################################################
# End benchmark
############################################################
############################################################
# __generate_concurrency_script(url, port)
# Generates the string containing the bash script that will
# be run on the client to benchmark a single test. This
# specifically works for the variable concurrency tests (JSON
# and DB)
############################################################
def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
headers = self.headers_template.format(accept=accept_header)
return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)
############################################################
# __generate_pipeline_script(url, port)
# Generates the string containing the bash script that will
# be run on the client to benchmark a single pipeline test.
############################################################
def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
headers = self.headers_template.format(accept=accept_header)
return self.pipeline_template.format(max_concurrency=16384,
max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
levels=" ".join("{}".format(item) for item in [256,1024,4096,16384]),
server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
pipeline=16)
############################################################
# __generate_query_script(url, port)
# Generates the string containing the bash script that will
# be run on the client to benchmark a single test. This
# specifically works for the variable query tests (Query)
############################################################
def __generate_query_script(self, url, port, accept_header):
headers = self.headers_template.format(accept=accept_header)
return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
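  # A minimal sketch of how the `levels` placeholder above is rendered (the query
  # levels here are hypothetical; the real values come from the benchmarker config):
  #   " ".join("{}".format(item) for item in [1, 5, 10, 15, 20])  ->  "1 5 10 15 20"
  # The resulting string is substituted into the bash script template so that the
  # client loops over one benchmark run per level.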
############################################################
  # Returns True if any test type in this framework test will use a DB
############################################################
def requires_database(self):
'''Returns True/False if this test requires a database'''
return any(tobj.requires_db for (ttype,tobj) in self.runTests.iteritems())
############################################################
# __begin_logging
  # Starts a dstat subprocess to monitor the resource usage, to be synced with the client's time
# TODO: MySQL and InnoDB are possible. Figure out how to implement them.
############################################################
def __begin_logging(self, test_type):
output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_type))
    dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
                    --udp --unix --vm --disk-util \
                    --rpc --rpcd --output {output_file}".format(output_file=output_file)
cmd = shlex.split(dstat_string)
dev_null = open(os.devnull, "w")
self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
##############################################################
# Begin __end_logging
  # Stops the dstat logging subprocess and blocks until shutdown is complete.
##############################################################
def __end_logging(self):
self.subprocess_handle.terminate()
self.subprocess_handle.communicate()
##############################################################
# Begin __parse_stats
# For each test type, process all the statistics, and return a multi-layered dictionary
# that has a structure as follows:
# (timestamp)
# | (main header) - group that the stat is in
# | | (sub header) - title of the stat
# | | | (stat) - the stat itself, usually a floating point number
##############################################################
def __parse_stats(self, test_type, start_time, end_time, interval):
stats_dict = dict()
stats_file = self.benchmarker.stats_file(self.name, test_type)
with open(stats_file) as stats:
while(stats.next() != "\n"): # dstat doesn't output a completely compliant CSV file - we need to strip the header
pass
stats_reader = csv.reader(stats)
main_header = stats_reader.next()
sub_header = stats_reader.next()
time_row = sub_header.index("epoch")
int_counter = 0
for row in stats_reader:
time = float(row[time_row])
int_counter+=1
if time < start_time:
continue
elif time > end_time:
return stats_dict
if int_counter % interval != 0:
continue
row_dict = dict()
for nextheader in main_header:
if nextheader != "":
row_dict[nextheader] = dict()
header = ""
for item_num, column in enumerate(row):
if(len(main_header[item_num]) != 0):
header = main_header[item_num]
row_dict[header][sub_header[item_num]] = float(column) # all the stats are numbers, so we want to make sure that they stay that way in json
stats_dict[time] = row_dict
return stats_dict
##############################################################
# End __parse_stats
##############################################################
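  # Illustrative sketch (hypothetical values): the dictionary returned by
  # __parse_stats is keyed by epoch timestamp, then dstat group, then column, e.g.
  #   {1421792700.0: {'total cpu usage': {'usr': 5.0, 'sys': 2.0, 'idl': 93.0},
  #                   'memory usage':    {'used': 1.2e9, 'free': 2.4e9},
  #                   'net/total':       {'recv': 12000.0, 'send': 34000.0}},
  #    1421792701.0: {...}}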
def __getattr__(self, name):
"""For backwards compatibility, we used to pass benchmarker
as the argument to the setup.sh files"""
try:
x = getattr(self.benchmarker, name)
except AttributeError:
print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
print "This is probably a bug"
raise
return x
##############################################################
# Begin __calculate_average_stats
# We have a large amount of raw data for the statistics that
# may be useful for the stats nerds, but most people care about
# a couple of numbers. For now, we're only going to supply:
# * Average CPU
# * Average Memory
# * Total network use
# * Total disk use
# More may be added in the future. If they are, please update
# the above list.
# Note: raw_stats is directly from the __parse_stats method.
# Recall that this consists of a dictionary of timestamps,
# each of which contain a dictionary of stat categories which
# contain a dictionary of stats
##############################################################
def __calculate_average_stats(self, raw_stats):
raw_stat_collection = dict()
for timestamp, time_dict in raw_stats.items():
for main_header, sub_headers in time_dict.items():
item_to_append = None
if 'cpu' in main_header:
# We want to take the idl stat and subtract it from 100
# to get the time that the CPU is NOT idle.
          item_to_append = 100.0 - sub_headers['idl']
elif main_header == 'memory usage':
item_to_append = sub_headers['used']
elif 'net' in main_header:
          # Network stats have two parts - receive and send. We'll use a tuple of
          # style (receive, send)
item_to_append = (sub_headers['recv'], sub_headers['send'])
        elif 'dsk' in main_header or 'io' in main_header:
          # Similar to the network stats, except our tuple looks like (read, write)
item_to_append = (sub_headers['read'], sub_headers['writ'])
if item_to_append is not None:
if main_header not in raw_stat_collection:
raw_stat_collection[main_header] = list()
raw_stat_collection[main_header].append(item_to_append)
# Simple function to determine human readable size
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num):
      # We'll assume that any number we get is convertible to a float, just in case
num = float(num)
for x in ['bytes','KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
# Now we have our raw stats in a readable format - we need to format it for display
# We need a floating point sum, so the built in sum doesn't cut it
display_stat_collection = dict()
for header, values in raw_stat_collection.items():
display_stat = None
if 'cpu' in header:
display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif header == 'memory usage':
        display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif 'net' in header:
receive, send = zip(*values) # unzip
display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
else: # if 'dsk' or 'io' in header:
read, write = zip(*values) # unzip
display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
display_stat_collection[header] = display_stat
    return display_stat_collection
###########################################################################################
# End __calculate_average_stats
#########################################################################################
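  # Illustrative sketch (hypothetical numbers) of the summary produced above:
  #   sizeof_fmt(123456789)  ->  '117.7MB'
  # and display_stat_collection might look like
  #   {'total cpu usage': '42.0bytes',   # average non-idle CPU; sizeof_fmt appends a
  #                                      # size unit even to this percentage value
  #    'memory usage': '1.2GB',
  #    'net/total': {'receive': '1.5GB', 'send': '350.2MB'},
  #    'dsk/total': {'read': '2.1GB', 'write': '4.3GB'}}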
##########################################################################################
# Constructor
##########################################################################################
def __init__(self, name, directory, benchmarker, runTests, args):
self.name = name
self.directory = directory
self.benchmarker = benchmarker
self.runTests = runTests
self.fwroot = benchmarker.fwroot
self.approach = ""
self.classification = ""
self.database = ""
self.framework = ""
self.language = ""
self.orm = ""
self.platform = ""
self.webserver = ""
self.os = ""
self.database_os = ""
self.display_name = ""
self.notes = ""
self.versus = ""
# setup logging
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
self.install_root="%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy == 'pertest':
self.install_root="%s/pertest/%s" % (self.install_root, name)
# Used in setup.sh scripts for consistency with
# the bash environment variables
self.troot = self.directory
self.iroot = self.install_root
self.__dict__.update(args)
############################################################
# End __init__
############################################################
############################################################
# End FrameworkTest
############################################################
##########################################################################################
# Static methods
##########################################################################################
##############################################################
# parse_config(config, directory, benchmarker)
# parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
tests = []
  # This sort ordering key returns the length of the test name so that longer
  # names sort last. There were SO many problems involved with 'plaintext' being
  # run first (rather, just not last) that we needed to ensure that it was run
  # last for every framework. (A short sketch of this ordering follows parse_config below.)
def testOrder(type_name):
return len(type_name)
# The config object can specify multiple tests
# Loop over them and parse each into a FrameworkTest
for test in config['tests']:
names = [name for (name,keys) in test.iteritems()]
if "default" not in names:
logging.warn("Framework %s does not define a default test in benchmark_config.json", config['framework'])
# Check that each test configuration is acceptable
# Throw exceptions if a field is missing, or how to improve the field
for test_name, test_keys in test.iteritems():
# Ensure that each FrameworkTest has a framework property, inheriting from top-level if not
      if not test_keys.get('framework'):
test_keys['framework'] = config['framework']
# Confirm required keys are present
required_keys = ['language','webserver','classification','database','approach','orm','framework','os','database_os']
missing = list(set(required_keys) - set(test_keys))
if len(missing) > 0:
missingstr = (", ").join(map(str, missing))
raise Exception("benchmark_config.json for test %s is invalid, please amend and add the following required keys: [%s]"
% (test_name, missingstr))
# Check that test url values are all appropriate
example_urls = {
"json_url": "/json",
"db_url": "/mysql/db",
"query_url": "/mysql/queries?queries= or /mysql/queries/",
"fortune_url": "/mysql/fortunes",
"update_url": "/mysql/updates?queries= or /mysql/updates/",
"plaintext_url": "/plaintext"
}
for test_url in ["json_url","db_url","query_url","fortune_url","update_url","plaintext_url"]:
key_value = test_keys.get(test_url, None)
if key_value != None and not key_value.startswith('/'):
errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
Example `%s` url: \"%s\"
""" % (test_url, test_name, key_value, test_url, example_urls[test_url])
raise Exception(errmsg)
# Check database type
# List adopted from run-ci.py
SUPPORTED_DATABASES = ["mysql","postgres","mongodb","cassandra","elasticsearch","redis"]
EDGE_CASES = ["sqlite","sqlserver","none"]
db_type = test_keys.get("database", None).lower()
      if db_type not in SUPPORTED_DATABASES + EDGE_CASES:
supportedstr = (", ").join(map(str, SUPPORTED_DATABASES))
edgestr = (", ").join(map(str, EDGE_CASES))
errmsg = """Invalid db specified for test \"%s\" in framework \"%s\", please specify a supported database or \"None\"\n
Supported databases: [%s]\n
Edge cases: [%s]\n
Supplied (lowercased): \"%s\"
""" % (test_name, config["framework"], supportedstr, edgestr, db_type)
raise Exception(errmsg)
# Check language
# "Scala" from "/home/vagrant/FrameworkBenchmarks/frameworks/Scala/finagle"
recommended_lang = directory.split('/')[-2]
if test_keys.get("language", "") == "":
raise Exception("Please specify a language for test \"%s\" in framework \"%s\", suggestion: \"%s\""
% (test_name, config["framework"], recommended_lang))
# Check approach
SUPPORTED_APPROACHES = ["realistic","stripped"]
test_approach = test_keys.get("approach", None).lower()
if test_approach not in SUPPORTED_APPROACHES:
approachstr = (", ").join(map(str, SUPPORTED_APPROACHES))
errmsg = """Invalid approach specified for test \"%s\" in framework \"%s\", please specify a supported approach\n
Supported approaches: [%s]\n
Suggestion: \"Realistic\"\n
Supplied (lowercased): \"%s\"
""" % (test_name, config["framework"], approachstr, test_approach)
raise Exception(errmsg)
# Check classification
SUPPORTED_CLASSIFICATIONS = ["fullstack","micro","platform"]
test_classification = test_keys.get("classification", None).lower()
if test_classification not in SUPPORTED_CLASSIFICATIONS:
classstr = (", ").join(map(str, SUPPORTED_CLASSIFICATIONS))
errmsg = """Invalid classification specified for test \"%s\" in framework \"%s\", please specify a supported classification\n
Supported classifications: [%s]\n
Supplied (lowercased): \"%s\"
""" % (test_name, config["framework"], classstr, test_classification)
raise Exception(errmsg)
# Check webserver
if test_keys.get("webserver", None) == "":
raise Exception("Invalid `webserver` specified for test \"%s\" in framework \"%s\", field `webserver` cannot be empty"
% (test_name, config["framework"]))
# Check ORM
SUPPORTED_ORMS = ["full","micro","raw"]
test_orm = test_keys.get("orm", None).lower()
if test_orm not in SUPPORTED_ORMS:
ormstr = (", ").join(map(str, SUPPORTED_ORMS))
errmsg = """Invalid orm specified for test \"%s\" in framework \"%s\", please specify a supported orm type\n
Supported classifications: [%s]\n
Supplied (lowercased): \"%s\"
""" % (test_name, config["framework"], ormstr, test_orm)
raise Exception(errmsg)
# Check OS
SUPPORTED_OSES = ["linux","windows"]
test_os = test_keys.get("os", None).lower()
if test_os not in SUPPORTED_OSES:
osstr = (", ").join(map(str, SUPPORTED_OSES))
errmsg = """Invalid OS specified for test \"%s\" in framework \"%s\", please specify a supported OS\n
Supported OS's: [%s]\n
Suggestion: \"Linux\"\n
Supplied (lowercased): \"%s\"
""" % (test_name, config["framework"], osstr, test_os)
raise Exception(errmsg)
# Check Database OS
SUPPORTED_DB_OSES = ["linux","windows"]
test_db_os = test_keys.get("database_os", None).lower()
if test_db_os not in SUPPORTED_DB_OSES:
db_osstr = (", ").join(map(str, SUPPORTED_DB_OSES))
errmsg = """Invalid Database OS specified for test \"%s\" in framework \"%s\", please specify a supported Database OS\n
Supported OS's: [%s]\n
Suggestion: \"Linux\"\n
Supplied (lowercased): \"%s\"
""" % (test_name, config["framework"], db_osstr, test_db_os)
raise Exception(errmsg)
### Done validating benchmark_config values ###
# Map test type to a parsed FrameworkTestType object
runTests = dict()
for type_name, type_obj in benchmarker.types.iteritems():
try:
runTests[type_name] = type_obj.copy().parse(test_keys)
except AttributeError as ae:
# This is quite common - most tests don't support all types
          # Quietly log it and move on (debug logging is on in travis and this causes
          # ~1500 lines of debug, so I'm totally ignoring it for now)
# logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
pass
# We need to sort by test_type to run
sortedTestKeys = sorted(runTests.keys(), key=testOrder)
sortedRunTests = OrderedDict()
for sortedTestKey in sortedTestKeys:
sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
# Prefix all test names with framework except 'default' test
# Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
if test_name == 'default':
test_name = config['framework']
else:
test_name = "%s-%s" % (config['framework'], test_name)
# By passing the entire set of keys, each FrameworkTest will have a member for each key
tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys))
return tests
##############################################################
# End parse_config
##############################################################
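# A minimal, self-contained sketch of the length-based ordering used inside
# parse_config above (the test type names here are hypothetical examples):
if __name__ == '__main__':
  demo_types = ['json', 'db', 'query', 'fortune', 'update', 'plaintext']
  # Sorting by name length pushes 'plaintext' to the end, which is the whole
  # point of the testOrder key function.
  print(sorted(demo_types, key=len))
  # -> ['db', 'json', 'query', 'update', 'fortune', 'plaintext']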
|
|
from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.models.s3c import E_Step
from theano import function
import numpy as np
import theano.tensor as T
from theano import config
#from pylearn2.utils import serial
import warnings
def broadcast(mat, shape_0):
rval = mat
if mat.shape[0] != shape_0:
assert mat.shape[0] == 1
rval = np.zeros((shape_0, mat.shape[1]),dtype=mat.dtype)
for i in xrange(shape_0):
rval[i,:] = mat[0,:]
return rval
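# Illustrative note: broadcast(mat, n) simply tiles a single-row matrix into n
# identical rows, e.g. broadcast(np.ones((1, 5)), 3) returns a (3, 5) array of
# ones; for a (1, k) input it is equivalent to np.tile(mat, (n, 1)).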
class Test_S3C_Inference:
def setUp(self):
# Temporarily change config.floatX to float64, as s3c inference
# tests currently fail due to numerical issues for float32.
self.prev_floatX = config.floatX
config.floatX = 'float64'
def tearDown(self):
# Restore previous value of floatX
config.floatX = self.prev_floatX
def __init__(self):
""" gets a small batch of data
sets up an S3C model
"""
# We also have to change the value of config.floatX in __init__.
self.prev_floatX = config.floatX
config.floatX = 'float64'
try:
self.tol = 1e-5
#dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')
#X = dataset.get_batch_design(1000)
#X = X[:,0:5]
X = np.random.RandomState([1,2,3]).randn(1000,5)
X -= X.mean()
X /= X.std()
m, D = X.shape
N = 5
#don't give the model an e_step or learning rate so it won't spend years compiling a learn_func
self.model = S3C(nvis = D,
nhid = N,
irange = .1,
init_bias_hid = 0.,
init_B = 3.,
min_B = 1e-8,
max_B = 1000.,
init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,
init_mu = 1., e_step = None,
m_step = Grad_M_Step(),
min_bias_hid = -1e30, max_bias_hid = 1e30,
)
self.model.make_pseudoparams()
self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]
self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)
self.e_step.register_model(self.model)
self.X = X
self.N = N
self.m = m
finally:
config.floatX = self.prev_floatX
def test_match_unrolled(self):
""" tests that inference with scan matches result using unrolled loops """
unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)
unrolled_e_step.register_model(self.model)
V = T.matrix()
scan_result = self.e_step.infer(V)
unrolled_result = unrolled_e_step.infer(V)
outputs = []
for key in scan_result:
outputs.append(scan_result[key])
outputs.append(unrolled_result[key])
f = function([V], outputs)
outputs = f(self.X)
assert len(outputs) % 2 == 0
for i in xrange(0,len(outputs),2):
assert np.allclose(outputs[i],outputs[i+1])
def test_grad_s(self):
"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
model.test_batch_size = X.shape[0]
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)
grad_Mu1_idx = grad_Mu1[:,idx]
grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)
for i in xrange(self.N):
Mu1[:,i] = s_i_func(H, Mu1, i)
g = grad_func(H,Mu1,i)
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))
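    # Note on the objective used above (and in the tests below): the "truncated KL"
    #   trunc_kl = -entropy_hs(q) + expected_energy_vhs(q)
    # differs from the true KL(q || p) only by terms that are constant with respect
    # to H_hat and Mu1 (e.g. the log partition function), so its gradients with
    # respect to the variational parameters, and its change across an update, match
    # those of the full KL divergence.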
def test_value_s(self):
"tests that the value of the kl divergence decreases with each update to s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
Mu1[:,i] = s_i_func(H, Mu1, i)
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
mx = increase.max()
if mx > 1e-3:
raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))
def test_grad_h(self):
"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = new_H[:,idx]
updates_func = function([H_var,Mu1_var,idx], h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,
var_s1_hat = Sigma1)
grad_H = T.grad(trunc_kl.sum(), H_var)
assert len(grad_H.type.broadcastable) == 2
#from theano.printing import min_informative_str
#print min_informative_str(grad_H)
#grad_H = Print('grad_H')(grad_H)
#grad_H_idx = grad_H[:,idx]
grad_func = function([H_var, Mu1_var], grad_H)
failed = False
for i in xrange(self.N):
rval = updates_func(H, Mu1, i)
H[:,i] = rval
g = grad_func(H,Mu1)[:,i]
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
#print "new values of H"
#print H[:,i]
#print "gradient on new values of H"
#print g
failed = True
print 'iteration ',i
#print 'max value of new H: ',H[:,i].max()
#print 'H for failing g: '
failing_h = H[np.abs(g) > self.tol, i]
#print failing_h
#from matplotlib import pyplot as plt
#plt.scatter(H[:,i],g)
#plt.show()
                #ignore failures extremely close to h=0 or h=1
high_mask = failing_h > .001
low_mask = failing_h < .999
mask = high_mask * low_mask
print 'masked failures: ',mask.shape[0],' err ',g_abs_max
if mask.sum() > 0:
print 'failing h passing the range mask'
print failing_h[ mask.astype(bool) ]
raise Exception('after mean field step, gradient of kl divergence'
' wrt freshly updated variational parameter should be 0, '
'but here the max magnitude of a gradient element is '
+str(g_abs_max)+' after updating h_'+str(i))
#assert not failed
def test_value_h(self):
"tests that the value of the kl divergence decreases with each update to h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = newH[:,idx]
h_i_func = function([H_var,Mu1_var,idx],h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
H[:,i] = h_i_func(H, Mu1, i)
#we don't update mu, the whole point of the split e step is we don't have to
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
print 'failures after iteration ',i,': ',(increase > self.tol).sum()
mx = increase.max()
if mx > 1e-4:
print 'increase amounts of failing examples:'
print increase[increase > self.tol]
print 'failing H:'
print H[increase > self.tol,:]
print 'failing Mu1:'
print Mu1[increase > self.tol,:]
print 'failing V:'
print X[increase > self.tol,:]
raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))
if __name__ == '__main__':
obj = Test_S3C_Inference()
#obj.test_grad_h()
#obj.test_grad_s()
#obj.test_value_s()
obj.test_value_h()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KeyVaultClientOperationsMixin:
async def _full_backup_initial(
self,
vault_base_url: str,
azure_storage_blob_container_uri: Optional["_models.SASTokenParameter"] = None,
**kwargs: Any
) -> "_models.FullBackupOperation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.FullBackupOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "7.3-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._full_backup_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if azure_storage_blob_container_uri is not None:
body_content = self._serialize.body(azure_storage_blob_container_uri, 'SASTokenParameter')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('FullBackupOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_full_backup_initial.metadata = {'url': '/backup'} # type: ignore
async def begin_full_backup(
self,
vault_base_url: str,
azure_storage_blob_container_uri: Optional["_models.SASTokenParameter"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.FullBackupOperation"]:
"""Creates a full backup using a user-provided SAS token to an Azure blob storage container.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param azure_storage_blob_container_uri: Azure blob shared access signature token pointing to a
valid Azure blob container where full backup needs to be stored. This token needs to be valid
for at least next 24 hours from the time of making this call.
:type azure_storage_blob_container_uri: ~azure.keyvault.v7_3_preview.models.SASTokenParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FullBackupOperation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.keyvault.v7_3_preview.models.FullBackupOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FullBackupOperation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._full_backup_initial(
vault_base_url=vault_base_url,
azure_storage_blob_container_uri=azure_storage_blob_container_uri,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('FullBackupOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
if polling is True: polling_method = AsyncLROBasePolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_full_backup.metadata = {'url': '/backup'} # type: ignore
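    # Hedged usage sketch (not part of the generated client): assuming an already
    # constructed async client that exposes this mixin, and a SASTokenParameter
    # instance named `sas_params`, a full backup could be driven roughly as
    #   poller = await client.begin_full_backup("https://myvault.vault.azure.net", sas_params)
    #   backup_operation = await poller.result()   # a FullBackupOperation
    # The poller honours the `polling` and `polling_interval` keywords documented above.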
async def full_backup_status(
self,
vault_base_url: str,
job_id: str,
**kwargs: Any
) -> "_models.FullBackupOperation":
"""Returns the status of full backup operation.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param job_id: The id returned as part of the backup request.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FullBackupOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v7_3_preview.models.FullBackupOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FullBackupOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "7.3-preview"
accept = "application/json"
# Construct URL
url = self.full_backup_status.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FullBackupOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
full_backup_status.metadata = {'url': '/backup/{jobId}/pending'} # type: ignore
async def _full_restore_operation_initial(
self,
vault_base_url: str,
restore_blob_details: Optional["_models.RestoreOperationParameters"] = None,
**kwargs: Any
) -> "_models.RestoreOperation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RestoreOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "7.3-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._full_restore_operation_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if restore_blob_details is not None:
body_content = self._serialize.body(restore_blob_details, 'RestoreOperationParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('RestoreOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_full_restore_operation_initial.metadata = {'url': '/restore'} # type: ignore
async def begin_full_restore_operation(
self,
vault_base_url: str,
restore_blob_details: Optional["_models.RestoreOperationParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.RestoreOperation"]:
"""Restores all key materials using the SAS token pointing to a previously stored Azure Blob
storage backup folder.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param restore_blob_details: The Azure blob SAS token pointing to a folder where the previous
successful full backup was stored.
:type restore_blob_details: ~azure.keyvault.v7_3_preview.models.RestoreOperationParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RestoreOperation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.keyvault.v7_3_preview.models.RestoreOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RestoreOperation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._full_restore_operation_initial(
vault_base_url=vault_base_url,
restore_blob_details=restore_blob_details,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('RestoreOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
if polling is True: polling_method = AsyncLROBasePolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_full_restore_operation.metadata = {'url': '/restore'} # type: ignore
async def restore_status(
self,
vault_base_url: str,
job_id: str,
**kwargs: Any
) -> "_models.RestoreOperation":
"""Returns the status of restore operation.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
        :param job_id: The Job Id returned as part of the restore operation.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RestoreOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v7_3_preview.models.RestoreOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RestoreOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "7.3-preview"
accept = "application/json"
# Construct URL
url = self.restore_status.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RestoreOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restore_status.metadata = {'url': '/restore/{jobId}/pending'} # type: ignore
async def _selective_key_restore_operation_initial(
self,
vault_base_url: str,
key_name: str,
restore_blob_details: Optional["_models.SelectiveKeyRestoreOperationParameters"] = None,
**kwargs: Any
) -> "_models.SelectiveKeyRestoreOperation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.SelectiveKeyRestoreOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "7.3-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._selective_key_restore_operation_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'keyName': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if restore_blob_details is not None:
body_content = self._serialize.body(restore_blob_details, 'SelectiveKeyRestoreOperationParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('SelectiveKeyRestoreOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_selective_key_restore_operation_initial.metadata = {'url': '/keys/{keyName}/restore'} # type: ignore
async def begin_selective_key_restore_operation(
self,
vault_base_url: str,
key_name: str,
restore_blob_details: Optional["_models.SelectiveKeyRestoreOperationParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.SelectiveKeyRestoreOperation"]:
"""Restores all key versions of a given key using user supplied SAS token pointing to a previously
stored Azure Blob storage backup folder.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to be restored from the user supplied backup.
:type key_name: str
:param restore_blob_details: The Azure blob SAS token pointing to a folder where the previous
successful full backup was stored.
:type restore_blob_details: ~azure.keyvault.v7_3_preview.models.SelectiveKeyRestoreOperationParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SelectiveKeyRestoreOperation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.keyvault.v7_3_preview.models.SelectiveKeyRestoreOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SelectiveKeyRestoreOperation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._selective_key_restore_operation_initial(
vault_base_url=vault_base_url,
key_name=key_name,
restore_blob_details=restore_blob_details,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('SelectiveKeyRestoreOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'keyName': self._serialize.url("key_name", key_name, 'str'),
}
if polling is True: polling_method = AsyncLROBasePolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_selective_key_restore_operation.metadata = {'url': '/keys/{keyName}/restore'} # type: ignore
|
|
"""Base class for sparse matrix formats using compressed storage."""
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
csr_sample_values, csr_row_index, csr_row_slice,
csr_column_index1, csr_column_index2)
from ._index import IndexMixin
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, isintlike, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape,
matrix, asmatrix, is_pydata_spmatrix)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row- and column-oriented matrices"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self(arg1)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self._shape = check_shape(arg1)
M, N = self.shape
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M, N))
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, idx_dtype)
self.indptr = np.zeros(self._swap((M, N))[0] + 1,
dtype=idx_dtype)
else:
if len(arg1) == 2:
# (data, ij) format
from .coo import coo_matrix
other = self.__class__(coo_matrix(arg1, shape=shape,
dtype=dtype))
self._set_self(other)
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = None
if shape is not None:
maxval = max(shape)
idx_dtype = get_index_dtype((indices, indptr),
maxval=maxval,
check_contents=True)
self.indices = np.array(indices, copy=copy,
dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=dtype)
else:
raise ValueError("unrecognized {}_matrix "
"constructor usage".format(self.format))
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except Exception as e:
raise ValueError("unrecognized {}_matrix constructor usage"
"".format(self.format)) from e
from .coo import coo_matrix
self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
# Read matrix dimensions given, if any
if shape is not None:
self._shape = check_shape(shape)
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except Exception as e:
raise ValueError('unable to infer matrix dimensions') from e
else:
self._shape = check_shape(self._swap((major_dim,
minor_dim)))
if dtype is not None:
self.data = self.data.astype(dtype, copy=False)
self.check_format(full_check=False)
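    # Illustrative sketch of the (data, indices, indptr) constructor form handled
    # above (hypothetical values): for a CSR matrix,
    #   data=[1, 2, 3], indices=[0, 2, 1], indptr=[0, 2, 2, 3], shape=(3, 3)
    # encodes the dense matrix
    #   [[1, 0, 2],
    #    [0, 0, 0],
    #    [0, 3, 0]]
    # i.e. row i holds data[indptr[i]:indptr[i+1]] at columns indices[indptr[i]:indptr[i+1]].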
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
# use _swap to determine proper bounds
major_name, minor_name = self._swap(('row', 'column'))
major_dim, minor_dim = self._swap(self.shape)
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype ({})"
"".format(self.indptr.dtype.name), stacklevel=3)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype ({})"
"".format(self.indices.dtype.name), stacklevel=3)
idx_dtype = get_index_dtype((self.indptr, self.indices))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
if x != 1:
raise ValueError('data, indices, and indptr should be 1-D')
# check index pointer
if (len(self.indptr) != major_dim + 1):
raise ValueError("index pointer size ({}) should be ({})"
"".format(len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
raise ValueError("{} index values must be < {}"
"".format(minor_name, minor_dim))
if self.indices.min() < 0:
raise ValueError("{} index values must be >= 0"
"".format(minor_name))
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices()
# assert(self.has_sorted_indices())
# TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return self.__class__(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning,
stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.todense() == other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning, stacklevel=3)
# TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other, '_ne_')
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true - res
else:
return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return True
def _inequality(self, other, op, op_name, bad_scalar_msg):
# Scalar other.
if isscalarlike(other):
if 0 == other and op_name in ('_le_', '_ge_'):
raise NotImplementedError(" >= and <= don't work with 0.")
elif op(0, other):
warn(bad_scalar_msg, SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.result_type(other))
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
return self._scalar_binopt(other, op)
# Dense other.
elif isdense(other):
return op(self.todense(), other)
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
raise ValueError("inconsistent shapes")
elif self.format != other.format:
other = other.asformat(self.format)
if op_name not in ('_ge_', '_le_'):
return self._binopt(other, op_name)
warn("Comparing sparse matrices using >= and <= is inefficient, "
"using <, >, or !=, instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
return all_true - res
else:
raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self, other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmetic operator overrides #
#################################
def _add_dense(self, other):
if other.shape != self.shape:
raise ValueError('Incompatible shapes ({} and {})'
.format(self.shape, other.shape))
dtype = upcast_char(self.dtype.char, other.dtype.char)
order = self._swap('CF')[0]
result = np.array(other, dtype=dtype, order=order, copy=True)
M, N = self._swap(self.shape)
y = result if result.flags.c_contiguous else result.T
csr_todense(M, N, self.indptr, self.indices, self.data, y)
return matrix(result, copy=False)
def _add_sparse(self, other):
return self._binopt(other, '_plus_')
def _sub_sparse(self, other):
return self._binopt(other, '_minus_')
def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
other = self.__class__(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1, 1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1, 1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == 1 and other.shape[0] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == 1 and other.shape[1] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Assume other is a dense matrix/array, which produces a single-item
# object array if other isn't convertible to ndarray.
other = np.atleast_2d(other)
if other.ndim != 2:
return np.multiply(self.toarray(), other)
# Single element / wrapped object.
if other.size == 1:
return self._mul_scalar(other.flat[0])
# Fast case for trivial sparse matrix.
elif self.shape == (1, 1):
return np.multiply(self.toarray()[0, 0], other)
from .coo import coo_matrix
ret = self.tocoo()
# Matching shapes.
if self.shape == other.shape:
data = np.multiply(ret.data, other[ret.row, ret.col])
# Sparse row vector times...
elif self.shape[0] == 1:
if other.shape[1] == 1: # Dense column vector.
data = np.multiply(ret.data, other)
elif other.shape[1] == self.shape[1]: # Dense matrix.
data = np.multiply(ret.data, other[:, ret.col])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(np.arange(other.shape[0]), len(ret.row))
col = np.tile(ret.col, other.shape[0])
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(other.shape[0], self.shape[1]),
copy=False)
# Sparse column vector times...
elif self.shape[1] == 1:
if other.shape[0] == 1: # Dense row vector.
data = np.multiply(ret.data[:, None], other)
elif other.shape[0] == self.shape[0]: # Dense matrix.
data = np.multiply(ret.data[:, None], other[ret.row])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(ret.row, other.shape[1])
col = np.tile(np.arange(other.shape[1]), len(ret.col))
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(self.shape[0], other.shape[1]),
copy=False)
# Sparse matrix times dense row vector.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
data = np.multiply(ret.data, other[:, ret.col].ravel())
# Sparse matrix times dense column vector.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
data = np.multiply(ret.data, other[ret.row].ravel())
else:
raise ValueError("inconsistent shapes")
ret.data = data.view(np.ndarray).ravel()
return ret
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
M, N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools, self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
def _mul_multivector(self, other):
M, N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M, n_vecs),
dtype=upcast_char(self.dtype.char, other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools, self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data,
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
major_axis = self._swap((M, N))[0]
other = self.__class__(other) # convert to this format
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices))
fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
nnz = fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
fn = getattr(_sparsetools, self.format + '_matmat')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
return self.__class__((data, indices, indptr), shape=(M, N))
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
return np.empty(0, dtype=self.data.dtype)
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
self.data, y)
return y
diagonal.__doc__ = spmatrix.diagonal.__doc__
#####################
# Other binary ops #
#####################
def _maximum_minimum(self, other, npop, op_name, dense_check):
if isscalarlike(other):
if dense_check(other):
warn("Taking maximum (minimum) with > 0 (< 0) number results"
" to a dense matrix.", SparseEfficiencyWarning,
stacklevel=3)
other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
self.sum_duplicates()
new_data = npop(self.data, np.asarray(other))
mat = self.__class__((new_data, self.indices, self.indptr),
dtype=new_data.dtype, shape=self.shape)
return mat
elif isdense(other):
return npop(self.todense(), other)
elif isspmatrix(other):
return self._binopt(other, op_name)
else:
raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum,
'_maximum_', lambda x: np.asarray(x) > 0)
maximum.__doc__ = spmatrix.maximum.__doc__
def minimum(self, other):
return self._maximum_minimum(other, np.minimum,
'_minimum_', lambda x: np.asarray(x) < 0)
minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
def sum(self, axis=None, dtype=None, out=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _minor_reduce(self, ufunc, data=None):
"""Reduce nonzeros with a ufunc over the minor axis when non-empty
Can be applied to a function of self.data by supplying data parameter.
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
"""
if data is None:
data = self.data
major_index = np.flatnonzero(np.diff(self.indptr))
value = ufunc.reduceat(data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
#######################
# Getting and Setting #
#######################
def _get_intXint(self, row, col):
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data,
major, major + 1, minor, minor + 1)
return data.sum(dtype=self.dtype)
def _get_sliceXslice(self, row, col):
major, minor = self._swap((row, col))
if major.step in (1, None) and minor.step in (1, None):
return self._get_submatrix(major, minor, copy=True)
return self._major_slice(major)._minor_slice(minor)
def _get_arrayXarray(self, row, col):
# inner indexing
idx_dtype = self.indices.dtype
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
major = np.asarray(major, dtype=idx_dtype)
minor = np.asarray(minor, dtype=idx_dtype)
val = np.empty(major.size, dtype=self.dtype)
csr_sample_values(M, N, self.indptr, self.indices, self.data,
major.size, major.ravel(), minor.ravel(), val)
if major.ndim == 1:
return asmatrix(val)
return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
# outer indexing
major, minor = self._swap((row, col))
return self._major_index_fancy(major)._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
"""Index along the major axis where idx is an array of ints.
"""
idx_dtype = self.indices.dtype
indices = np.asarray(idx, dtype=idx_dtype).ravel()
_, N = self._swap(self.shape)
M = len(indices)
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_index(M, indices, self.indptr, self.indices, self.data,
res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _major_slice(self, idx, copy=False):
"""Index along the major axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(M)
M = len(range(start, stop, step))
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
if step == 1:
all_idx = slice(self.indptr[start], self.indptr[stop])
res_indices = np.array(self.indices[all_idx], copy=copy)
res_data = np.array(self.data[all_idx], copy=copy)
else:
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_slice(start, stop, step, self.indptr, self.indices,
self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
"""Index along the minor axis where idx is an array of ints.
"""
idx_dtype = self.indices.dtype
idx = np.asarray(idx, dtype=idx_dtype).ravel()
M, N = self._swap(self.shape)
k = len(idx)
new_shape = self._swap((M, k))
if k == 0:
return self.__class__(new_shape)
# pass 1: count idx entries and compute new indptr
col_offsets = np.zeros(N, dtype=idx_dtype)
res_indptr = np.empty_like(self.indptr)
csr_column_index1(k, idx, M, N, self.indptr, self.indices,
col_offsets, res_indptr)
# pass 2: copy indices/data for selected idxs
col_order = np.argsort(idx).astype(idx_dtype, copy=False)
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_column_index2(col_order, col_offsets, len(self.indices),
self.indices, self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_slice(self, idx, copy=False):
"""Index along the minor axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(N)
N = len(range(start, stop, step))
if N == 0:
return self.__class__(self._swap((M, N)))
if step == 1:
return self._get_submatrix(minor=idx, copy=copy)
# TODO: don't fall back to fancy indexing here
return self._minor_index_fancy(np.arange(start, stop, step))
def _get_submatrix(self, major=None, minor=None, copy=False):
"""Return a submatrix of this matrix.
major, minor: None, int, or slice with step 1
"""
M, N = self._swap(self.shape)
i0, i1 = _process_slice(major, M)
j0, j1 = _process_slice(minor, N)
if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
return self.copy() if copy else self
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape,
dtype=self.dtype, copy=False)
def _set_intXint(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
# clear entries that will be overwritten
self._zero_many(*self._swap((row, col)))
M, N = row.shape # matches col.shape
broadcast_row = M != 1 and x.shape[0] == 1
broadcast_col = N != 1 and x.shape[1] == 1
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if x.size == 0:
return
if broadcast_row:
r = np.repeat(np.arange(M), len(r))
c = np.tile(c, M)
x = np.tile(x, M)
if broadcast_col:
r = np.repeat(r, N)
c = np.tile(np.arange(N), len(c))
x = np.repeat(x, N)
# only assign entries in the new sparsity structure
i, j = self._swap((row[r, c], col[r, c]))
self._set_many(i, j, x)
def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
i -= k
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
j += k
if not broadcast:
values = values[:len(i)]
self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
check_bounds(i, M)
check_bounds(j, N)
return i, j, M, N
def _set_many(self, i, j, x):
"""Sets value at each (i, j) to x
Here (i,j) index major and minor respectively, and must not contain
duplicate entries.
"""
i, j, M, N = self._prepare_indices(i, j)
x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()
n_samples = x.size
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a {}_matrix is expensive."
" lil_matrix is more efficient.".format(self.format),
SparseEfficiencyWarning, stacklevel=3)
# replace where possible
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
# only insertions remain
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
# only assign zeros to the existing sparsity structure
self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
"""Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts each major group (e.g. all entries per row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
"""
order = np.argsort(i, kind='mergesort') # stable for duplicates
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
# Update index data type
idx_dtype = get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
# Collate old and new in chunks by major index
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
# old entries
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
# handle duplicate j: keep last setting
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
# remaining old entries
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
# update attributes
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
nnzs[0] = idx_dtype(0)
indptr_diff = np.diff(self.indptr)
indptr_diff[ui] += new_nnzs
nnzs[1:] = indptr_diff
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
# TODO: only sort where necessary
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
######################
# Conversion methods #
######################
def tocoo(self, copy=True):
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
from .coo import coo_matrix
return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
if out is None and order is None:
order = self._swap('cf')[0]
out = self._process_toarray_args(order, out)
if not (out.flags.c_contiguous or out.flags.f_contiguous):
raise ValueError('Output array must be C or F contiguous')
# align ideal order with output array order
if out.flags.c_contiguous:
x = self.tocsr()
y = out
else:
x = self.tocsc()
y = out.T
M, N = x._swap(x.shape)
csr_todense(M, N, x.indptr, x.indices, x.data, y)
return out
toarray.__doc__ = spmatrix.toarray.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation.
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
def __get_has_canonical_format(self):
"""Determine whether the matrix has sorted indices and no duplicates
Returns
- True: if the above applies
- False: otherwise
has_canonical_format implies has_sorted_indices, so if the latter flag
is False, so will the former be; if the former is found True, the
latter flag is also set.
"""
# first check to see if result was cached
if not getattr(self, '_has_sorted_indices', True):
# not sorted => not canonical
self._has_canonical_format = False
elif not hasattr(self, '_has_canonical_format'):
self.has_canonical_format = bool(
_sparsetools.csr_has_canonical_format(
len(self.indptr) - 1, self.indptr, self.indices))
return self._has_canonical_format
def __set_has_canonical_format(self, val):
self._has_canonical_format = bool(val)
if val:
self.has_sorted_indices = True
has_canonical_format = property(fget=__get_has_canonical_format,
fset=__set_has_canonical_format)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation.
"""
if self.has_canonical_format:
return
self.sort_indices()
M, N = self._swap(self.shape)
_sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
self.has_canonical_format = True
def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self, '_has_sorted_indices'):
self._has_sorted_indices = bool(
_sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices))
return self._has_sorted_indices
def __set_sorted(self, val):
self._has_sorted_indices = bool(val)
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if not self.has_sorted_indices:
_sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
self.indices, self.data)
self.has_sorted_indices = True
def prune(self):
"""Remove empty space after all non-zero elements.
"""
major_dim = self._swap(self.shape)[0]
if len(self.indptr) != major_dim + 1:
raise ValueError('index pointer has invalid length')
if len(self.indices) < self.nnz:
raise ValueError('indices array has fewer than nnz elements')
if len(self.data) < self.nnz:
raise ValueError('data array has fewer than nnz elements')
self.indices = _prune_array(self.indices[:self.nnz])
self.data = _prune_array(self.data[:self.nnz])
def resize(self, *shape):
shape = check_shape(shape)
if hasattr(self, 'blocksize'):
bm, bn = self.blocksize
new_M, rm = divmod(shape[0], bm)
new_N, rn = divmod(shape[1], bn)
if rm or rn:
raise ValueError("shape must be divisible into %s blocks. "
"Got %s" % (self.blocksize, shape))
M, N = self.shape[0] // bm, self.shape[1] // bn
else:
new_M, new_N = self._swap(shape)
M, N = self._swap(self.shape)
if new_M < M:
self.indices = self.indices[:self.indptr[new_M]]
self.data = self.data[:self.indptr[new_M]]
self.indptr = self.indptr[:new_M + 1]
elif new_M > M:
self.indptr = np.resize(self.indptr, new_M + 1)
self.indptr[M + 1:].fill(self.indptr[M])
if new_N < N:
mask = self.indices < new_N
if not np.all(mask):
self.indices = self.indices[mask]
self.data = self.data[mask]
major_index, val = self._minor_reduce(np.add, mask)
self.indptr.fill(0)
self.indptr[1:][major_index] = val
np.cumsum(self.indptr, out=self.indptr)
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data, self.indices.copy(),
self.indptr.copy()),
shape=self.shape,
dtype=data.dtype)
else:
return self.__class__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = self.__class__((data, indices, indptr), shape=self.shape)
A.prune()
return A
def _divide_sparse(self, other):
"""
Divide this matrix by a second sparse matrix.
"""
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# Eldiv leaves entries outside the combined sparsity
# pattern empty, so they must be filled manually.
# Everything outside of other's sparsity is NaN, and everything
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = matrix(out)
else:
            # integer types go with nan <-> 0
out = r
return out
def _process_slice(sl, num):
if sl is None:
i0, i1 = 0, num
elif isinstance(sl, slice):
i0, i1, stride = sl.indices(num)
if stride != 1:
raise ValueError('slicing with step != 1 not supported')
i0 = min(i0, i1) # give an empty slice when i0 > i1
elif isintlike(sl):
if sl < 0:
sl += num
i0, i1 = sl, sl + 1
if i0 < 0 or i1 > num:
raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
(i0, i1, num))
else:
raise TypeError('expected slice or scalar')
return i0, i1
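# The snippet below is a usage sketch only, not part of the module: it shows a
# few of the behaviours implemented above (boolean comparison, broadcasting in
# multiply(), and diagonal extraction) through the public csr_matrix class.
# The matrix values are illustrative, and the __main__ guard keeps the sketch
# inert when this module is imported as part of scipy.sparse.
if __name__ == '__main__':
    import numpy as np
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.array([[1, 0, 2],
                             [0, 0, 3]]))
    # __ne__ with a zero scalar returns a sparse boolean matrix.
    print((A != 0).toarray())
    # multiply() broadcasts a dense row vector across the rows of A.
    print(A.multiply(np.array([[10, 20, 30]])).toarray())
    # diagonal(k=1) collects the first superdiagonal: array([0, 3]).
    print(A.diagonal(k=1))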
|
|
import pytest
import pgdb
@pytest.fixture
def pgdbConn(dbhost, db, frontenduser):
return pgdb.connect(dbhost + ':' + db + ':' + frontenduser)
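# Note: dbhost, db and frontenduser are themselves fixtures that must be
# provided elsewhere (typically in a conftest.py, e.g. from pytest
# command-line options); that is an assumption based on how they are
# injected into pgdbConn above.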
def generic_rider_insert(conn, args):
cursor=conn.cursor()
cursor.execute("""
SELECT * from carpoolvote.submit_new_rider (
%(IPAddress)s,
%(RiderFirstName)s,
%(RiderLastName)s,
%(RiderEmail)s,
%(RiderPhone)s,
%(RiderCollectionZIP)s,
%(RiderDropOffZIP)s,
%(AvailableRideTimesLocal)s,
%(TotalPartySize)s,
%(TwoWayTripNeeded)s,
%(RiderIsVulnerable)s,
%(RiderWillNotTalkPolitics)s,
%(PleaseStayInTouch)s,
%(NeedWheelchair)s,
%(RiderPreferredContact)s,
%(RiderAccommodationNotes)s,
%(RiderLegalConsent)s,
%(RiderWillBeSafe)s,
%(RiderCollectionAddress)s,
%(RiderDestinationAddress)s
)
""", args)
results=cursor.fetchone()
conn.commit()
return {'uuid' : results[0], 'error_code' : results[1], 'error_text' : results[2]}
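# The tests below all start from the same ~20-field dictionary and vary one
# field at a time. A helper along the following lines (hypothetical, not used
# by the original tests, which spell their dictionaries out in full) could
# build that baseline and apply per-test overrides:
def base_rider_args(**overrides):
    args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'John',
        'RiderLastName' : 'Doe',
        'RiderEmail' : 'john.doe@gmail.com',
        'RiderPhone' : '555-555-555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
        'TotalPartySize' : '10',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    args.update(overrides)
    return args
# Example: generic_rider_insert(conn, base_rider_args(RiderCollectionZIP='abcd'))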
def test_insert_rider_000_all_valid(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
cursor = pgdbConn.cursor()
cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid})
results = cursor.fetchone()
assert results[0] == 'Pending'
def test_insert_rider_001_IPAddress_invalid(pgdbConn):
args = {
'IPAddress' : 'abcd',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_002_RiderCollectionZIP_invalid_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_003_RiderCollectionZIP_invalid_not_exists(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '00000',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_004_RiderCollectionZIP_invalid_not_number(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : 'abcd',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_005_RiderDropOffZIP_invalid_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_006_RiderDropOffZIP_invalid_not_found(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '00000',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_007_RiderDropOffZIP_invalid_not_number(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : 'abcd',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_008_AvailableRideTimesLocal_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_009_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_010_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_011_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_012_AvailableRideTimesLocal_invalid_chronology(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T03:00/2018-10-01T02:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_013_AvailableRideTimesLocal_invalid_past(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2000-10-01T02:00/2000-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_014_TotalPartySize_invalid_zero(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '0',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_015_TotalPartySize_invalid_negative(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '-10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_016_RiderPreferredContact_valid_SMS(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'SMS',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
def test_insert_rider_017_RiderPreferredContact_valid_Email(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
def test_insert_rider_018_RiderPreferredContact_valid_Phone(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Phone',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
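# Sketch (not part of the original suite): the invalid-input cases above share
# one pattern -- submit once, expect error_code 2, a non-empty error_text and
# no UUID -- so they could also be written as a single parametrized test,
# reusing the hypothetical base_rider_args() helper sketched near the top of
# this file.
@pytest.mark.parametrize('field,value', [
    ('IPAddress', 'abcd'),
    ('RiderCollectionZIP', ''),
    ('RiderCollectionZIP', '00000'),
    ('RiderCollectionZIP', 'abcd'),
    ('RiderDropOffZIP', ''),
    ('RiderDropOffZIP', '00000'),
    ('RiderDropOffZIP', 'abcd'),
    ('AvailableRideTimesLocal', ''),
    ('AvailableRideTimesLocal', '2018-10-01T02:00'),
    ('AvailableRideTimesLocal', '2018-10-01T02:00/'),
    ('TotalPartySize', '0'),
    ('TotalPartySize', '-10'),
])
def test_insert_rider_parametrized_invalid_inputs(pgdbConn, field, value):
    results = generic_rider_insert(pgdbConn, base_rider_args(**{field: value}))
    assert len(results['error_text']) > 0
    assert results['error_code'] == 2
    assert len(results['uuid']) == 0
    pgdbConn.commit()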
|
|
"""
Support for Pioneer Network Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pioneer/
"""
import logging
import telnetlib
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_PAUSE, SUPPORT_SELECT_SOURCE, MediaPlayerDevice, PLATFORM_SCHEMA,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_PLAY)
from homeassistant.const import (
CONF_HOST, STATE_OFF, STATE_ON, STATE_UNKNOWN, CONF_NAME, CONF_PORT,
CONF_TIMEOUT)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Pioneer AVR'
DEFAULT_PORT = 23 # telnet default. Some Pioneer AVRs use 8102
DEFAULT_TIMEOUT = None
SUPPORT_PIONEER = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
MAX_VOLUME = 185
MAX_SOURCE_NUMBERS = 60
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.socket_timeout,
})
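# Example configuration.yaml entry for this platform (values are illustrative;
# only host is required, the remaining keys fall back to the defaults above):
#
#   media_player:
#     - platform: pioneer
#       host: 192.168.0.10
#       name: Living Room AVR
#       port: 8102
#       timeout: 5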
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Pioneer platform."""
pioneer = PioneerDevice(
config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_TIMEOUT))
if pioneer.update():
add_devices([pioneer])
return True
else:
return False
class PioneerDevice(MediaPlayerDevice):
"""Representation of a Pioneer device."""
def __init__(self, name, host, port, timeout):
"""Initialize the Pioneer device."""
self._name = name
self._host = host
self._port = port
self._timeout = timeout
self._pwstate = 'PWR1'
self._volume = 0
self._muted = False
self._selected_source = ''
self._source_name_to_number = {}
self._source_number_to_name = {}
@classmethod
def telnet_request(cls, telnet, command, expected_prefix):
"""Execute `command` and return the response."""
try:
telnet.write(command.encode("ASCII") + b"\r")
except telnetlib.socket.timeout:
_LOGGER.debug("Pioneer command %s timed out", command)
return None
        # The receiver will randomly send state change updates; make sure
        # we get the response we are looking for.
for _ in range(3):
result = telnet.read_until(b"\r\n", timeout=0.2).decode("ASCII") \
.strip()
if result.startswith(expected_prefix):
return result
return None
def telnet_command(self, command):
"""Establish a telnet connection and sends `command`."""
try:
try:
telnet = telnetlib.Telnet(self._host,
self._port,
self._timeout)
except ConnectionRefusedError:
_LOGGER.debug("Pioneer %s refused connection", self._name)
return
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
except telnetlib.socket.timeout:
_LOGGER.debug(
"Pioneer %s command %s timed out", self._name, command)
def update(self):
"""Get the latest details from the device."""
try:
telnet = telnetlib.Telnet(self._host, self._port, self._timeout)
except ConnectionRefusedError:
_LOGGER.debug("Pioneer %s refused connection", self._name)
return False
pwstate = self.telnet_request(telnet, "?P", "PWR")
if pwstate:
self._pwstate = pwstate
volume_str = self.telnet_request(telnet, "?V", "VOL")
self._volume = int(volume_str[3:]) / MAX_VOLUME if volume_str else None
muted_value = self.telnet_request(telnet, "?M", "MUT")
self._muted = (muted_value == "MUT0") if muted_value else None
# Build the source name dictionaries if necessary
if not self._source_name_to_number:
for i in range(MAX_SOURCE_NUMBERS):
result = self.telnet_request(
telnet, "?RGB" + str(i).zfill(2), "RGB")
if not result:
continue
source_name = result[6:]
source_number = str(i).zfill(2)
self._source_name_to_number[source_name] = source_number
self._source_number_to_name[source_number] = source_name
source_number = self.telnet_request(telnet, "?F", "FN")
if source_number:
self._selected_source = self._source_number_to_name \
.get(source_number[2:])
else:
self._selected_source = None
telnet.close()
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._pwstate == "PWR1":
return STATE_OFF
if self._pwstate == "PWR0":
return STATE_ON
return STATE_UNKNOWN
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_PIONEER
@property
def source(self):
"""Return the current input source."""
return self._selected_source
@property
def source_list(self):
"""List of available input sources."""
return list(self._source_name_to_number.keys())
@property
def media_title(self):
"""Title of current playing media."""
return self._selected_source
def turn_off(self):
"""Turn off media player."""
self.telnet_command("PF")
def volume_up(self):
"""Volume up media player."""
self.telnet_command("VU")
def volume_down(self):
"""Volume down media player."""
self.telnet_command("VD")
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
# 60dB max
self.telnet_command(str(round(volume * MAX_VOLUME)).zfill(3) + "VL")
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.telnet_command("MO" if mute else "MF")
def turn_on(self):
"""Turn the media player on."""
self.telnet_command("PO")
def select_source(self, source):
"""Select input source."""
self.telnet_command(self._source_name_to_number.get(source) + "FN")
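# --- Illustrative sketch (not part of the original platform) ---
# The helper below restates, in isolation, the request/response pattern used by
# PioneerDevice.telnet_request above: commands are ASCII strings terminated by
# "\r", and the receiver answers with prefixed lines such as "VOL121" or "PWR0".
# The host, port and example command are assumptions for illustration only.
def _example_pioneer_query(host, command, expected_prefix, port=23, timeout=2):
    """Send a single command and return the first reply with the expected prefix."""
    import telnetlib
    telnet = telnetlib.Telnet(host, port, timeout)
    try:
        telnet.write(command.encode("ASCII") + b"\r")
        # The receiver may interleave unsolicited state updates, so read a few lines.
        for _ in range(3):
            reply = telnet.read_until(b"\r\n", timeout=0.2).decode("ASCII").strip()
            if reply.startswith(expected_prefix):
                return reply
    finally:
        telnet.close()
    return None
# Usage (hypothetical address): _example_pioneer_query("192.168.1.2", "?V", "VOL")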
|
|
"""
Support to interact with a Music Player Daemon.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.mpd/
"""
import logging
import socket
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-mpd2==0.5.5']
SUPPORT_MPD = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the MPD platform."""
daemon = config.get('server', None)
port = config.get('port', 6600)
location = config.get('location', 'MPD')
password = config.get('password', None)
import mpd
# pylint: disable=no-member
try:
mpd_client = mpd.MPDClient()
mpd_client.connect(daemon, port)
if password is not None:
mpd_client.password(password)
mpd_client.close()
mpd_client.disconnect()
except socket.error:
_LOGGER.error(
"Unable to connect to MPD. "
"Please check your settings")
return False
except mpd.CommandError as error:
if "incorrect password" in str(error):
_LOGGER.error(
"MPD reported incorrect password. "
"Please check your password.")
return False
else:
raise
add_devices([MpdDevice(daemon, port, location, password)])
class MpdDevice(MediaPlayerDevice):
"""Representation of a MPD server."""
# MPD confuses pylint
# pylint: disable=no-member, abstract-method
def __init__(self, server, port, location, password):
"""Initialize the MPD device."""
import mpd
self.server = server
self.port = port
self._name = location
self.password = password
self.status = None
self.currentsong = None
self.client = mpd.MPDClient()
self.client.timeout = 10
self.client.idletimeout = None
self.update()
def update(self):
"""Get the latest data and update the state."""
import mpd
try:
self.status = self.client.status()
self.currentsong = self.client.currentsong()
except mpd.ConnectionError:
self.client.connect(self.server, self.port)
if self.password is not None:
self.client.password(self.password)
self.status = self.client.status()
self.currentsong = self.client.currentsong()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self.status['state'] == 'play':
return STATE_PLAYING
elif self.status['state'] == 'pause':
return STATE_PAUSED
else:
return STATE_OFF
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.currentsong['id']
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
# Time does not exist for streams
return self.currentsong.get('time')
@property
def media_title(self):
"""Title of current playing media."""
name = self.currentsong.get('name', None)
title = self.currentsong.get('title', None)
if name is None and title is None:
return "None"
elif name is None:
return title
elif title is None:
return name
else:
return '{}: {}'.format(name, title)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self.currentsong.get('artist')
@property
def media_album_name(self):
"""Album of current playing media (Music track only)."""
return self.currentsong.get('album')
@property
def volume_level(self):
"""Return the volume level."""
return int(self.status['volume'])/100
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
return SUPPORT_MPD
def turn_off(self):
"""Service to send the MPD the command to stop playing."""
self.client.stop()
def turn_on(self):
"""Service to send the MPD the command to start playing."""
self.client.play()
def set_volume_level(self, volume):
"""Set volume of media player."""
self.client.setvol(int(volume * 100))
def volume_up(self):
"""Service to send the MPD the command for volume up."""
current_volume = int(self.status['volume'])
if current_volume <= 100:
            self.client.setvol(min(current_volume + 5, 100))
def volume_down(self):
"""Service to send the MPD the command for volume down."""
current_volume = int(self.status['volume'])
if current_volume >= 0:
            self.client.setvol(max(current_volume - 5, 0))
def media_play(self):
"""Service to send the MPD the command for play/pause."""
self.client.pause(0)
def media_pause(self):
"""Service to send the MPD the command for play/pause."""
self.client.pause(1)
def media_next_track(self):
"""Service to send the MPD the command for next track."""
self.client.next()
def media_previous_track(self):
"""Service to send the MPD the command for previous track."""
self.client.previous()
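# --- Illustrative sketch (not part of the original platform) ---
# A standalone example of the python-mpd2 calls wrapped by MpdDevice above:
# connect, optionally authenticate, read status()/currentsong(), then disconnect.
# The server address, port and password defaults are assumptions for illustration only.
def _example_mpd_snapshot(server='localhost', port=6600, password=None):
    import mpd
    client = mpd.MPDClient()
    client.timeout = 10
    client.connect(server, port)
    try:
        if password is not None:
            client.password(password)
        status = client.status()            # e.g. {'state': 'play', 'volume': '80', ...}
        currentsong = client.currentsong()  # e.g. {'title': ..., 'artist': ..., 'time': ...}
        return status, currentsong
    finally:
        client.close()
        client.disconnect()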
|
|
# Copyright 2018 Google LLC. All rights reserved. Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# Any software provided by Google hereunder is distributed "AS IS", WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, and is not intended for production use.
"""Tests for the bucket_details.py file"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from gcs_bucket_mover import bucket_details
from tests import common
class TestBucketDetails(unittest.TestCase):
"""Tests for the logic in the BucketDetails class."""
def setUp(self):
self.parsed_args = common.get_mock_args()
self.source_bucket = common.get_mock_source_bucket()
def test_default_constructor(self):
"""Tests the default object is successfully created."""
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
# Test that the properties are set to the parsed_args/source_bucket values
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
# Test that the bucket properties can be overridden
value = 'test'
details.iam_policy = details.acl_entities = details.default_obj_acl_entities = value
details.requester_pays = details.cors = details.default_kms_key_name = value
details.labels = details.lifecycle_rules = details.logging = value
details.versioning_enabled = details.notifications = value
self.assertEqual(value, details.iam_policy)
self.assertEqual(value, details.acl_entities)
self.assertEqual(value, details.default_obj_acl_entities)
self.assertEqual(value, details.requester_pays)
self.assertEqual(value, details.cors)
self.assertEqual(value, details.default_kms_key_name)
self.assertEqual(value, details.labels)
self.assertEqual(value, details.lifecycle_rules)
self.assertEqual(value, details.logging)
self.assertEqual(value, details.versioning_enabled)
self.assertEqual(value, details.notifications)
def test_skip_everything(self):
"""Tests the object constructor when the skip_everything flag is True."""
self.parsed_args.skip_everything = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertIsNone(details.iam_policy)
self.assertIsNone(details.acl_entities)
self.assertIsNone(details.default_obj_acl_entities)
self.assertIsNone(details.requester_pays)
self.assertIsNone(details.cors)
self.assertIsNone(details.default_kms_key_name)
self.assertEqual(details.labels, {})
self.assertIsNone(details.lifecycle_rules)
self.assertIsNone(details.logging)
self.assertIsNone(details.versioning_enabled)
self.assertListEqual(details.notifications, [])
# Test that the bucket properties cannot be overridden
value = 'test'
details.iam_policy = details.acl_entities = details.default_obj_acl_entities = value
details.requester_pays = details.cors = details.default_kms_key_name = value
details.labels = details.lifecycle_rules = details.logging = value
details.versioning_enabled = details.notifications = value
self.assertIsNone(details.iam_policy)
self.assertIsNone(details.acl_entities)
self.assertIsNone(details.default_obj_acl_entities)
self.assertIsNone(details.requester_pays)
self.assertIsNone(details.cors)
self.assertIsNone(details.default_kms_key_name)
self.assertEqual(details.labels, {})
self.assertIsNone(details.lifecycle_rules)
self.assertIsNone(details.logging)
self.assertIsNone(details.versioning_enabled)
self.assertListEqual(details.notifications, [])
def test_skip_acl(self):
"""Tests the --skip_acl flag works correctly."""
self.parsed_args.skip_acl = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertIsNone(details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.acl_entities = 'test'
self.assertIsNone(details.acl_entities)
def test_skip_cors(self):
"""Tests the --skip_cors flag works correctly."""
self.parsed_args.skip_cors = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertIsNone(details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.cors = 'test'
self.assertIsNone(details.cors)
def test_skip_default_obj_acl(self):
"""Tests the --skip_default_obj_acl flag works correctly."""
self.parsed_args.skip_default_obj_acl = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertIsNone(details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.default_obj_acl_entities = 'test'
self.assertIsNone(details.default_obj_acl_entities)
def test_skip_iam(self):
"""Tests the --skip_iam flag works correctly."""
self.parsed_args.skip_iam = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertIsNone(details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.iam_policy = 'test'
self.assertIsNone(details.iam_policy)
def test_skip_kms_key(self):
"""Tests the --skip_kms_key flag works correctly."""
self.parsed_args.skip_kms_key = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertIsNone(details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.default_kms_key_name = 'test'
self.assertIsNone(details.default_kms_key_name)
def test_skip_labels(self):
"""Tests the --skip_labels flag works correctly."""
self.parsed_args.skip_labels = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(details.labels, {})
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.labels = 'test'
self.assertEqual(details.labels, {})
def test_skip_logging(self):
"""Tests the --skip_logging flag works correctly."""
self.parsed_args.skip_logging = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertIsNone(details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.logging = 'test'
self.assertIsNone(details.logging)
def test_skip_lifecycle_rules(self):
"""Tests the --skip_lifecycle_rules flag works correctly."""
self.parsed_args.skip_lifecycle_rules = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertIsNone(details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.lifecycle_rules = 'test'
self.assertIsNone(details.lifecycle_rules)
def test_skip_notifications(self):
"""Tests the --skip_notifications flag works correctly."""
self.parsed_args.skip_notifications = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertListEqual(details.notifications, [])
details.notifications = 'test'
self.assertListEqual(details.notifications, [])
def test_skip_requester_pays(self):
"""Tests the --skip_requester_pays flag works correctly."""
self.parsed_args.skip_requester_pays = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertIsNone(details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertEqual(self.source_bucket.versioning_enabled,
details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.requester_pays = 'test'
self.assertIsNone(details.requester_pays)
def test_skip_versioning(self):
"""Tests the --skip_versioning flag works correctly."""
self.parsed_args.skip_versioning = True
details = bucket_details.BucketDetails(
conf=self.parsed_args, source_bucket=self.source_bucket)
self.assertEqual(self.parsed_args.location, details.location)
self.assertEqual(self.parsed_args.storage_class, details.storage_class)
self.assertEqual(self.source_bucket.get_iam_policy(),
details.iam_policy)
self.assertEqual(self.source_bucket.acl.get_entities(),
details.acl_entities)
self.assertEqual(self.source_bucket.default_object_acl.get_entities(),
details.default_obj_acl_entities)
self.assertEqual(self.source_bucket.requester_pays,
details.requester_pays)
self.assertEqual(self.source_bucket.cors, details.cors)
self.assertEqual(self.source_bucket.default_kms_key_name,
details.default_kms_key_name)
self.assertEqual(self.source_bucket.labels, details.labels)
self.assertEqual(self.source_bucket.lifecycle_rules,
details.lifecycle_rules)
self.assertEqual(self.source_bucket.get_logging(), details.logging)
self.assertIsNone(details.versioning_enabled)
self.assertEqual(self.source_bucket.list_notifications(),
details.notifications)
details.versioning_enabled = 'test'
self.assertIsNone(details.versioning_enabled)
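# --- Illustrative sketch (not part of the original test suite) ---
# The skip-flag tests above assert that assigning to a skipped property leaves it
# unchanged (None, {} or []). One way to get that behaviour, shown purely as an
# illustration and not as the library's actual implementation, is a setter that
# silently drops writes while the property is marked as skipped.
class _SkippablePropertyExample(object):
    """Toy object whose iam_policy ignores writes when skip_iam is set."""

    def __init__(self, skip_iam):
        self._skip_iam = skip_iam
        self._iam_policy = None

    @property
    def iam_policy(self):
        return self._iam_policy

    @iam_policy.setter
    def iam_policy(self, value):
        if not self._skip_iam:
            self._iam_policy = value
# With skip_iam=True, `obj.iam_policy = 'test'` is a no-op and iam_policy stays None.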
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import json
from collections import namedtuple, Counter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as pltc
import matplotlib.patches as pltp
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
from kbpo.defs import CANONICAL_RELATIONS, ALL_RELATIONS
def project_to_bins(data):
low, med, high = np.percentile(data, 50), np.percentile(data, 90), np.percentile(data, 100)
print("Frequency bins: low (50%) {}, medium (90%) {} and high (100%) {}", low, med, high)
# Collapse data into bins.
freq = [0., 0., 0.]
for count, _ in data:
if count <= low:
freq[0] += 1
elif count <= med:
freq[1] += 1
else:
freq[2] += 1
return freq
# Consider stacked histogram?
def do_plot_entity_histogram(args):
# First load the data.
objs = [json.load(fstream) for fstream in args.input]
# project data onto axis.
xlabels = []
ylabels = ["Low frequency", "Medium freq.", "High freq."]
Y = []
for obj in objs:
freq = obj["instance_frequency"]
xlabels.append(obj['mode'])
Y.append([freq["low"], freq["med"], freq["high"],])
Y = np.array(Y).T
colors = [pltc.to_hex(tuple(plt.cm.viridis.colors[int(i/len(ylabels) * 256)])) for i in range(len(ylabels))]
inds = np.arange(len(xlabels))
width = 0.5
plt.ylabel("Number of entites")
plt.xlabel("Sampling scheme")
plt.xticks(inds + width/2, xlabels)
Y_ = np.zeros(Y.shape[1]) # Remember, rows are now features, columns are systems
for i, lbl in enumerate(ylabels):
plt.bar(left=inds, bottom=Y_, height=Y[i], width=width, align='edge', label=ylabels[i], color=colors[i], alpha=0.8)
Y_ += Y[i]
plt.legend(bbox_to_anchor=(1.0, .9), bbox_transform=plt.gcf().transFigure, loc="upper right")
#plt.legend()
plt.tight_layout(rect=(0,0,0.8,1))
plt.savefig(args.output)
def do_plot_pair_diagram(args):
# First load the data.
obj = json.load(args.input)
# project data onto axis.
X = ["Low", "Medium", "High"]
Y = ["Low", "Medium", "High"]
Z = [[obj["pair_frequency"]["{} {}".format(l, l_)] for l in ["low", "med", "high"]] for l_ in ["low", "med", "high"]]
inds = np.arange(len(X))
plt.matshow(Z, cmap="viridis")
plt.ylabel("Subject entity")
plt.xlabel("Object entity")
plt.xticks(inds, X)
plt.yticks(inds, Y)
plt.colorbar()
plt.savefig(args.output)
# Consider stacked histogram?
def do_plot_relation_histogram(args):
# First load the data.
objs = [json.load(fstream) for fstream in args.input]
# project data onto axis.
X = CANONICAL_RELATIONS
Y = []
xlabels = [r.replace("_", r"\_") for r in CANONICAL_RELATIONS]
ylabels = []
for obj in objs:
freq = obj["relation_frequency"]
ylabels.append(obj['mode'])
Y.append([freq.get(r, 0) for r in CANONICAL_RELATIONS])
colors = [pltc.to_hex(tuple(plt.cm.viridis.colors[int(i/len(ylabels) * 256)])) for i in range(len(ylabels))]
inds = np.arange(len(X))
width = .8/len(objs)
ax = plt.gca()
ax.set_ylabel("# of instances")
ax.set_yscale("log")
ax.set_xlabel("Relation")
ax.set_xticks(np.arange(len(X)))
ax.set_xticklabels(xlabels, rotation=45, rotation_mode="anchor", ha="right")
for i, ylbl in enumerate(ylabels):
ax.bar(left=inds + i * width, height=Y[i], width=width, align='edge', label=ylbl, color=colors[i], alpha=0.8)
plt.legend()
plt.tight_layout()
plt.savefig(args.output)
# Consider stacked histogram?
def do_plot_clusters(args):
# First load the data.
objs = [json.load(fstream) for fstream in args.input]
#fig, axs = plt.subplots(1, 3, sharey=True)
ax = plt.gca()
# project data onto axis.
xlabels = []
ylabels = ["Low", "Medium", "High"]
max_cluster_size = 5
Ys = []
for obj in objs:
num_samples = obj.get("num_samples", 10)
freq = obj["cluster_frequency"]
xlabels.append(obj['mode'])
Y = []
for i, rng in enumerate(["low", "med", "high"]):
cntr = Counter(min(x, max_cluster_size) for x in freq[rng])
Y.append([cntr[i]/num_samples for i in range(1, max_cluster_size + 1)])
Ys.append(np.array(Y))
colors = [[i / len(xlabels) + j / (len(xlabels) * len(ylabels)) for j in range(len(ylabels))] for i in range(len(xlabels))]
colors = [[pltc.to_hex(plt.cm.viridis.colors[int(v * 256)]) for v in vs] for vs in colors]
inds = np.arange(max_cluster_size)
width = 0.8 / len(Ys)
plt.xticks(inds + 0.4, inds+1)
for i, (Y, xlabel) in enumerate(zip(Ys, xlabels)):
Y_ = np.zeros(Y.shape[1])
for j, ylbl in enumerate(ylabels):
plt.bar(left=inds + i * width, bottom=Y_, height=Y[j], width=width, align='edge', color=colors[i][j], alpha=0.8)
Y_ += Y[j]
plt.legend(handles=[pltp.Patch(facecolor=colors[i][1]) for i in range(len(xlabels))], labels=xlabels, loc="upper right", )
plt.xlabel("Cluster size")
plt.ylabel("Number of clusters")
plt.savefig(args.output)
# TODO make reasonable.
def do_plot_distribution(args):
# First load the data.
counts = load_counts(args.counts)
objs = [json.load(fstream) for fstream in args.input]
# Identify low, medium, high splits.
high_max = sorted(counts.values())[-3]
low_max, med_max = 3, np.power(high_max, 2./3.)
low_max, med_max = int(np.ceil(low_max)), int(np.ceil(med_max))
print(low_max, med_max, high_max)
# Start by plotting the distribution curve.
coarse_bins = np.array([1, low_max, med_max, high_max])
bins = np.exp(np.hstack([
np.linspace(0, np.log(low_max), 3, endpoint=False),
np.linspace(np.log(low_max), np.log(med_max), 30, endpoint=False),
np.linspace(np.log(med_max), np.log(high_max), 20, endpoint=True),
]))
y, x_ = np.histogram(list(counts.values()), bins)
# x is actually in the midpoint of each x_
x = (x_[:-1] + x_[1:])/2
x, y = x[y > 0], y[y > 0]
# project data onto axis.
data = {}
for obj in objs:
dist = []
for sample in obj['frequencies']:
dist_, _ = np.histogram(sample, bins=coarse_bins)
dist.append(dist_)
dist = np.array(dist).mean(axis=0)
dist = normalize_probs(dist)
# TODO: error bars
data[obj['mode']] = dist
fig, ax1 = plt.subplots()
ax1.set_xscale("log")
ax1.set_xlabel("# documents with entity")
ax1.set_yscale("log")
ax1.set_ylabel("# of entities")
ax1.set_xlim(1, high_max)
ax1.axvline(x=low_max, linestyle='--')
ax1.axvline(x=med_max, linestyle='--')
ax1.plot(x, y, marker='', linestyle='-')
ax2 = ax1.twinx()
ax2.set_ylim(0,1)
inds = coarse_bins[:-1]
widths = (coarse_bins[1:] - coarse_bins[:-1])/len(data)
print(inds)
print(widths)
for i, (k, vs) in enumerate(sorted(data.items())):
ret = ax2.bar(left=inds + i * widths, height=vs, width=widths, align='edge', label=k, alpha=0.4)
print(ret)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
fig.savefig(args.output)
# TODO: make reasonable.
def do_plot_cross_distribution(args):
# First load the data.
counts = load_counts(args.counts)
objs = [json.load(fstream) for fstream in args.input]
# Identify low, medium, high splits.
high_max = sorted(counts.values())[-3]
low_max, med_max = 3, np.power(high_max, 2./3.)
low_max, med_max = int(np.ceil(low_max)), int(np.ceil(med_max))
print(low_max, med_max, high_max)
# Start by plotting the distribution curve.
coarse_bins = np.array([1, low_max, med_max, high_max])
bins = [1, 2, 3, 4, 10, 20, 30]
# Project data onto these bins.
lows, meds, highs = {}, {}, {}
for obj in objs:
low_dist, med_dist, high_dist = [], [], []
for sample in obj['crosslinks']:
low, med, high = [], [], []
for freq, doc_count in sample:
if freq < low_max:
low.append(doc_count)
elif freq < med_max:
med.append(doc_count)
else:
high.append(doc_count)
low_dist.append(np.histogram(low, bins)[0])
med_dist.append(np.histogram(med, bins)[0])
high_dist.append(np.histogram(high, bins)[0])
low_dist = np.array(low_dist).mean(axis=0)
med_dist = np.array(med_dist).mean(axis=0)
high_dist = np.array(high_dist).mean(axis=0)
# TODO: error bars
lows[obj['mode']] = low_dist
meds[obj['mode']] = med_dist
highs[obj['mode']] = high_dist
inds = np.arange(0, len(bins)-1)
width = 1. / len(objs)
f, axs = plt.subplots(3, sharey=True)
for ax, dist in zip(axs, [lows, meds, highs]):
ax.set_yscale("log")
ax.set_xlabel("# documents with entity")
ax.set_ylabel("# of entities")
for i, (label, data) in enumerate(sorted(dist.items())):
print(len(data))
ax.bar(left=inds + i * width, height=data, width=width, align='edge', label=label, alpha=0.4)
f.savefig(args.output)
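# --- Illustrative sketch (not part of the original script) ---
# Minimal shape of an input JSON file accepted by the `entity` subcommand below:
# a "mode" label plus an "instance_frequency" object with low/med/high counts, as
# read by do_plot_entity_histogram. The file name and counts are made up for
# illustration only.
def _write_example_entity_input(path="uniform.json"):
    example = {
        "mode": "uniform",
        "instance_frequency": {"low": 120, "med": 40, "high": 8},
    }
    with open(path, "w") as fstream:
        json.dump(example, fstream)
# Usage: _write_example_entity_input(); then run the `entity uniform.json -o histogram.pdf` subcommand.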
if __name__ == "__main__":
    import argparse
    import sys
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers()
command_parser = subparsers.add_parser('entity', help='Make entity histogram plot')
command_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help="JSON files to plot with.")
command_parser.add_argument('-o', '--output', type=str, default="histogram.pdf", help="Where to output plot file.")
command_parser.set_defaults(func=do_plot_entity_histogram)
command_parser = subparsers.add_parser('pairs', help='Make pair histogram plot')
command_parser.add_argument('input', type=argparse.FileType('r'), help="JSON files to plot with.")
command_parser.add_argument('-o', '--output', type=str, default="pair.pdf", help="Where to output plot file.")
command_parser.set_defaults(func=do_plot_pair_diagram)
command_parser = subparsers.add_parser('relations', help='Make relation histogram plot')
command_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help="JSON files to plot with.")
command_parser.add_argument('-o', '--output', type=str, default="histogram.pdf", help="Where to output plot file.")
command_parser.set_defaults(func=do_plot_relation_histogram)
command_parser = subparsers.add_parser('clusters', help='Make pair histogram plot')
command_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help="JSON files to plot with.")
command_parser.add_argument('-o', '--output', type=str, default="pair.pdf", help="Where to output plot file.")
command_parser.set_defaults(func=do_plot_clusters)
command_parser = subparsers.add_parser('plot-distribution', help='Make plots')
command_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help="JSON files to plot with.")
command_parser.add_argument('-o', '--output', type=str, default="distribution.png", help="Where to output plot file.")
command_parser.set_defaults(func=do_plot_distribution)
command_parser = subparsers.add_parser('plot-cross-distribution', help='Make plots')
command_parser.add_argument('input', type=argparse.FileType('r'), nargs='+', help="JSON files to plot with.")
command_parser.add_argument('-o', '--output', type=str, default="cross_distribution.png", help="Where to output plot file.")
command_parser.set_defaults(func=do_plot_cross_distribution)
ARGS = parser.parse_args()
    if getattr(ARGS, 'func', None) is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import contextlib
from tempest_lib import exceptions as lib_exc
import types
import urlparse
from neutron.tests.tempest import config
import boto
import boto.ec2
import boto.s3.connection
CONF = config.CONF
class BotoClientBase(object):
ALLOWED_METHODS = set()
def __init__(self, identity_client):
self.identity_client = identity_client
self.ca_cert = CONF.identity.ca_certificates_file
self.connection_timeout = str(CONF.boto.http_socket_timeout)
self.num_retries = str(CONF.boto.num_retries)
self.build_timeout = CONF.boto.build_timeout
self.connection_data = {}
def _config_boto_timeout(self, timeout, retries):
try:
boto.config.add_section("Boto")
except ConfigParser.DuplicateSectionError:
pass
boto.config.set("Boto", "http_socket_timeout", timeout)
boto.config.set("Boto", "num_retries", retries)
def _config_boto_ca_certificates_file(self, ca_cert):
if ca_cert is None:
return
try:
boto.config.add_section("Boto")
except ConfigParser.DuplicateSectionError:
pass
boto.config.set("Boto", "ca_certificates_file", ca_cert)
def __getattr__(self, name):
"""Automatically creates methods for the allowed methods set."""
if name in self.ALLOWED_METHODS:
def func(self, *args, **kwargs):
with contextlib.closing(self.get_connection()) as conn:
return getattr(conn, name)(*args, **kwargs)
func.__name__ = name
setattr(self, name, types.MethodType(func, self, self.__class__))
setattr(self.__class__, name,
types.MethodType(func, None, self.__class__))
return getattr(self, name)
else:
raise AttributeError(name)
def get_connection(self):
self._config_boto_timeout(self.connection_timeout, self.num_retries)
self._config_boto_ca_certificates_file(self.ca_cert)
ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
'aws_secret_access_key': CONF.boto.aws_secret}
if not all(ec2_client_args.values()):
ec2_client_args = self.get_aws_credentials(self.identity_client)
self.connection_data.update(ec2_client_args)
return self.connect_method(**self.connection_data)
def get_aws_credentials(self, identity_client):
"""
Obtain existing, or create new AWS credentials
:param identity_client: identity client with embedded credentials
:return: EC2 credentials
"""
ec2_cred_list = identity_client.list_user_ec2_credentials(
identity_client.user_id)
for cred in ec2_cred_list:
if cred['tenant_id'] == identity_client.tenant_id:
ec2_cred = cred
break
else:
ec2_cred = identity_client.create_user_ec2_credentials(
identity_client.user_id, identity_client.tenant_id)
if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
raise lib_exc.NotFound("Unable to get access and secret keys")
else:
ec2_cred_aws = {}
ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
return ec2_cred_aws
class APIClientEC2(BotoClientBase):
def connect_method(self, *args, **kwargs):
return boto.connect_ec2(*args, **kwargs)
def __init__(self, identity_client):
super(APIClientEC2, self).__init__(identity_client)
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
purl = urlparse.urlparse(CONF.boto.ec2_url)
region_name = CONF.compute.region
if not region_name:
region_name = CONF.identity.region
region = boto.ec2.regioninfo.RegionInfo(name=region_name,
endpoint=purl.hostname)
port = purl.port
if port is None:
if purl.scheme is not "https":
port = 80
else:
port = 443
else:
port = int(port)
self.connection_data.update({"is_secure": purl.scheme == "https",
"validate_certs": not insecure_ssl,
"region": region,
"host": purl.hostname,
"port": port,
"path": purl.path})
ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
'delete_key_pair', 'import_key_pair',
'get_all_key_pairs',
'get_all_tags',
'create_image', 'get_image',
'register_image', 'deregister_image',
'get_all_images', 'get_image_attribute',
'modify_image_attribute', 'reset_image_attribute',
'get_all_kernels',
'create_volume', 'delete_volume',
'get_all_volume_status', 'get_all_volumes',
                           'get_volume_attribute', 'modify_volume_attribute',
'bundle_instance', 'cancel_spot_instance_requests',
                           'confirm_product_instance',
'get_all_instance_status', 'get_all_instances',
'get_all_reserved_instances',
'get_all_spot_instance_requests',
'get_instance_attribute', 'monitor_instance',
'monitor_instances', 'unmonitor_instance',
'unmonitor_instances',
'purchase_reserved_instance_offering',
'reboot_instances', 'request_spot_instances',
'reset_instance_attribute', 'run_instances',
'start_instances', 'stop_instances',
'terminate_instances',
'attach_network_interface', 'attach_volume',
'detach_network_interface', 'detach_volume',
'get_console_output',
'delete_network_interface', 'create_subnet',
'create_network_interface', 'delete_subnet',
'get_all_network_interfaces',
'allocate_address', 'associate_address',
'disassociate_address', 'get_all_addresses',
'release_address',
'create_snapshot', 'delete_snapshot',
'get_all_snapshots', 'get_snapshot_attribute',
'modify_snapshot_attribute',
'reset_snapshot_attribute', 'trim_snapshots',
'get_all_regions', 'get_all_zones',
'get_all_security_groups', 'create_security_group',
'delete_security_group', 'authorize_security_group',
'authorize_security_group_egress',
'revoke_security_group',
'revoke_security_group_egress'))
class ObjectClientS3(BotoClientBase):
def connect_method(self, *args, **kwargs):
return boto.connect_s3(*args, **kwargs)
def __init__(self, identity_client):
super(ObjectClientS3, self).__init__(identity_client)
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
purl = urlparse.urlparse(CONF.boto.s3_url)
port = purl.port
if port is None:
if purl.scheme is not "https":
port = 80
else:
port = 443
else:
port = int(port)
self.connection_data.update({"is_secure": purl.scheme == "https",
"validate_certs": not insecure_ssl,
"host": purl.hostname,
"port": port,
"calling_format": boto.s3.connection.
OrdinaryCallingFormat()})
ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
'get_all_buckets', 'get_bucket', 'delete_key',
'lookup'))
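# --- Illustrative sketch (not part of the original clients) ---
# BotoClientBase.__getattr__ above only exposes names listed in ALLOWED_METHODS and
# forwards them to a freshly opened connection. The toy class below demonstrates the
# same dynamic-proxy idea in isolation; it is an illustration, not tempest code.
class _AllowedMethodProxyExample(object):
    ALLOWED_METHODS = set(('upper', 'lower'))

    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        if name in self.ALLOWED_METHODS:
            def func(*args, **kwargs):
                # Look the method up on the wrapped target only when called.
                return getattr(self._target, name)(*args, **kwargs)
            func.__name__ = name
            return func
        raise AttributeError(name)
# _AllowedMethodProxyExample("abc").upper() returns "ABC"; .strip() raises AttributeError.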
|
|
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from cms import constants
__all__ = ['get_cms_setting']
class VERIFIED: pass # need a unique identifier for CMS_LANGUAGES
def default(name):
def decorator(wrapped):
def wrapper():
if hasattr(settings, name):
return getattr(settings, name)
return wrapped()
update_wrapper(wrapper, wrapped)
        return wrapper
return decorator
DEFAULTS = {
'TEMPLATE_INHERITANCE': True,
'TOOLBAR_SIMPLE_STRUCTURE_MODE': True,
'PLACEHOLDER_CONF': {},
'PERMISSION': False,
# Whether to use raw ID lookups for users when PERMISSION is True
'RAW_ID_USERS': False,
'PUBLIC_FOR': 'all',
'APPHOOKS': [],
'TOOLBARS': [],
'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
'MEDIA_PATH': 'cms/',
'PAGE_MEDIA_PATH': 'cms_page_media/',
'TITLE_CHARACTER': '+',
'PAGE_CACHE': True,
'PLACEHOLDER_CACHE': True,
'PLUGIN_CACHE': True,
'CACHE_PREFIX': 'cms-',
'PLUGIN_PROCESSORS': [],
'PLUGIN_CONTEXT_PROCESSORS': [],
'UNIHANDECODE_VERSION': None,
'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
'MAX_PAGE_PUBLISH_REVERSIONS': 10,
'MAX_PAGE_HISTORY_REVERSIONS': 15,
'TOOLBAR_ANONYMOUS_ON': True,
'TOOLBAR_URL__EDIT_ON': 'edit',
'TOOLBAR_URL__EDIT_OFF': 'edit_off',
'TOOLBAR_URL__BUILD': 'build',
'TOOLBAR_URL__DISABLE': 'toolbar_off',
'ADMIN_NAMESPACE': 'admin',
'TOOLBAR_HIDE': False,
'WIZARD_DEFAULT_TEMPLATE': constants.TEMPLATE_INHERITANCE_MAGIC,
'WIZARD_CONTENT_PLUGIN': 'TextPlugin',
'WIZARD_CONTENT_PLUGIN_BODY': 'body',
}
def get_cache_durations():
"""
Returns the setting: CMS_CACHE_DURATIONS or the defaults.
"""
return getattr(settings, 'CMS_CACHE_DURATIONS', {
'menus': 60 * 60,
'content': 60,
'permissions': 60 * 60,
})
@default('CMS_MEDIA_ROOT')
def get_media_root():
return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))
@default('CMS_MEDIA_URL')
def get_media_url():
return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))
@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
return get_cms_setting('TOOLBAR_URL__EDIT_ON')
@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
return get_cms_setting('TOOLBAR_URL__EDIT_OFF')
@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__build():
return get_cms_setting('TOOLBAR_URL__BUILD')
@default('CMS_TOOLBAR_URL__DISABLE')
def get_toolbar_url__disable():
return get_cms_setting('TOOLBAR_URL__DISABLE')
def get_templates():
from cms.utils.django_load import load_from_file
if getattr(settings, 'CMS_TEMPLATES_DIR', False):
tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates directory
# or a dictionary holding 'site: template dir' entries
if isinstance(tpldir, dict):
tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the nearest
        # valid templates directory. Here we mimic what the filesystem and
# app_directories template loaders do
prefix = ''
# Relative to TEMPLATE_DIRS for filesystem loader
try:
path = settings.TEMPLATE_DIRS
except IndexError:
path = [template['DIRS'][0] for template in settings.TEMPLATES]
for basedir in path:
if tpldir.find(basedir) == 0:
prefix = tpldir.replace(basedir + os.sep, '')
break
# Relative to 'templates' directory that app_directory scans
if not prefix:
components = tpldir.split(os.sep)
try:
prefix = os.path.join(*components[components.index('templates') + 1:])
except ValueError:
# If templates is not found we use the directory name as prefix
# and hope for the best
prefix = os.path.basename(tpldir)
config_path = os.path.join(tpldir, '__init__.py')
# Try to load templates list and names from the template module
# If module file is not present skip configuration and just dump the filenames as templates
        if os.path.exists(config_path):
template_module = load_from_file(config_path)
templates = [(os.path.join(prefix, data[0].strip()), data[1]) for data in template_module.TEMPLATES.items()]
else:
templates = list((os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
else:
templates = list(getattr(settings, 'CMS_TEMPLATES', []))
if get_cms_setting('TEMPLATE_INHERITANCE'):
templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _('Inherit the template of the nearest ancestor')))
return templates
def _ensure_languages_settings(languages):
valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
required_language_keys = ['code', 'name']
simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
if not isinstance(languages, dict):
raise ImproperlyConfigured(
"CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
" as keys. Please check the format.")
defaults = languages.pop('default', {})
default_fallbacks = defaults.get('fallbacks')
needs_fallbacks = []
for key in defaults:
if key not in valid_language_keys:
raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: %s" % key)
for key in simple_defaults:
if key not in defaults:
defaults[key] = True
for site, language_list in languages.items():
if site != hash(site):
raise ImproperlyConfigured(
"CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
" for default values. %s is not a valid key." % site)
for language_object in language_list:
for required_key in required_language_keys:
if required_key not in language_object:
raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
"in site %r" % (key, site))
language_code = language_object['code']
for key in language_object:
if key not in valid_language_keys:
raise ImproperlyConfigured(
"CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
)
if 'fallbacks' not in language_object:
if default_fallbacks:
language_object['fallbacks'] = default_fallbacks
else:
needs_fallbacks.append((site, language_object))
for key in simple_defaults:
if key not in language_object:
language_object[key] = defaults[key]
site_fallbacks = {}
for site, language_object in needs_fallbacks:
if site not in site_fallbacks:
site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
lang_code != language_object['code']]
languages['default'] = defaults
languages[VERIFIED] = True # this will be busted by @override_settings and cause a re-check
return languages
def get_languages():
if settings.SITE_ID != hash(settings.SITE_ID):
raise ImproperlyConfigured(
"SITE_ID must be an integer"
)
if not settings.USE_I18N:
return _ensure_languages_settings(
{settings.SITE_ID: [{'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}]})
if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
raise ImproperlyConfigured(
'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES' % settings.LANGUAGE_CODE
)
languages = getattr(settings, 'CMS_LANGUAGES', {
settings.SITE_ID: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]
})
if VERIFIED in languages:
return languages
return _ensure_languages_settings(languages)
def get_unihandecode_host():
host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
if not host:
return host
if host.endswith('/'):
return host
else:
return host + '/'
COMPLEX = {
'CACHE_DURATIONS': get_cache_durations,
'MEDIA_ROOT': get_media_root,
'MEDIA_URL': get_media_url,
# complex because not prefixed by CMS_
'TEMPLATES': get_templates,
'LANGUAGES': get_languages,
'UNIHANDECODE_HOST': get_unihandecode_host,
'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__build,
'CMS_TOOLBAR_URL__DISABLE': get_toolbar_url__disable,
}
def get_cms_setting(name):
if name in COMPLEX:
return COMPLEX[name]()
else:
return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
def get_site_id(site):
from django.contrib.sites.models import Site
if isinstance(site, Site):
return site.id
try:
return int(site)
except (TypeError, ValueError):
pass
return settings.SITE_ID
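# --- Illustrative sketch (not part of django CMS) ---
# The `default(name)` decorator above prefers an explicit Django setting and only
# falls back to the wrapped function when the setting is absent. The standalone
# version below shows the same idea against a plain namespace object instead of
# django.conf.settings; the names used here are illustrative only.
def _example_default(settings_obj, name):
    def decorator(wrapped):
        def wrapper():
            if hasattr(settings_obj, name):
                return getattr(settings_obj, name)
            return wrapped()
        update_wrapper(wrapper, wrapped)
        return wrapper
    return decorator
# Example usage:
#   class _FakeSettings(object):
#       CMS_MEDIA_PATH = 'uploads/'
#   @_example_default(_FakeSettings, 'CMS_MEDIA_PATH')
#   def _media_path():
#       return 'cms/'
#   _media_path() == 'uploads/'   # the explicit setting wins over the computed default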
|
|
# -----------------
# own structure
# -----------------
# do separate scopes
def scope_basic():
from import_tree import mod1
#? int()
mod1.a
#? []
import_tree.a
#? []
import_tree.mod1
import import_tree
#? str()
import_tree.a
def scope_pkg():
import import_tree.mod1
#? str()
import_tree.a
#? ['mod1']
import_tree.mod1
#? int()
import_tree.mod1.a
def scope_nested():
import import_tree.pkg.mod1
#? str()
import_tree.a
#? list
import_tree.pkg.a
#? ['sqrt']
import_tree.pkg.sqrt
#? ['pkg']
import_tree.p
#? float()
import_tree.pkg.mod1.a
import import_tree.random
#? set
import_tree.random.a
def scope_nested2():
"""Multiple modules should be indexable, if imported"""
import import_tree.mod1
import import_tree.pkg
#? ['mod1']
import_tree.mod1
#? ['pkg']
import_tree.pkg
# With the latest changes this completion also works, because submodules
# are always included (some nested import structures lead to this,
# typically).
#? ['rename1']
import_tree.rename1
def scope_from_import_variable():
"""
    None of these should work, because "fake" imports don't work in Python
    without ``sys.modules`` modifications (e.g. ``os.path``); see also
    github issue #213 for clarification.
"""
a = 3
#?
from import_tree.mod2.fake import a
#?
from import_tree.mod2.fake import c
#?
a
#?
c
def scope_from_import_variable_with_parenthesis():
from import_tree.mod2.fake import (
a, foobarbaz
)
#?
a
#?
foobarbaz
# shouldn't complete, should still list the name though.
#? ['foobarbaz']
foobarbaz
def as_imports():
from import_tree.mod1 import a as xyz
#? int()
xyz
import not_existant, import_tree.mod1 as foo
#? int()
foo.a
import import_tree.mod1 as bar
#? int()
bar.a
def test_import_priorities():
"""
It's possible to overwrite import paths in an ``__init__.py`` file, by
    just assigning something there.
See also #536.
"""
from import_tree import the_pkg, invisible_pkg
#? int()
invisible_pkg
# In real Python, this would be the module, but it's not, because Jedi
# doesn't care about most stateful issues such as __dict__, which it would
# need to, to do this in a correct way.
#? int()
the_pkg
    # Importing foo is still possible, even though invisible_pkg got changed.
#? float()
from import_tree.invisible_pkg import foo
# -----------------
# std lib modules
# -----------------
import tokenize
#? ['tok_name']
tokenize.tok_name
from pyclbr import *
#? ['readmodule_ex']
readmodule_ex
import os
#? ['dirname']
os.path.dirname
from os.path import (
expanduser
)
#? os.path.expanduser
expanduser
from itertools import (tee,
islice)
#? ['islice']
islice
from functools import (partial, wraps)
#? ['wraps']
wraps
from keyword import kwlist, \
iskeyword
#? ['kwlist']
kwlist
#? []
from keyword import not_existing1, not_existing2
from tokenize import io
tokenize.generate_tokens
# -----------------
# builtins
# -----------------
import sys
#? ['prefix']
sys.prefix
#? ['append']
sys.path.append
from math import *
#? ['cos', 'cosh']
cos
def func_with_import():
import time
return time
#? ['sleep']
func_with_import().sleep
# -----------------
# relative imports
# -----------------
from .import_tree import mod1
#? int()
mod1.a
from ..import_tree import mod1
#?
mod1.a
from .......import_tree import mod1
#?
mod1.a
from .. import helpers
#? int()
helpers.sample_int
from ..helpers import sample_int as f
#? int()
f
from . import run
#? []
run.
from . import import_tree as imp_tree
#? str()
imp_tree.a
from . import datetime as mod1
#? []
mod1.
# self import
# this can cause recursions
from imports import *
# -----------------
# packages
# -----------------
from import_tree.mod1 import c
#? set
c
from import_tree import recurse_class1
#? ['a']
recurse_class1.C.a
# github #239 RecursionError
#? ['a']
recurse_class1.C().a
# -----------------
# Jedi debugging
# -----------------
# memoizing issues (check git history for the fix)
import not_existing_import
if not_existing_import:
a = not_existing_import
else:
a = not_existing_import
#?
a
# -----------------
# module underscore descriptors
# -----------------
def underscore():
import keyword
#? ['__file__']
keyword.__file__
#? str()
keyword.__file__
# Does that also work for the our own module?
#? ['__file__']
__file__
|
|
# encoding: utf-8
"""
Class for reading/writing analog signals in a text file.
Each column represents an AnalogSignal. All AnalogSignals have the same sampling rate.
Covers many cases where part of a file can be viewed as a CSV format.
Supported : Read/Write
Author: sgarcia
"""
from .baseio import BaseIO
from ..core import *
from .tools import create_many_to_one_relationship
import numpy as np
import quantities as pq
import csv
import os
from numpy import newaxis
class AsciiSignalIO(BaseIO):
"""
    Class for reading signals in a generic ASCII format.
    Each column represents a signal; they all share the same sampling rate.
    The sampling rate is either known externally or the first column can hold
    the time vector.
Usage:
>>> from neo import io
>>> r = io.AsciiSignalIO(filename='File_asciisignal_2.txt')
>>> seg = r.read_segment(lazy=False, cascade=True)
>>> print seg.analogsignals
[<AnalogSignal(array([ 39.0625 , 0. , 0. , ..., -26.85546875 ...
"""
is_readable = True
is_writable = True
supported_objects = [ Segment , AnalogSignal]
readable_objects = [ Segment]
writeable_objects = [Segment]
has_header = False
is_streameable = False
read_params = {
Segment : [
('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
('usecols' , { 'value' : None , 'type' : int } ),
('skiprows' , { 'value' :0 } ),
('timecolumn' , { 'value' : None, 'type' : int } ) ,
('unit' , { 'value' : 'V', } ),
('sampling_rate' , { 'value' : 1000., } ),
('t_start' , { 'value' : 0., } ),
('method' , { 'value' : 'homemade', 'possible' : ['genfromtxt' , 'csv' , 'homemade' ] }) ,
]
}
write_params = {
Segment : [
('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
('writetimecolumn' , { 'value' : True, } ) ,
]
}
name = None
extensions = [ 'txt' , 'asc', ]
mode = 'file'
def __init__(self , filename = None) :
"""
        This class reads/writes AnalogSignals in a text file.
        Each signal is a column.
        One of the columns can be the time vector.
Arguments:
filename : the filename to read/write
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy = False,
cascade = True,
delimiter = '\t',
usecols = None,
skiprows =0,
timecolumn = None,
sampling_rate = 1.*pq.Hz,
t_start = 0.*pq.s,
unit = pq.V,
method = 'genfromtxt',
):
"""
Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            usecols : if None take all columns, otherwise a list of the selected columns
            skiprows : skip the first n lines in case they contain header information
            timecolumn : None or a valid int pointing to the column that holds the time vector
            sampling_rate : the sampling rate of the signals; not taken into account if timecolumn is not None
            t_start : time of the first sample
            unit : unit of the AnalogSignal, can be a str or directly a Quantity
            method : 'genfromtxt' or 'csv' or 'homemade'
                in case of bugs you can try one of these methods
                'genfromtxt' uses numpy.genfromtxt
                'csv' uses the csv module
                'homemade' uses an intuitive, more robust but slower method
"""
seg = Segment(file_origin = os.path.basename(self.filename))
if not cascade:
return seg
        if isinstance(sampling_rate, (float, int)):
            # not a Quantity: assume Hz by default
            sampling_rate = sampling_rate * pq.Hz
        if isinstance(t_start, (float, int)):
            # not a Quantity: assume seconds by default
            t_start = t_start * pq.s
unit = pq.Quantity(1, unit)
#loadtxt
if method == 'genfromtxt' :
sig = np.genfromtxt(self.filename,
delimiter = delimiter,
usecols = usecols ,
skiprows = skiprows,
dtype = 'f')
if len(sig.shape) ==1:
sig = sig[:,newaxis]
elif method == 'csv' :
            tab = [l for l in csv.reader(open(self.filename, 'rU'), delimiter=delimiter)]
tab = tab[skiprows:]
sig = np.array( tab , dtype = 'f')
elif method == 'homemade' :
fid = open(self.filename,'rU')
for l in range(skiprows):
fid.readline()
tab = [ ]
for line in fid.readlines():
line = line.replace('\r','')
line = line.replace('\n','')
l = line.split(delimiter)
while '' in l :
l.remove('')
tab.append(l)
sig = np.array( tab , dtype = 'f')
if timecolumn is not None:
sampling_rate = 1./np.mean(np.diff(sig[:,timecolumn])) * pq.Hz
t_start = sig[0,timecolumn] * pq.s
for i in range(sig.shape[1]) :
if timecolumn == i : continue
if usecols is not None and i not in usecols: continue
if lazy:
signal = [ ]*unit
else:
signal = sig[:,i]*unit
anaSig = AnalogSignal( signal , sampling_rate = sampling_rate ,t_start =t_start, name = 'Column %d'%i)
if lazy:
anaSig.lazy_shape = sig.shape
anaSig.annotate( channel_index = i )
seg.analogsignals.append( anaSig )
create_many_to_one_relationship(seg)
return seg
def write_segment(self, segment,
delimiter = '\t',
skiprows =0,
writetimecolumn = True,
):
"""
        Write a segment and its AnalogSignals in a text file.
        **Arguments**
            delimiter : column delimiter in the file, '\t', one space, two spaces, ',' or ';'
            writetimecolumn : True or False, write the time vector as the first column
"""
l = [ ]
        if writetimecolumn:
            l.append(segment.analogsignals[0].times[:, newaxis])
for anaSig in segment.analogsignals:
l.append(anaSig.magnitude[:,newaxis])
sigs = np.concatenate(l, axis=1)
#print sigs.shape
np.savetxt(self.filename , sigs , delimiter = delimiter)
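# Minimal usage sketch (hypothetical file names), assuming 'signals.txt' holds
# tab-separated columns of samples; it simply round-trips a Segment through this IO class.
if __name__ == '__main__':
    reader = AsciiSignalIO(filename='signals.txt')
    seg = reader.read_segment(delimiter='\t', sampling_rate=1000. * pq.Hz,
                              t_start=0. * pq.s, unit=pq.V, method='genfromtxt')
    writer = AsciiSignalIO(filename='signals_copy.txt')
    writer.write_segment(seg, delimiter='\t', writetimecolumn=True)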
|
|
# -*- coding: utf-8 -*-
"""
Adapted from Yelp Fusion API code sample.
Please refer to http://www.yelp.com/developers/v3/documentation for the API
documentation.
"""
from __future__ import print_function
from __future__ import absolute_import
import argparse
import json
import pprint
import requests
import sys
import os
import shutil
import logging
import time
from urllib.request import urlopen
from bs4 import BeautifulSoup
# This client code can run on Python 2.x or 3.x. Your imports can be
# simpler if you only need one of those.
try:
# For Python 3.0 and later
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
except ImportError:
# Fall back to Python 2's urllib2 and urllib
from urllib2 import HTTPError
from urllib import quote
from urllib import urlencode
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# OAuth credential placeholders that must be filled in by users.
# You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_API_KEY = os.environ.get('YELP_API_KEY')
if YELP_API_KEY:
logger.debug('Loaded Yelp API Key %s', YELP_API_KEY)
else:
logger.error('No environment variable set for Yelp API key - export YELP_API_KEY=XXX')
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
def get_image_from_url(image_url, image_name):
# download image from image_url
# todo: catch error
try:
r = requests.get(image_url, verify=False)
#image_name = "image_to_classify__" + str(random.randint(1,10000)) + ".jpg"
image_file = open(image_name, 'wb')
for chunk in r.iter_content(100000):
image_file.write(chunk)
image_file.close()
return image_name
    except Exception:
logger.error('image could not be retrieved - waiting')
time.sleep(60)
return 0
def request(host, path, api_key, url_params=None):
"""Given your API_KEY, send a GET request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
API_KEY (str): Your API Key.
url_params (dict): An optional set of query parameters in the request.
Returns:
dict: The JSON response from the request.
Raises:
HTTPError: An error occurs from the HTTP request.
"""
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % api_key,
}
logger.info('Querying %s with headers %s and url params %s ...', url, headers, url_params)
response = requests.request('GET', url, headers=headers, params=url_params, verify=False)
logger.debug('querying returned json %s',response.json())
return response.json()
def search(api_key, location, num_of_businesses_to_get, offset):
"""Query the Search API by a search term and location.
Args:
location (str): The search location passed to the API.
num_of_businesses_to_get (int): # of businesses you want to get
Returns:
dict: The JSON response from the request.
"""
# change here to get different categories or search terms
# todo: load from config file
#term = "espresso"
term = ""
category = "coffee"
# coffeeroasteries
url_params = {
'term': term.replace(' ', '+'),
'categories': category.replace(' ', '+'),
'location': location.replace(' ', '+'),
'limit': num_of_businesses_to_get,
'offset': offset
}
return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)
def query_api(term, location):
"""Queries the API by the input values from the user.
Args:
term (str): The search term to query.
location (str): The location of the business to query.
"""
    # Note: this module's search() signature is (api_key, location, limit, offset) and the
    # search term is hard-coded inside search(), so only the location (with a small limit)
    # is forwarded here.
    response = search(YELP_API_KEY, location, 3, 0)
businesses = response.get('businesses')
business_id = businesses[0]['id']
print(u'{0} businesses found, querying business info ' \
'for the top result "{1}" ...'.format(
len(businesses), business_id))
response = get_business(YELP_API_KEY, business_id)
print(u'Result for business "{0}" found:'.format(business_id))
pprint.pprint(response, indent=2)
def get_business(api_key, business_id):
"""Query the Business API by a business ID.
Args:
business_id (str): The ID of the business to query.
Returns:
dict: The JSON response from the request.
"""
business_path = BUSINESS_PATH + business_id
return request(API_HOST, business_path, api_key)
def get_business_ids_from_api(location, num_of_businesses_to_get):
"""Queries the API based on the input location from the user.
Args:
location (str): The location of the business to query.
"""
#bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)
#default num_of_businesses_to_get = 1000
business_ids_list = []
num_of_businesses = 0
businesses = 1 # placeholder
i=0
while (i < num_of_businesses_to_get) and (i < 1000) and (businesses):
logger.info('Calling search api')
if (num_of_businesses_to_get < 50):
response = search(YELP_API_KEY, location, num_of_businesses_to_get, i)
else:
response = search(YELP_API_KEY, location, 50, i)
businesses = response.get('businesses')
if not businesses:
logger.error('No relevant businesses found in %s', location)
else:
num_of_businesses += len(businesses)
i+=50
for business in businesses:
business_ids_list.append(business['id'])
return business_ids_list
def get_business_images(biz_name,image_download_path):
"""download yelp images for a business
Args:
biz_name: yelp business id
image_download_path: directory to store images in.
    Returns:
        int: The number of images downloaded.
"""
logger.info('Downloading images for %s and putting them in %s', biz_name, image_download_path)
# delete if the directory already exists from last run
shutil.rmtree(image_download_path)
# make the directory again
os.makedirs(image_download_path)
temp_log_file = open(image_download_path + 'tmplog.txt', "w")
###
url = 'http://www.yelp.com/biz_photos/' + biz_name
urlfordrinks = 'http://www.yelp.com/biz_photos/' + biz_name + '?tab=drink'
nextpage = 1
photos=[]
    while nextpage:
        page = requests.get(urlfordrinks, verify=False)
        soup = BeautifulSoup(page.text, 'html.parser')
        current_photos = soup.findAll('img', {'class': 'photo-box-img'}, limit=None)
        photos.extend(current_photos)
        next_link = soup.find('a', {'class': 'next'})
        if next_link and next_link.get('href'):
            # follow the pagination link so the next iteration fetches the next page
            urlfordrinks = 'http://www.yelp.com' + next_link['href']
        else:
            nextpage = 0
###
# url = 'http://www.yelp.com/biz_photos/' + biz_name
# urlfordrinks = 'http://www.yelp.com/biz_photos/' + biz_name + '?tab=drink'
# page = requests.get(urlfordrinks, verify=False)
# soup = BeautifulSoup(page.text, 'html.parser')
# #soup.find('a', {'class': 'next'})['href']
# photos = soup.findAll ('img', {'class' : 'photo-box-img'}, limit=None)
# i = 30
# if len(photos) > i:
# # if we found more than 30 photos, go to the next page of photos
# nexturldrinks = 'http://www.yelp.com/biz_photos/' + biz_name + '?start=' + i + '&tab=drink'
# page = requests.get(nexturldrinks, verify=False)
# soup = BeautifulSoup(page.text, 'html.parser')
# new_photos = soup.findAll ('img', {'class' : 'photo-box-img'}, limit=None)
# photos.extend(new_photos)
# i+=30
####
logger.info('Found %s images for drinks', len(photos))
image_counter=0
if not(len(photos)):
# if there were no drink photos, try getting regular photos
page = requests.get(url, verify=False)
soup = BeautifulSoup(page.text, 'html.parser')
photos = soup.findAll ('img', {'class' : 'photo-box-img'}, limit=None)
# go to next page if it exists and get more photos
        i = 30
        if len(photos) > i:
            nexturl = 'http://www.yelp.com/biz_photos/' + biz_name + '?start=' + str(i) + '&tab=drink'
page = requests.get(nexturl, verify=False)
soup = BeautifulSoup(page.text, 'html.parser')
new_photos = soup.findAll ('img', {'class' : 'photo-box-img'}, limit=None)
photos.extend(new_photos)
i+=30
logger.info('No drink images found. Getting %s images for the business overall', len(photos))
if len(photos):
# if any photos were found
for photo in photos:
# todo: skip 30s photos
# todo: deal with error in getting image
if get_image_from_url(photo['src'], image_download_path + str(image_counter) + ".jpg"):
# urllib.urlretrieve(photo['src'], image_download_path + str(i) + ".jpg")
logger.info('Finished getting image %s', image_counter)
temp_log_file.write(str(image_counter) + ".jpg," + photo['src'] + "\n")
image_counter+=1
logger.info('Finished getting %s images for %s', image_counter, biz_name)
temp_log_file.close()
return image_counter
else:
logger.error('No images found', exc_info=True)
return 0
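# Hedged usage sketch tying together the helpers above for one hypothetical location.
# 'San Francisco, CA' and the 'images/<business_id>/' download path are illustrative only.
if __name__ == '__main__':
    business_ids = get_business_ids_from_api('San Francisco, CA', 5)
    for business_id in business_ids:
        download_path = os.path.join('images', business_id) + os.sep
        try:
            os.makedirs(download_path)
        except OSError:
            pass  # directory may already exist; get_business_images recreates it anyway
        count = get_business_images(business_id, download_path)
        logger.info('%s: downloaded %s images', business_id, count)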
|
|
#! -*- coding:utf-8 -*-
"""
tests.test_utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: tell-k <ffk2005@gmail.com>
:copyright: tell-k. All Rights Reserved.
"""
from __future__ import division, print_function, absolute_import, unicode_literals # NOQA
from django.test import TestCase
import unittest
class TestGetModelAttr(unittest.TestCase):
def _callFUT(self, option_model, django_version):
from modelsdoc.utils import get_model_attr
return get_model_attr(option_model, django_version)
def _getDummyMetaOption(self):
class DummyMetaOption(object):
concrete_model = 'concrete_model'
model = 'model'
return DummyMetaOption()
def test_django_ver16_over(self):
self.assertEqual(
'model',
self._callFUT(self._getDummyMetaOption(), (1, 6))
)
def test_django_ver15(self):
self.assertEqual(
'concrete_model',
self._callFUT(self._getDummyMetaOption(), (1, 5))
)
class TestGetFieldsAttr(unittest.TestCase):
def _getDummyMetaOption(self):
class DummyMetaOption(object):
concrete_fields = ('concrete_fields',)
fields = ('fields',)
many_to_many = ('many2many',)
return DummyMetaOption()
def _callFUT(self, option_model, django_version):
from modelsdoc.utils import get_fields_attr
return get_fields_attr(option_model, django_version)
def test_django_ver16_over(self):
self.assertEqual(
['concrete_fields', 'many2many'],
self._callFUT(self._getDummyMetaOption(), (1, 6))
)
def test_django_ver15(self):
self.assertEqual(
['fields', 'many2many'],
self._callFUT(self._getDummyMetaOption(), (1, 5))
)
class TestGetParentModelAttr(unittest.TestCase):
def _callFUT(self, related_field, django_version):
from modelsdoc.utils import get_parent_model_attr
return get_parent_model_attr(related_field, django_version)
def _getDummyMetaOption(self):
class DummyMetaOption(object):
parent_model = 'parent_model'
model = 'model'
return DummyMetaOption()
def test_django_ver17_lower(self):
self.assertEqual(
'parent_model',
self._callFUT(self._getDummyMetaOption(), (1, 7))
)
def test_django_ver18(self):
self.assertEqual(
'model',
self._callFUT(self._getDummyMetaOption(), (1, 8))
)
class TestClassToString(unittest.TestCase):
def _callFUT(self, model):
from modelsdoc.utils import class_to_string
return class_to_string(model)
def _getDummyModel(self):
class DummyModel(object):
__name__ = 'DummyModel'
__module__ = 'test_mod'
return DummyModel()
def test_to_string(self):
self.assertEqual(
'test_mod.DummyModel',
self._callFUT(self._getDummyModel())
)
class TestGetNullBlank(TestCase):
def _callFUT(self, field):
from modelsdoc.utils import get_null_blank
return get_null_blank(field)
def _getDummyField(self, null, blank):
class DummyField(object):
def __init__(self, null, blank):
self.null = null
self.blank = blank
return DummyField(null, blank)
def test_not_allow_null_and_blank(self):
self.assertEqual(
'',
self._callFUT(self._getDummyField(null=False, blank=False))
)
def test_allow_blank_only(self):
self.assertEqual(
'Blank',
self._callFUT(self._getDummyField(null=False, blank=True))
)
def test_allow_null_only(self):
self.assertEqual(
'Null',
self._callFUT(self._getDummyField(null=True, blank=False))
)
def test_allow_both(self):
self.assertEqual(
'Both',
self._callFUT(self._getDummyField(null=True, blank=True))
)
class TestGetForeignkey(TestCase):
def _callFUT(self, field):
from modelsdoc.utils import get_foreignkey
return get_foreignkey(field)
def _getTargetField(self, field_name):
from tests.models import Choice
return Choice._meta.get_field(field_name)
def test_is_foreignkey(self):
self.assertEqual(
'FK:tests.models.Poll',
self._callFUT(self._getTargetField('poll'))
)
def test_not_foreignkey(self):
self.assertEqual(
'',
self._callFUT(self._getTargetField('choice'))
)
def test_many_to_many_field(self):
self.assertEqual(
'M2M:tests.models.Genre (through: tests.models.Choice_genres)',
self._callFUT(self._getTargetField('genres'))
)
class TestGetChoices(TestCase):
def _callFUT(self, field):
from modelsdoc.utils import get_choices
return get_choices(field)
def _getTargetField(self, field_name):
from tests.models import Choice
for f in Choice._meta.fields:
if f.name != field_name:
continue
return f
def test_is_choices(self):
self.assertEqual(
'1:test1, 2:test2, 3:test3',
self._callFUT(self._getTargetField('choice'))
)
def test_not_choices(self):
self.assertEqual(
'',
self._callFUT(self._getTargetField('poll'))
)
class TestImportClass(unittest.TestCase):
def _callFUT(self, cl):
from modelsdoc.utils import import_class
return import_class(cl)
def test_import_ok(self):
self.assertEqual(
"<class 'modelsdoc.wrappers.ModelWrapper'>",
str(self._callFUT('modelsdoc.wrappers.ModelWrapper'))
)
def test_raise_attribute_error(self):
with self.assertRaises(AttributeError):
self._callFUT('modelsdoc.wrappers.NonExistsWrapper')
def test_raise_import_error(self):
with self.assertRaises(ImportError):
self._callFUT('modelsdoc.nonexists.Hoge')
class TestGetRelatedField(unittest.TestCase):
def _callFUT(self, field, django_version):
from modelsdoc.utils import get_related_field
return get_related_field(field, django_version)
def _getDummyField(self):
class DummyField(object):
related = 'related'
remote_field = 'remote_field'
return DummyField()
def test_django_ver18_lower(self):
self.assertEqual(
'related',
self._callFUT(self._getDummyField(), (1, 8))
)
def test_django_ver19(self):
self.assertEqual(
'remote_field',
self._callFUT(self._getDummyField(), (1, 9))
)
class TestGetThrough(unittest.TestCase):
def _callFUT(self, field, django_version):
from modelsdoc.utils import get_through
return get_through(field, django_version)
def _getDummyField(self):
class DummyRel(object):
def __init__(self, through):
self.through = through
class DummyField(object):
rel = DummyRel('rel')
remote_field = DummyRel('remote_field')
return DummyField()
def test_django_ver18_lower(self):
self.assertEqual(
'rel',
self._callFUT(self._getDummyField(), (1, 8))
)
def test_django_ver19(self):
self.assertEqual(
'remote_field',
self._callFUT(self._getDummyField(), (1, 9))
)
|
|
# -*- coding: utf-8 -*-
import logging
import unittest
import sys
if sys.version_info[0] >= 3:
from tempfile import *
else:
from tempfile import *
from backports import tempfile
TemporaryDirectory = tempfile.TemporaryDirectory
from gos.algo.executable_containers.base_round import Round
from gos.algo.executable_containers.base_stage import Stage
from gos.algo.executable_containers.pipeline import Pipeline
from gos.exceptions import GOSExecutableContainerException, GOSIOException
from gos.executable_containers import ExecutableContainer
from gos.manager import Manager
def invalidate_caches():
pass
try:
import importlib
invalidate_caches = importlib.invalidate_caches
except (ImportError, AttributeError):
pass
class ExecutableContainerTestCase(unittest.TestCase):
def setUp(self):
self.executable_container = ExecutableContainer()
self.ec = self.executable_container
self.dm = Manager({})
self.dm.logger = logging.getLogger()
################################################################################
#
    # testing attributes that are utilized in referencing the EC object itself
#
################################################################################
def test_name_attribute(self):
self.assertTrue(hasattr(self.ec, "name"))
def test_type_name_attribute(self):
self.assertTrue(hasattr(self.ec, "type_name"))
def test_group_reference_name_attribute(self):
self.assertTrue(hasattr(self.ec, "group_reference_name"))
################################################################################
#
    # testing attributes that are utilized in referencing other ECs / tasks that
    # this EC object works with
#
################################################################################
def test_entries_type_name_attribute(self):
self.assertTrue(hasattr(self.ec, "entries_type_names"))
def test_entries_info_attribute(self):
self.assertTrue(hasattr(self.ec, "entries_names"))
def test_entries_attribute(self):
self.assertTrue(hasattr(self.ec, "entries"))
################################################################################
#
# testing internal attributes and methods for the EC object
#
################################################################################
def test_self_loop_attribute(self):
self.assertTrue(hasattr(self.ec, "self_loop"))
def test_do_self_loop_attribute(self):
self.assertTrue(hasattr(self.ec, "do_self_loop"))
def test_run_method_existence(self):
self.assertTrue(hasattr(self.ec, "run"))
self.assertTrue(callable(getattr(self.ec, "run")))
def test_logger_attribute(self):
self.assertTrue(hasattr(self.ec, "logger"))
################################################################################
#
# testing logic of EC object
#
################################################################################
def test_default_group_reference_name_attribute(self):
ec = ExecutableContainer(name="test")
self.assertEqual(ec.group_reference_name, "tests")
def test_initialization_with_class_defined_entries_names_value(self):
entry_ = ["entry1", "entry2", "entry3"]
class MyEC(ExecutableContainer):
entries_names = entry_
value = MyEC()
self.assertListEqual(value.entries_names, entry_)
def test_initialization_with_no_defined_entries_names_values(self):
class MyEC(ExecutableContainer):
pass
self.assertListEqual(MyEC().entries_names, [])
def test_initialization_with_specified_entries_names_value(self):
class MyEC(ExecutableContainer):
pass
value_ = ["task1", "round1", "value1"]
self.assertListEqual(MyEC(entries_names=value_).entries_names, value_)
def test_initialization_with_class_defined_entries_type_names(self):
entry_ = ["task", "round"]
class MyEC(ExecutableContainer):
entries_type_names = entry_
value = MyEC()
self.assertListEqual(value.entries_type_names, entry_)
def test_initialization_with_no_defined_entries_type_names_value(self):
class MyEC(ExecutableContainer):
pass
self.assertEqual(MyEC().entries_type_names, [])
def test_initialization_with_specified_entries_type_names_value(self):
class MyEC(ExecutableContainer):
pass
value_ = ["task", "round", "value"]
self.assertListEqual(MyEC(entries_type_names=value_).entries_type_names, value_)
def test_initialization_entries_by_default_are_empty_list(self):
self.assertListEqual(ExecutableContainer().entries, [])
def test_setup_from_config_no_name(self):
with self.assertRaises(GOSExecutableContainerException):
ExecutableContainer.setup_from_config(manager=self.dm, config={})
def test_setup_from_config_self_loop_value(self):
ec = ExecutableContainer.setup_from_config(config={"name": "my_name",
"self_loop": False},
manager=self.dm)
self.assertFalse(ec.self_loop)
def test_setup_from_config_entries_names(self):
stage_name_list = ["stage1", "stage2", "stage3"]
ec = ExecutableContainer.setup_from_config(config={"name": "my_name",
"entries_names": stage_name_list},
manager=self.dm)
self.assertListEqual(ec.entries_names, stage_name_list)
def test_setup_from_file_file_does_not_exists(self):
non_existing_path = "non_existing_path.py"
with self.assertRaises(GOSIOException):
next(ExecutableContainer.setup_from_file(non_existing_path))
def test_setup_from_file_non_python_file(self):
non_py_file = NamedTemporaryFile(mode="wt", suffix=".non_py")
with self.assertRaises(GOSIOException):
next(ExecutableContainer.setup_from_file(non_py_file.name))
def test_setup_from_file_no_unique_name(self):
tmp_file = NamedTemporaryFile(mode="wt", suffix=".py")
tmp_file.write(self.get_executable_container_import_string())
tmp_file.write("""class MyContainer(ExecutableContainer):\n\tdef setup(self):\n\t\tpass""")
tmp_file.flush()
invalidate_caches()
with self.assertRaises(GOSExecutableContainerException):
next(ExecutableContainer.setup_from_file(tmp_file.name))
def get_executable_container_import_string(self):
return """from gos.executable_containers import ExecutableContainer\n"""
def test_setup_from_file_no_setup_method(self):
tmp_file = NamedTemporaryFile(mode="wt", suffix=".py")
tmp_file.write(self.get_executable_container_import_string())
tmp_file.write("""class MyContainer(ExecutableContainer):\n\tname="new_executable_container_name" """)
tmp_file.flush()
invalidate_caches()
with self.assertRaises(GOSExecutableContainerException):
next(ExecutableContainer.setup_from_file(tmp_file.name))
def test_setup_from_file(self):
tmp_file = NamedTemporaryFile(mode="wt", suffix=".py")
tmp_file.write(self.get_executable_container_import_string())
tmp_file.write(
"""class MyContainer(ExecutableContainer):\n\tname="my_ec"\n\tdef setup(self):\n\t\tself.entries_names = ["entry1"]\n\t\tself.entries_type_names=["task"] """)
tmp_file.flush()
invalidate_caches()
result = next(ExecutableContainer.setup_from_file(tmp_file.name))
self.assertIsInstance(result, ExecutableContainer)
self.assertListEqual(result.entries_names, ["entry1"])
self.assertListEqual(result.entries_type_names, ["task"])
def test_setup_from_file_multiple_ex_containers(self):
tmp_file = NamedTemporaryFile(mode="wt", suffix=".py")
tmp_file.write(self.get_executable_container_import_string())
tmp_file.write(
"""class MyContainerOne(ExecutableContainer):\n\tname="my_ec_1"\n\tdef setup(self):\n\t\tself.entries_names = ["entry1"]\n\t\tself.entries_type_names=["task"]\ndef run(self, manager):\n\t\tpass""")
tmp_file.write(
"""\n\n\nclass MyContainerTwo(ExecutableContainer):\n\tname="my_ec_2"\n\tdef setup(self):\n\t\tself.entries_names = ["entry1"]\n\t\tself.entries_type_names=["task"]\ndef run(self, manager):\n\t\tpass """)
tmp_file.flush()
invalidate_caches()
result = list(ExecutableContainer.setup_from_file(tmp_file.name))
self.assertEqual(len(result), 2)
names = {ex.name for ex in result}
self.assertSetEqual({"my_ec_1", "my_ec_2"}, names)
class BaseStageTestCase(unittest.TestCase):
def test_base_stage_executable_container_entries_type_name_attribute(self):
self.assertListEqual(Stage.entries_type_names, ["task"])
def test_base_stage_executable_container_type_name(self):
self.assertEqual(Stage.type_name, "stage")
class BaseRoundTestCase(unittest.TestCase):
def test_base_round_executable_container_entries_type_name_attribute(self):
self.assertListEqual(Round.entries_type_names, ["stage"])
def test_base_round_executable_container_type_name(self):
self.assertEqual(Round.type_name, "round")
class PipelineTestCase(unittest.TestCase):
def test_pipeline_executable_container_entries_type_name_attribute(self):
self.assertIsNone(Pipeline.entries_type_names)
def test_pipeline_executable_container_type_name(self):
self.assertEqual(Pipeline.type_name, "pipeline")
if __name__ == '__main__':
unittest.main()
|
|
import flask_sqlalchemy
from sqlalchemy.dialects import postgresql as pg
# pylint: disable=no-member
db = flask_sqlalchemy.SQLAlchemy()
RarityEnum = pg.ENUM('Common', 'Rare', 'Not For Sale', name='rarity', metadata=db.metadata)
BodyAreasEnum = pg.ENUM('Head', 'Neck', 'Upper Chest', 'Abdomen', 'Groin', 'Upper Back',
'Lower Back', 'Buttocks', 'Shoulders', 'Upper Arms', 'Forearms',
'Hands', 'Thighs', 'Shins', 'Feet', 'Joints', name='body_areas',
metadata=db.metadata)
FireArcEnum = pg.ENUM('Above', 'Below', 'Front', 'Back', 'Left', 'Right', name='fire_arc',
metadata=db.metadata)
GenderEnum = pg.ENUM('M', 'F', name='gender', metadata=db.metadata)
PlayableTypeEnum = pg.ENUM('PC', 'NPC', 'Creature', name='playable_type', metadata=db.metadata)
LanguageAbilityEnum = pg.ENUM('Speak', 'Understand', 'None', name='language_ability',
metadata=db.metadata)
class Armor(db.Model):
__tablename__ = 'armor'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
areas_covered = db.Column(pg.ARRAY(BodyAreasEnum), nullable=False)
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
resist_physical = db.Column(db.Numeric(3, 1), nullable=False)
resist_energy = db.Column(db.Numeric(3, 1), nullable=False)
rarity = db.Column(RarityEnum, nullable=False, server_default=db.text("'Common'::rarity"))
price_new = db.Column(db.SmallInteger, nullable=False)
price_used = db.Column(db.SmallInteger, nullable=False)
images = db.relationship('Image', secondary='armor_image')
t_armor_image = db.Table(
'armor_image', db.metadata,
db.Column('armor_id', db.ForeignKey('armor.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('armor_id', 'image_id')
)
class Attribute(db.Model):
__tablename__ = 'attribute'
name = db.Column(db.String(30), nullable=False)
id = db.Column(db.String(3), primary_key=True)
description = db.Column(db.Text)
display_order = db.Column(db.SmallInteger, nullable=False)
t_character_armor = db.Table(
'character_armor', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('armor_id', db.ForeignKey('armor.id', ondelete='CASCADE', onupdate='RESTRICT'),
nullable=False, index=True)
)
class CharacterAttribute(db.Model):
__tablename__ = 'character_attribute'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
character_id = db.Column(db.ForeignKey('character_sheet.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
attribute_id = db.Column(db.ForeignKey('attribute.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
level = db.Column(db.Numeric(3, 1), nullable=False)
attribute = db.relationship('Attribute')
character = db.relationship('CharacterSheet')
class CharacterSheet(db.Model):
__tablename__ = 'character_sheet'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
race_id = db.Column(db.ForeignKey('race.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False, index=True)
planet_id = db.Column(db.ForeignKey('planet.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
character_type_id = db.Column(db.ForeignKey('character_type.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
background = db.Column(db.Text)
motivation = db.Column(db.Text)
quote = db.Column(db.Text)
gender = db.Column(GenderEnum, nullable=False, server_default=db.text("'M'::gender"))
age = db.Column(db.SmallInteger, nullable=False)
height = db.Column(db.Numeric(3, 1), nullable=False)
weight = db.Column(db.Integer, nullable=False)
move_land = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'10'::smallint"))
move_water = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'0'::smallint"))
move_air = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'0'::smallint"))
force_pts = db.Column(db.SmallInteger, nullable=False)
dark_side_pts = db.Column(db.SmallInteger, nullable=False)
character_pts = db.Column(db.SmallInteger, nullable=False)
credits_bank = db.Column(db.BigInteger, nullable=False)
credits_debt = db.Column(db.BigInteger, nullable=False)
is_template = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
character_type = db.relationship('CharacterType')
character_planet = db.relationship('Planet')
character_race = db.relationship('Race')
character_explosives = db.relationship('WeaponExplosive',
secondary='character_weapon_explosive')
character_vehicles = db.relationship('Vehicle', secondary='character_vehicle')
character_melee_weapons = db.relationship('WeaponMelee', secondary='character_weapon_melee')
character_starships = db.relationship('Starship', secondary='character_starship')
character_ranged_weapons = db.relationship('WeaponRanged', secondary='character_weapon_ranged')
images = db.relationship('Image', secondary='character_image')
t_character_image = db.Table(
'character_image', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='RESTRICT',
onupdate='CASCADE'),
nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('character_id', 'image_id')
)
class CharacterSkill(db.Model):
__tablename__ = 'character_skill'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
character_id = db.Column(db.ForeignKey('character_sheet.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False)
level = db.Column(db.Numeric(3, 1), nullable=False)
character = db.relationship('CharacterSheet')
skill = db.relationship('Skill')
class CharacterSpecialization(db.Model):
__tablename__ = 'character_specialization'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
character_id = db.Column(db.ForeignKey('character_sheet.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
specialization_id = db.Column(db.ForeignKey('skill_specialization.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
level = db.Column(db.Numeric(3, 1), nullable=False)
character = db.relationship('CharacterSheet')
specialization = db.relationship('SkillSpecialization')
t_character_starship = db.Table(
'character_starship', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('starship_id', db.ForeignKey('starship.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
)
class CharacterType(db.Model):
__tablename__ = 'character_type'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
name = db.Column(db.String(50), nullable=False)
t_character_vehicle = db.Table(
'character_vehicle', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('vehicle_id', db.ForeignKey('vehicle.id', ondelete='CASCADE', onupdate='RESTRICT'),
nullable=False, index=True)
)
t_character_weapon_explosive = db.Table(
'character_weapon_explosive', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('explosive_id', db.ForeignKey('weapon_explosive.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
)
t_character_weapon_melee = db.Table(
'character_weapon_melee', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('melee_id', db.ForeignKey('weapon_melee.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
)
t_character_weapon_ranged = db.Table(
'character_weapon_ranged', db.metadata,
db.Column('character_id', db.ForeignKey('character_sheet.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('ranged_id', db.ForeignKey('weapon_ranged.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
)
class ForceAbility(db.Model):
__tablename__ = 'force_ability'
name = db.Column(db.String(100), nullable=False)
difficulty = db.Column(db.Text)
time_required = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
force_power_id = db.Column(pg.UUID)
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
class ForceAbilityPrerequisite(db.Model):
__tablename__ = 'force_ability_prerequisite'
__table_args__ = (
db.UniqueConstraint('force_ability_id', 'prerequisite_id'),
)
force_ability_id = db.Column(pg.UUID)
prerequisite_id = db.Column(pg.UUID)
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
class ForcePower(db.Model):
__tablename__ = 'force_power'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
class Image(db.Model):
__tablename__ = 'image'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
display_order = db.Column(db.SmallInteger, nullable=False)
name = db.Column(db.String(120), nullable=False)
dir = db.Column(db.String(100), nullable=False)
caption = db.Column(db.String(200), nullable=False)
image_width = db.Column(db.SmallInteger, nullable=False)
image_height = db.Column(db.SmallInteger, nullable=False)
thumb_width = db.Column(db.SmallInteger, nullable=False)
thumb_height = db.Column(db.SmallInteger, nullable=False)
class Planet(db.Model):
__tablename__ = 'planet'
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
images = db.relationship('Image', secondary='planet_image')
t_planet_image = db.Table(
'planet_image', db.metadata,
db.Column('planet_id', db.ForeignKey('planet.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('planet_id', 'image_id')
)
class Race(db.Model):
__tablename__ = 'race'
playable_type = db.Column(PlayableTypeEnum, nullable=False, index=True,
server_default=db.text("'PC'::playable_type"))
name = db.Column(db.String(100), nullable=False)
basic_ability = db.Column(LanguageAbilityEnum, nullable=False,
server_default=db.text("'Speak'::language_ability"))
description = db.Column(db.Text)
special_abilities = db.Column(db.Text)
story_factors = db.Column(db.Text)
min_move_land = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'10'::smallint"))
max_move_land = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'12'::smallint"))
min_move_water = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'5'::smallint"))
max_move_water = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'6'::smallint"))
min_move_air = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'0'::smallint"))
max_move_air = db.Column(db.SmallInteger, nullable=False,
server_default=db.text("'0'::smallint"))
min_height = db.Column(db.Numeric(3, 1), nullable=False,
server_default=db.text("'1.5'::double precision"))
max_height = db.Column(db.Numeric(3, 1), nullable=False,
server_default=db.text("'2'::double precision"))
attribute_level = db.Column(db.Numeric(3, 1), nullable=False, server_default=db.text("12.0"))
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
planet_id = db.Column(db.ForeignKey('planet.id', ondelete='RESTRICT', onupdate='CASCADE'))
planet = db.relationship('Planet', backref='races')
images = db.relationship('Image', secondary='race_image')
t_race_image = db.Table(
'race_image', db.metadata,
db.Column('race_id', db.ForeignKey('race.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('race_id', 'image_id')
)
class RaceAttribute(db.Model):
__tablename__ = 'race_attribute'
__table_args__ = (
db.UniqueConstraint('race_id', 'attribute_id'),
)
min_level = db.Column(db.Numeric(3, 1), nullable=False, server_default=db.text("2.0"))
max_level = db.Column(db.Numeric(3, 1), nullable=False, server_default=db.text("4.0"))
race_id = db.Column(db.ForeignKey('race.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False)
attribute_id = db.Column(db.ForeignKey('attribute.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
attribute = db.relationship('Attribute', backref='attribute_race_attributes')
race = db.relationship('Race', backref='race_attributes')
class Scale(db.Model):
__tablename__ = 'scale'
id = db.Column(db.String(30), primary_key=True)
modifier = db.Column(db.Numeric(3, 1), nullable=False)
class Skill(db.Model):
__tablename__ = 'skill'
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
has_specializations = db.Column(db.Boolean, nullable=False, server_default=db.text("true"))
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
attribute_id = db.Column(db.ForeignKey('attribute.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False)
attribute = db.relationship('Attribute')
class SkillAdvanced(db.Model):
__tablename__ = 'skill_advanced'
prerequisite_level = db.Column(db.Numeric(3, 1), nullable=False, server_default=db.text("5.0"))
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'))
base_skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'))
base_skill = db.relationship('Skill', primaryjoin='SkillAdvanced.base_skill_id == Skill.id')
skill = db.relationship('Skill', primaryjoin='SkillAdvanced.skill_id == Skill.id')
class SkillSpecialization(db.Model):
__tablename__ = 'skill_specialization'
name = db.Column(db.String(100), nullable=False)
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'))
skill = db.relationship('Skill')
class Starship(db.Model):
__tablename__ = 'starship'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
scale_id = db.Column(db.ForeignKey('scale.id', ondelete='SET NULL', onupdate='RESTRICT'),
index=True)
name = db.Column(db.String(100), nullable=False)
type = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
length = db.Column(db.Float(53), nullable=False)
capacity_crew = db.Column(db.SmallInteger, nullable=False)
capacity_passengers = db.Column(db.SmallInteger, nullable=False)
capacity_troops = db.Column(db.SmallInteger, nullable=False)
capacity_cargo = db.Column(db.SmallInteger, nullable=False)
capacity_consumables = db.Column(db.SmallInteger, nullable=False)
has_nav_computer = db.Column(db.SmallInteger, nullable=False)
hyperdrive_multiplier = db.Column(db.Float(53), nullable=False)
hyperdrive_backup = db.Column(db.Float(53), nullable=False)
speed_space = db.Column(db.SmallInteger, nullable=False)
speed_atmosphere_min = db.Column(db.SmallInteger, nullable=False)
speed_atmosphere_max = db.Column(db.SmallInteger, nullable=False)
maneuver = db.Column(db.Numeric(3, 1), nullable=False)
hull = db.Column(db.Numeric(3, 1), nullable=False)
shields = db.Column(db.Numeric(3, 1), nullable=False)
sensors_passive_range = db.Column(db.SmallInteger, nullable=False)
sensors_passive_level = db.Column(db.Numeric(3, 1), nullable=False)
sensors_scan_range = db.Column(db.SmallInteger, nullable=False)
sensors_scan_level = db.Column(db.Numeric(3, 1), nullable=False)
sensors_search_range = db.Column(db.SmallInteger, nullable=False)
sensors_search_level = db.Column(db.Numeric(3, 1), nullable=False)
sensors_focus_range = db.Column(db.SmallInteger, nullable=False)
sensors_focus_level = db.Column(db.Numeric(3, 1), nullable=False)
rarity = db.Column(RarityEnum, nullable=False, server_default=db.text("'Common'::rarity"))
price_new = db.Column(db.Integer)
price_used = db.Column(db.Integer)
scale = db.relationship('Scale')
skill = db.relationship('Skill')
images = db.relationship('Image', secondary='starship_image')
t_starship_image = db.Table(
'starship_image', db.metadata,
db.Column('starship_id', db.ForeignKey('starship.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('starship_id', 'image_id')
)
class StarshipWeapon(db.Model):
__tablename__ = 'starship_weapon'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
starship_id = db.Column(db.ForeignKey('starship.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
type = db.Column(db.String(100), nullable=False)
number = db.Column(db.SmallInteger, nullable=False)
crew = db.Column(db.SmallInteger, nullable=False)
fire_rate = db.Column(db.Float(53))
fire_control = db.Column(db.Numeric(3, 1), nullable=False)
fire_arc = db.Column(pg.ARRAY(FireArcEnum), nullable=False)
fire_linked = db.Column(db.SmallInteger, nullable=False)
range_minimum_space = db.Column(db.SmallInteger, nullable=False)
range_short_space = db.Column(db.SmallInteger, nullable=False)
range_medium_space = db.Column(db.SmallInteger, nullable=False)
range_long_space = db.Column(db.SmallInteger, nullable=False)
range_minimum_atmosphere = db.Column(db.SmallInteger, nullable=False)
range_short_atmosphere = db.Column(db.SmallInteger, nullable=False)
range_medium_atmosphere = db.Column(db.SmallInteger, nullable=False)
range_long_atmosphere = db.Column(db.SmallInteger, nullable=False)
damage = db.Column(db.Numeric(3, 1), nullable=False)
skill = db.relationship('Skill')
starship = db.relationship('Starship')
class Vehicle(db.Model):
__tablename__ = 'vehicle'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
scale_id = db.Column(db.ForeignKey('scale.id', ondelete='SET NULL', onupdate='RESTRICT'),
index=True)
name = db.Column(db.String(100), nullable=False)
type = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
cover = db.Column(db.Float(53), nullable=False)
capacity_crew = db.Column(db.SmallInteger, nullable=False)
capacity_passengers = db.Column(db.SmallInteger, nullable=False)
capacity_troops = db.Column(db.SmallInteger, nullable=False)
capacity_cargo = db.Column(db.SmallInteger, nullable=False)
capacity_consumables = db.Column(db.SmallInteger, nullable=False)
speed_min = db.Column(db.SmallInteger, nullable=False)
speed_max = db.Column(db.SmallInteger, nullable=False)
altitude_min = db.Column(db.SmallInteger, nullable=False)
altitude_max = db.Column(db.SmallInteger, nullable=False)
maneuver = db.Column(db.Numeric(3, 1), nullable=False)
hull = db.Column(db.Numeric(3, 1), nullable=False)
shields = db.Column(db.Numeric(3, 1), nullable=False)
rarity = db.Column(RarityEnum, nullable=False, server_default=db.text("'Common'::rarity"))
price_new = db.Column(db.Integer)
price_used = db.Column(db.Integer)
scale = db.relationship('Scale')
skill = db.relationship('Skill')
images = db.relationship('Image', secondary='vehicle_image')
t_vehicle_image = db.Table(
'vehicle_image', db.metadata,
db.Column('vehicle_id', db.ForeignKey('vehicle.id', ondelete='RESTRICT', onupdate='CASCADE'),
nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('vehicle_id', 'image_id')
)
class VehicleWeapon(db.Model):
__tablename__ = 'vehicle_weapon'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
vehicle_id = db.Column(db.ForeignKey('vehicle.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True)
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
type = db.Column(db.String(100), nullable=False)
number = db.Column(db.SmallInteger, nullable=False)
crew = db.Column(db.SmallInteger, nullable=False)
fire_rate = db.Column(db.Float(53))
fire_control = db.Column(db.Numeric(3, 1), nullable=False)
fire_arc = db.Column(pg.ARRAY(FireArcEnum), nullable=False)
fire_linked = db.Column(db.SmallInteger, nullable=False)
range_minimum = db.Column(db.SmallInteger, nullable=False)
range_short = db.Column(db.SmallInteger, nullable=False)
range_medium = db.Column(db.SmallInteger, nullable=False)
range_long = db.Column(db.SmallInteger, nullable=False)
damage = db.Column(db.Numeric(3, 1), nullable=False)
skill = db.relationship('Skill')
vehicle = db.relationship('Vehicle')
class WeaponExplosive(db.Model):
__tablename__ = 'weapon_explosive'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
range_minimum = db.Column(db.SmallInteger, nullable=False)
range_short = db.Column(db.SmallInteger, nullable=False)
range_medium = db.Column(db.SmallInteger, nullable=False)
range_long = db.Column(db.SmallInteger, nullable=False)
skill = db.relationship('Skill')
images = db.relationship('Image', secondary='weapon_explosive_image')
t_weapon_explosive_image = db.Table(
'weapon_explosive_image', db.metadata,
db.Column('weapon_explosive_id', db.ForeignKey('weapon_explosive.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('weapon_explosive_id', 'image_id')
)
t_weapon_explosive_damage = db.Table(
'weapon_explosive_damage', db.metadata,
db.Column('explosive_id', db.ForeignKey('weapon_explosive.id', ondelete='CASCADE',
onupdate='RESTRICT'), nullable=False, index=True),
db.Column('radius', db.SmallInteger, nullable=False),
db.Column('damage', db.Numeric(3, 1), nullable=False)
)
class WeaponMelee(db.Model):
__tablename__ = 'weapon_melee'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
damage = db.Column(db.Numeric(3, 1), nullable=False)
max_damage = db.Column(db.Numeric(3, 1), nullable=False)
skill = db.relationship('Skill')
images = db.relationship('Image', secondary='weapon_melee_image')
t_weapon_melee_image = db.Table(
'weapon_melee_image', db.metadata,
db.Column('weapon_melee_id', db.ForeignKey('weapon_melee.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('weapon_melee_id', 'image_id')
)
class WeaponRanged(db.Model):
__tablename__ = 'weapon_ranged'
id = db.Column(pg.UUID, primary_key=True, server_default=db.text("uuid_generate_v4()"))
skill_id = db.Column(db.ForeignKey('skill.id', ondelete='RESTRICT', onupdate='CASCADE'),
index=True)
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.Text)
fire_rate = db.Column(db.Float(53))
range_minimum = db.Column(db.SmallInteger, nullable=False)
range_short = db.Column(db.SmallInteger, nullable=False)
range_medium = db.Column(db.SmallInteger, nullable=False)
range_long = db.Column(db.SmallInteger, nullable=False)
damage = db.Column(db.Numeric(3, 1), nullable=False)
skill = db.relationship('Skill')
images = db.relationship('Image', secondary='weapon_ranged_image')
t_weapon_ranged_image = db.Table(
'weapon_ranged_image', db.metadata,
db.Column('weapon_ranged_id', db.ForeignKey('weapon_ranged.id', ondelete='RESTRICT',
onupdate='CASCADE'), nullable=False),
db.Column('image_id', db.ForeignKey('image.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
db.UniqueConstraint('weapon_ranged_id', 'image_id')
)
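# Hedged wiring sketch: how these models would typically be bound to a Flask app.
# The database URI is a placeholder; the schema assumes PostgreSQL (UUID/ENUM columns
# and the uuid_generate_v4() server default from the uuid-ossp extension).
if __name__ == '__main__':
    import flask

    app = flask.Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://user:pass@localhost/starwars'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    with app.app_context():
        db.create_all()  # creates the tables and enum types declared above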
|
|
# -*- coding: utf-8 -*-
import mock
import unittest
from nose.tools import * # noqa
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase, get_default_metaschema
from tests.factories import UserFactory, ProjectFactory
from framework.auth import Auth
from website.addons.github.exceptions import NotFoundError
from website.addons.github import settings as github_settings
from website.addons.github.tests.factories import GitHubOauthSettingsFactory
from website.addons.github.model import AddonGitHubUserSettings
from website.addons.github.model import AddonGitHubNodeSettings
from website.addons.github.model import AddonGitHubOauthSettings
from .utils import create_mock_github
mock_github = create_mock_github()
class TestCallbacks(OsfTestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('github', auth=self.consolidated_auth)
self.project.creator.add_addon('github')
self.node_settings = self.project.get_addon('github')
self.user_settings = self.project.creator.get_addon('github')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.save()
@mock.patch('website.addons.github.api.GitHub.repo')
def test_before_make_public(self, mock_repo):
mock_repo.side_effect = NotFoundError
result = self.node_settings.before_make_public(self.project)
assert_is(result, None)
@mock.patch('website.addons.github.api.GitHub.repo')
def test_before_page_load_osf_public_gh_public(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
@mock.patch('website.addons.github.api.GitHub.repo')
def test_before_page_load_osf_public_gh_private(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('website.addons.github.api.GitHub.repo')
def test_before_page_load_osf_private_gh_public(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('website.addons.github.api.GitHub.repo')
def test_before_page_load_osf_private_gh_private(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
def test_before_page_load_not_contributor(self):
message = self.node_settings.before_page_load(self.project, UserFactory())
assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, self.consolidated_auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_not_in("You can re-authenticate", message)
def test_after_remove_contributor_authenticator_not_self(self):
auth = Auth(user=self.non_authenticator)
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_in("You can re-authenticate", message)
def test_after_remove_contributor_not_authenticator(self):
self.node_settings.after_remove_contributor(
self.project, self.non_authenticator, self.consolidated_auth
)
assert_not_equal(
self.node_settings.user_settings,
None,
)
@unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
@mock.patch('website.addons.github.api.GitHub.set_privacy')
def test_after_set_privacy_private_authenticated(self, mock_set_privacy):
mock_set_privacy.return_value = {}
message = self.node_settings.after_set_privacy(
self.project, 'private',
)
mock_set_privacy.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
True,
)
assert_true(message)
assert_in('made private', message.lower())
@unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
@mock.patch('website.addons.github.api.GitHub.set_privacy')
def test_after_set_privacy_public_authenticated(self, mock_set_privacy):
mock_set_privacy.return_value = {}
message = self.node_settings.after_set_privacy(
self.project, 'public'
)
mock_set_privacy.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
False,
)
assert_true(message)
assert_in('made public', message.lower())
@unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
@mock.patch('website.addons.github.api.GitHub.repo')
@mock.patch('website.addons.github.api.GitHub.set_privacy')
def test_after_set_privacy_not_authenticated(self, mock_set_privacy, mock_repo):
mock_set_privacy.return_value = {'errors': ['it broke']}
mock_repo.return_value = {'private': True}
message = self.node_settings.after_set_privacy(
self.project, 'private',
)
mock_set_privacy.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
True,
)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
assert_in('could not set privacy', message.lower())
def test_after_fork_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.project.creator,
)
assert_equal(
self.node_settings.user_settings,
clone.user_settings,
)
def test_after_fork_not_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.non_authenticator,
)
assert_equal(
clone.user_settings,
None,
)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
@mock.patch('website.archiver.tasks.archive')
def test_does_not_get_copied_to_registrations(self, mock_archive):
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(user=self.project.creator),
data='hodor',
)
assert_false(registration.has_addon('github'))
class TestAddonGithubUserSettings(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user_settings = AddonGitHubUserSettings()
self.oauth_settings = AddonGitHubOauthSettings()
self.oauth_settings.github_user_id = 'testuser'
self.oauth_settings.save()
self.user_settings.oauth_settings = self.oauth_settings
self.user_settings.save()
def test_repr(self):
self.user_settings.owner = UserFactory()
assert_in(self.user_settings.owner._id, repr(self.user_settings))
oauth_settings = GitHubOauthSettingsFactory()
def test_public_id_is_none_if_no_oauth_settings_attached(self):
self.user_settings.oauth_settings = None
self.user_settings.save()
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1053
assert_is_none(self.user_settings.public_id)
def test_github_user_name(self):
self.oauth_settings.github_user_name = "test user name"
self.oauth_settings.save()
assert_equal(self.user_settings.github_user_name, "test user name")
def test_oauth_access_token(self):
self.oauth_settings.oauth_access_token = "test access token"
self.oauth_settings.save()
assert_equal(self.user_settings.oauth_access_token, "test access token")
def test_oauth_token_type(self):
self.oauth_settings.oauth_token_type = "test token type"
self.oauth_settings.save()
assert_equal(self.user_settings.oauth_token_type, "test token type")
@mock.patch('website.addons.github.api.GitHub.revoke_token')
def test_clear_auth(self, mock_revoke_token):
mock_revoke_token.return_value = True
self.user_settings.clear_auth(save=True)
assert_false(self.user_settings.github_user_name)
assert_false(self.user_settings.oauth_token_type)
assert_false(self.user_settings.oauth_access_token)
assert_false(self.user_settings.oauth_settings)
class TestAddonGithubNodeSettings(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = UserFactory()
self.user.add_addon('github')
self.user_settings = self.user.get_addon('github')
self.oauth_settings = AddonGitHubOauthSettings(oauth_access_token='foobar')
self.oauth_settings.github_user_id = 'testuser'
self.oauth_settings.save()
self.user_settings.oauth_settings = self.oauth_settings
self.user_settings.save()
self.node_settings = AddonGitHubNodeSettings(
owner=ProjectFactory(),
user='chrisseto',
repo='openpokemon',
user_settings=self.user_settings,
)
self.node_settings.save()
def test_complete_true(self):
assert_true(self.node_settings.has_auth)
assert_true(self.node_settings.complete)
def test_complete_false(self):
self.node_settings.user = None
assert_true(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
def test_complete_repo_false(self):
self.node_settings.repo = None
assert_true(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
def test_complete_auth_false(self):
self.node_settings.user_settings = None
assert_false(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
@mock.patch('website.addons.github.api.GitHub.delete_hook')
def test_delete_hook(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_true(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('website.addons.github.api.GitHub.delete_hook')
def test_delete_hook_no_hook(self, mock_delete_hook):
res = self.node_settings.delete_hook()
assert_false(res)
assert_false(mock_delete_hook.called)
@mock.patch('website.addons.github.api.GitHub.delete_hook')
def test_delete_hook_not_found(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = NotFoundError
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('website.addons.github.api.GitHub.delete_hook')
def test_delete_hook_error(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = GitHubError(mock.Mock())
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
def test_to_json_noauthorizing_authed_user(self):
user = UserFactory()
user.add_addon('github')
user_settings = user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings(oauth_access_token='foobar')
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
        user_settings.oauth_settings = oauth_settings
user_settings.save()
self.node_settings.to_json(user)
|
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.models import GenericResource
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set, \
assertUserResourceState, assertResourceUserState
class T03CreateResource(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T03CreateResource, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.cat = hydroshare.create_account(
'cat@gmail.com',
username='cat',
first_name='not a dog',
last_name='last_name_cat',
superuser=False,
groups=[]
)
self.dog = hydroshare.create_account(
'dog@gmail.com',
username='dog',
first_name='a little arfer',
last_name='last_name_dog',
superuser=False,
groups=[]
)
def test_01_create(self):
"""Resource creator has appropriate access"""
cat = self.cat
# check that user cat owns and holds nothing
assertUserResourceState(self, cat, [], [], [])
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
assertUserResourceState(self, cat, [holes], [], [])
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# unsharing with cat would violate owner constraint
self.assertTrue(
is_equal_to_as_set(
[], cat.uaccess.get_resource_unshare_users(holes)))
self.assertFalse(
cat.uaccess.can_unshare_resource_with_user(
holes, cat))
def test_02_isolate(self):
"""A user who didn't create a resource cannot access it"""
cat = self.cat
dog = self.dog
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# check that resource was created
assertUserResourceState(self, cat, [holes], [], [])
# check that resource is not accessible to others
assertUserResourceState(self, dog, [], [], [])
# metadata should be the same as before
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for non-owner
self.assertFalse(dog.uaccess.owns_resource(holes))
self.assertFalse(dog.uaccess.can_change_resource(holes))
self.assertFalse(dog.uaccess.can_view_resource(holes))
# composite django state for non-owner
self.assertFalse(dog.uaccess.can_change_resource_flags(holes))
self.assertFalse(dog.uaccess.can_delete_resource(holes))
self.assertFalse(
dog.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertFalse(
dog.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertFalse(
dog.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# test list access functions for unshare targets
# these return empty because allowing this would violate the last owner
# rule
self.assertTrue(
is_equal_to_as_set(
[], cat.uaccess.get_resource_unshare_users(holes)))
self.assertTrue(
is_equal_to_as_set(
[], dog.uaccess.get_resource_unshare_users(holes)))
def test_06_check_flag_immutable(self):
"""Resource owner can set and reset immutable flag"""
cat = self.cat
dog = self.dog
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
assertUserResourceState(self, cat, [holes], [], [])
assertResourceUserState(self, holes, [cat], [], [])
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it immutable: what changes?
holes.raccess.immutable = True
holes.raccess.save()
# metadata state
self.assertTrue(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
assertUserResourceState(self, cat, [holes], [], [])
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertFalse(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin access
self.assertFalse(self.admin.uaccess.owns_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource(holes))
self.assertTrue(self.admin.uaccess.can_view_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
        # share with CHANGE while the resource is immutable
self.cat.uaccess.share_resource_with_user(
holes, dog, PrivilegeCodes.CHANGE)
# CHANGE squashed to VIEW
assertUserResourceState(self, dog, [], [], [holes])
# now no longer immutable
holes.raccess.immutable = False
holes.raccess.save()
assertUserResourceState(self, dog, [], [holes], [])
self.cat.uaccess.unshare_resource_with_user(holes, dog)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
def test_07_check_flag_discoverable(self):
"""Resource owner can set and reset discoverable flag"""
cat = self.cat
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.public_resources.all()))
# make it discoverable
holes.raccess.discoverable = True
holes.raccess.save()
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[holes],
GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.public_resources.all()))
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertTrue(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it not discoverable
holes.raccess.discoverable = False
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin should have full access to any not discoverable
# resource
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# TODO: test get_discoverable_resources and get_public_resources
def test_08_check_flag_published(self):
"""Resource owner can set and reset published flag"""
cat = self.cat
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it published
holes.raccess.published = True
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertTrue(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertFalse(cat.uaccess.can_change_resource_flags(holes))
self.assertFalse(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin access for published resource
self.assertFalse(self.admin.uaccess.owns_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource(holes))
self.assertTrue(self.admin.uaccess.can_view_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
# admin even can delete a published resource
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it not published
holes.raccess.published = False
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
def test_09_check_flag_public(self):
"""Resource owner can set and reset public flag"""
cat = self.cat
# create a resource
holes = hydroshare.create_resource(resource_type='GenericResource',
owner=cat,
title='all about dog holes',
metadata=[],)
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[], GenericResource.public_resources.all()))
# make it public
holes.raccess.public = True
holes.raccess.save()
# is it listed as discoverable?
self.assertTrue(
is_equal_to_as_set(
[holes],
GenericResource.discoverable_resources.all()))
self.assertTrue(
is_equal_to_as_set(
[holes],
GenericResource.public_resources.all()))
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertTrue(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# make it not public
holes.raccess.public = False
holes.raccess.save()
# metadata state
self.assertFalse(holes.raccess.immutable)
self.assertFalse(holes.raccess.published)
self.assertFalse(holes.raccess.discoverable)
self.assertFalse(holes.raccess.public)
self.assertTrue(holes.raccess.shareable)
# protection state for owner
self.assertTrue(cat.uaccess.owns_resource(holes))
self.assertTrue(cat.uaccess.can_change_resource(holes))
self.assertTrue(cat.uaccess.can_view_resource(holes))
# composite django state
self.assertTrue(cat.uaccess.can_change_resource_flags(holes))
self.assertTrue(cat.uaccess.can_delete_resource(holes))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
cat.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
# django admin should have full access to any private resource
self.assertFalse(self.admin.uaccess.owns_resource(holes))
self.assertTrue(self.admin.uaccess.can_change_resource_flags(holes))
self.assertTrue(self.admin.uaccess.can_delete_resource(holes))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.OWNER))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.CHANGE))
self.assertTrue(
self.admin.uaccess.can_share_resource(
holes, PrivilegeCodes.VIEW))
|
|
#!/usr/bin/env python2
# -*- coding: utf-8-*-
import os
import wave
import json
import tempfile
import logging
import urllib
import urlparse
import re
import subprocess
from abc import ABCMeta, abstractmethod
import requests
import yaml
import jasperpath
import diagnose
import vocabcompiler
class AbstractSTTEngine(object):
"""
Generic parent class for all STT engines
"""
__metaclass__ = ABCMeta
VOCABULARY_TYPE = None
@classmethod
def get_config(cls):
return {}
@classmethod
def get_instance(cls, vocabulary_name, phrases):
config = cls.get_config()
if cls.VOCABULARY_TYPE:
vocabulary = cls.VOCABULARY_TYPE(vocabulary_name,
path=jasperpath.config(
'vocabularies'))
if not vocabulary.matches_phrases(phrases):
vocabulary.compile(phrases)
config['vocabulary'] = vocabulary
instance = cls(**config)
return instance
@classmethod
def get_passive_instance(cls):
phrases = vocabcompiler.get_keyword_phrases()
return cls.get_instance('keyword', phrases)
@classmethod
def get_active_instance(cls):
phrases = vocabcompiler.get_all_phrases()
return cls.get_instance('default', phrases)
@classmethod
@abstractmethod
def is_available(cls):
return True
@abstractmethod
def transcribe(self, fp):
pass
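# Illustrative sketch (not part of the original module): a minimal concrete
# engine showing the contract AbstractSTTEngine expects from its subclasses
# (a SLUG, a classmethod is_available() and transcribe()). The class is built
# inside a factory function so that importing this module does not register
# the sketch with get_engines() below, which discovers engines via
# __subclasses__(). The 'echo' slug and the fixed phrase are hypothetical.
def _make_example_engine():
    class EchoSTT(AbstractSTTEngine):
        """Toy engine that 'transcribes' every utterance to a fixed phrase."""
        SLUG = 'echo'
        def __init__(self, phrase='HELLO'):
            self._phrase = phrase
        @classmethod
        def is_available(cls):
            # A real engine would check for its dependencies here.
            return True
        def transcribe(self, fp):
            # A real engine would decode the audio read from fp.
            fp.read()
            return [self._phrase]
    return EchoSTT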
class PocketSphinxSTT(AbstractSTTEngine):
"""
The default Speech-to-Text implementation which relies on PocketSphinx.
"""
SLUG = 'sphinx'
VOCABULARY_TYPE = vocabcompiler.PocketsphinxVocabulary
def __init__(self, vocabulary, hmm_dir="/usr/local/share/" +
"pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k"):
"""
Initiates the pocketsphinx instance.
Arguments:
vocabulary -- a PocketsphinxVocabulary instance
hmm_dir -- the path of the Hidden Markov Model (HMM)
"""
self._logger = logging.getLogger(__name__)
# quirky bug where first import doesn't work
try:
import pocketsphinx as ps
except:
import pocketsphinx as ps
with tempfile.NamedTemporaryFile(prefix='psdecoder_',
suffix='.log', delete=False) as f:
self._logfile = f.name
self._logger.debug("Initializing PocketSphinx Decoder with hmm_dir " +
"'%s'", hmm_dir)
# Perform some checks on the hmm_dir so that we can display more
        # meaningful error messages if necessary
if not os.path.exists(hmm_dir):
msg = ("hmm_dir '%s' does not exist! Please make sure that you " +
"have set the correct hmm_dir in your profile.") % hmm_dir
self._logger.error(msg)
raise RuntimeError(msg)
        # Let's check if all required files are there. Refer to:
# http://cmusphinx.sourceforge.net/wiki/acousticmodelformat
# for details
missing_hmm_files = []
for fname in ('mdef', 'feat.params', 'means', 'noisedict',
'transition_matrices', 'variances'):
if not os.path.exists(os.path.join(hmm_dir, fname)):
missing_hmm_files.append(fname)
mixweights = os.path.exists(os.path.join(hmm_dir, 'mixture_weights'))
sendump = os.path.exists(os.path.join(hmm_dir, 'sendump'))
if not mixweights and not sendump:
# We only need mixture_weights OR sendump
missing_hmm_files.append('mixture_weights or sendump')
if missing_hmm_files:
self._logger.warning("hmm_dir '%s' is missing files: %s. Please " +
"make sure that you have set the correct " +
"hmm_dir in your profile.",
hmm_dir, ', '.join(missing_hmm_files))
self._decoder = ps.Decoder(hmm=hmm_dir, logfn=self._logfile,
**vocabulary.decoder_kwargs)
def __del__(self):
os.remove(self._logfile)
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
try:
config['hmm_dir'] = profile['pocketsphinx']['hmm_dir']
except KeyError:
pass
return config
def transcribe(self, fp):
"""
Performs STT, transcribing an audio file and returning the result.
Arguments:
fp -- a file object containing audio data
"""
fp.seek(44)
# FIXME: Can't use the Decoder.decode_raw() here, because
# pocketsphinx segfaults with tempfile.SpooledTemporaryFile()
data = fp.read()
self._decoder.start_utt()
self._decoder.process_raw(data, False, True)
self._decoder.end_utt()
result = self._decoder.get_hyp()
with open(self._logfile, 'r+') as f:
for line in f:
self._logger.debug(line.strip())
f.truncate()
transcribed = [result[0]]
self._logger.info('Transcribed: %r', transcribed)
return transcribed
@classmethod
def is_available(cls):
return diagnose.check_python_import('pocketsphinx')
class JuliusSTT(AbstractSTTEngine):
"""
A very basic Speech-to-Text engine using Julius.
"""
SLUG = 'julius'
VOCABULARY_TYPE = vocabcompiler.JuliusVocabulary
def __init__(self, vocabulary=None, hmmdefs="/usr/share/voxforge/julius/" +
"acoustic_model_files/hmmdefs", tiedlist="/usr/share/" +
"voxforge/julius/acoustic_model_files/tiedlist"):
self._logger = logging.getLogger(__name__)
self._vocabulary = vocabulary
self._hmmdefs = hmmdefs
self._tiedlist = tiedlist
self._pattern = re.compile(r'sentence(\d+): <s> (.+) </s>')
        # Initial test run: we run this command once to log errors/warnings
cmd = ['julius',
'-input', 'stdin',
'-dfa', self._vocabulary.dfa_file,
'-v', self._vocabulary.dict_file,
'-h', self._hmmdefs,
'-hlist', self._tiedlist,
'-forcedict']
cmd = [str(x) for x in cmd]
self._logger.debug('Executing: %r', cmd)
with tempfile.SpooledTemporaryFile() as out_f:
with tempfile.SpooledTemporaryFile() as f:
with tempfile.SpooledTemporaryFile() as err_f:
subprocess.call(cmd, stdin=f, stdout=out_f, stderr=err_f)
out_f.seek(0)
for line in out_f.read().splitlines():
line = line.strip()
if len(line) > 7 and line[:7].upper() == 'ERROR: ':
if not line[7:].startswith('adin_'):
self._logger.error(line[7:])
elif len(line) > 9 and line[:9].upper() == 'WARNING: ':
self._logger.warning(line[9:])
elif len(line) > 6 and line[:6].upper() == 'STAT: ':
self._logger.debug(line[6:])
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'julius' in profile:
if 'hmmdefs' in profile['julius']:
config['hmmdefs'] = profile['julius']['hmmdefs']
if 'tiedlist' in profile['julius']:
config['tiedlist'] = profile['julius']['tiedlist']
return config
def transcribe(self, fp, mode=None):
cmd = ['julius',
'-quiet',
'-nolog',
'-input', 'stdin',
'-dfa', self._vocabulary.dfa_file,
'-v', self._vocabulary.dict_file,
'-h', self._hmmdefs,
'-hlist', self._tiedlist,
'-forcedict']
cmd = [str(x) for x in cmd]
self._logger.debug('Executing: %r', cmd)
with tempfile.SpooledTemporaryFile() as out_f:
with tempfile.SpooledTemporaryFile() as err_f:
subprocess.call(cmd, stdin=fp, stdout=out_f, stderr=err_f)
out_f.seek(0)
results = [(int(i), text) for i, text in
self._pattern.findall(out_f.read())]
transcribed = [text for i, text in
sorted(results, key=lambda x: x[0])
if text]
if not transcribed:
transcribed.append('')
self._logger.info('Transcribed: %r', transcribed)
return transcribed
@classmethod
def is_available(cls):
return diagnose.check_executable('julius')
class GoogleSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the Google Speech API.
This implementation requires a Google API key to be present in profile.yml
To obtain an API key:
1. Join the Chromium Dev group:
https://groups.google.com/a/chromium.org/forum/?fromgroups#!forum/chromium-dev
2. Create a project through the Google Developers console:
https://console.developers.google.com/project
3. Select your project. In the sidebar, navigate to "APIs & Auth." Activate
the Speech API.
4. Under "APIs & Auth," navigate to "Credentials." Create a new key for
public API access.
5. Add your credentials to your profile.yml. Add an entry to the 'keys'
       section using the key name 'GOOGLE_SPEECH'.
6. Set the value of the 'stt_engine' key in your profile.yml to 'google'
Excerpt from sample profile.yml:
...
timezone: US/Pacific
stt_engine: google
keys:
GOOGLE_SPEECH: $YOUR_KEY_HERE
"""
SLUG = 'google'
def __init__(self, api_key=None, language='en-us'):
# FIXME: get init args from config
"""
Arguments:
api_key - the public api key which allows access to Google APIs
"""
self._logger = logging.getLogger(__name__)
self._request_url = None
self._language = None
self._api_key = None
self._http = requests.Session()
self.language = language
self.api_key = api_key
@property
def request_url(self):
return self._request_url
@property
def language(self):
return self._language
@language.setter
def language(self, value):
self._language = value
self._regenerate_request_url()
@property
def api_key(self):
return self._api_key
@api_key.setter
def api_key(self, value):
self._api_key = value
self._regenerate_request_url()
def _regenerate_request_url(self):
if self.api_key and self.language:
query = urllib.urlencode({'output': 'json',
'client': 'chromium',
'key': self.api_key,
'lang': self.language,
'maxresults': 6,
'pfilter': 2})
self._request_url = urlparse.urlunparse(
('https', 'www.google.com', '/speech-api/v2/recognize', '',
query, ''))
else:
self._request_url = None
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'keys' in profile and 'GOOGLE_SPEECH' in profile['keys']:
config['api_key'] = profile['keys']['GOOGLE_SPEECH']
return config
def transcribe(self, fp):
"""
Performs STT via the Google Speech API, transcribing an audio file and
returning an English string.
Arguments:
audio_file_path -- the path to the .wav file to be transcribed
"""
if not self.api_key:
self._logger.critical('API key missing, transcription request ' +
'aborted.')
return []
elif not self.language:
self._logger.critical('Language info missing, transcription ' +
'request aborted.')
return []
wav = wave.open(fp, 'rb')
frame_rate = wav.getframerate()
wav.close()
data = fp.read()
headers = {'content-type': 'audio/l16; rate=%s' % frame_rate}
r = self._http.post(self.request_url, data=data, headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with http status %d',
r.status_code)
if r.status_code == requests.codes['forbidden']:
self._logger.warning('Status 403 is probably caused by an ' +
'invalid Google API key.')
return []
r.encoding = 'utf-8'
try:
# We cannot simply use r.json() because Google sends invalid json
            # (i.e. multiple json objects, separated by newlines. We only want
# the last one).
response = json.loads(list(r.text.strip().split('\n', 1))[-1])
if len(response['result']) == 0:
# Response result is empty
raise ValueError('Nothing has been transcribed.')
results = [alt['transcript'] for alt
in response['result'][0]['alternative']]
except ValueError as e:
self._logger.warning('Empty response: %s', e.args[0])
results = []
except (KeyError, IndexError):
self._logger.warning('Cannot parse response.', exc_info=True)
results = []
else:
# Convert all results to uppercase
results = tuple(result.upper() for result in results)
self._logger.info('Transcribed: %r', results)
return results
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
class AttSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the AT&T Speech API.
This implementation requires an AT&T app_key/app_secret to be present in
profile.yml. Please sign up at http://developer.att.com/apis/speech and
create a new app. You can then take the app_key/app_secret and put it into
your profile.yml:
...
stt_engine: att
att-stt:
app_key: 4xxzd6abcdefghijklmnopqrstuvwxyz
app_secret: 6o5jgiabcdefghijklmnopqrstuvwxyz
"""
SLUG = "att"
def __init__(self, app_key, app_secret):
self._logger = logging.getLogger(__name__)
self._token = None
self.app_key = app_key
self.app_secret = app_secret
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# Try to get AT&T app_key/app_secret from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'att-stt' in profile:
if 'app_key' in profile['att-stt']:
config['app_key'] = profile['att-stt']['app_key']
if 'app_secret' in profile['att-stt']:
config['app_secret'] = profile['att-stt']['app_secret']
return config
@property
def token(self):
if not self._token:
headers = {'content-type': 'application/x-www-form-urlencoded',
'accept': 'application/json'}
payload = {'client_id': self.app_key,
'client_secret': self.app_secret,
'scope': 'SPEECH',
'grant_type': 'client_credentials'}
r = requests.post('https://api.att.com/oauth/v4/token',
data=payload,
headers=headers)
self._token = r.json()['access_token']
return self._token
def transcribe(self, fp):
data = fp.read()
r = self._get_response(data)
if r.status_code == requests.codes['unauthorized']:
# Request token invalid, retry once with a new token
self._logger.warning('OAuth access token invalid, generating a ' +
'new one and retrying...')
self._token = None
r = self._get_response(data)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with response: %r',
r.text,
exc_info=True)
return []
except requests.exceptions.RequestException:
self._logger.critical('Request failed.', exc_info=True)
return []
else:
try:
recognition = r.json()['Recognition']
if recognition['Status'] != 'OK':
raise ValueError(recognition['Status'])
results = [(x['Hypothesis'], x['Confidence'])
for x in recognition['NBest']]
except ValueError as e:
self._logger.debug('Recognition failed with status: %s',
e.args[0])
return []
except KeyError:
self._logger.critical('Cannot parse response.',
exc_info=True)
return []
else:
transcribed = [x[0].upper() for x in sorted(results,
key=lambda x: x[1],
reverse=True)]
self._logger.info('Transcribed: %r', transcribed)
return transcribed
def _get_response(self, data):
headers = {'authorization': 'Bearer %s' % self.token,
'accept': 'application/json',
'content-type': 'audio/wav'}
return requests.post('https://api.att.com/speech/v3/speechToText',
data=data,
headers=headers)
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
class WitAiSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the Wit.ai Speech API.
    This implementation requires a Wit.ai Access Token to be present in
    profile.yml. Please sign up at https://wit.ai and copy your instance
    token, which can be found under Settings in the Wit console, to your
profile.yml:
...
stt_engine: witai
witai-stt:
access_token: ERJKGE86SOMERANDOMTOKEN23471AB
"""
SLUG = "witai"
def __init__(self, access_token):
self._logger = logging.getLogger(__name__)
self.token = access_token
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# Try to get wit.ai Auth token from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'witai-stt' in profile:
if 'access_token' in profile['witai-stt']:
config['access_token'] = \
profile['witai-stt']['access_token']
return config
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
self._headers = {'Authorization': 'Bearer %s' % self.token,
'accept': 'application/json',
'Content-Type': 'audio/wav'}
@property
def headers(self):
return self._headers
def transcribe(self, fp):
data = fp.read()
r = requests.post('https://api.wit.ai/speech?v=20151002',
data=data,
headers=self.headers)
try:
r.raise_for_status()
text = r.json()['_text']
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with response: %r',
r.text,
exc_info=True)
return []
except requests.exceptions.RequestException:
self._logger.critical('Request failed.', exc_info=True)
return []
except ValueError as e:
self._logger.critical('Cannot parse response: %s',
e.args[0])
return []
except KeyError:
self._logger.critical('Cannot parse response.',
exc_info=True)
return []
        else:
            transcribed = [text.upper()]
            self._logger.info('Transcribed: %r', transcribed)
            return transcribed
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
def get_engine_by_slug(slug=None):
"""
Returns:
An STT Engine implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or type(slug) is not str:
        raise TypeError("Invalid slug '%s'" % slug)
selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines())
if len(selected_engines) == 0:
raise ValueError("No STT engine found for slug '%s'" % slug)
else:
if len(selected_engines) > 1:
print(("WARNING: Multiple STT engines found for slug '%s'. " +
"This is most certainly a bug.") % slug)
engine = selected_engines[0]
if not engine.is_available():
raise ValueError(("STT engine '%s' is not available (due to " +
"missing dependencies, missing " +
"dependencies, etc.)") % slug)
return engine
def get_engines():
def get_subclasses(cls):
subclasses = set()
for subclass in cls.__subclasses__():
subclasses.add(subclass)
subclasses.update(get_subclasses(subclass))
return subclasses
    return [engine for engine in
            list(get_subclasses(AbstractSTTEngine))
            if hasattr(engine, 'SLUG') and engine.SLUG]
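# Illustrative usage sketch (not part of the original module): how a caller
# might select an engine by the 'stt_engine' slug from profile.yml and run a
# transcription. The slug value and the wav path are hypothetical examples.
def _example_transcribe(slug='sphinx', wav_path='/tmp/utterance.wav'):
    engine_class = get_engine_by_slug(slug)
    engine = engine_class.get_active_instance()
    with open(wav_path, 'rb') as fp:
        return engine.transcribe(fp)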
|
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import idlnode
import idlparser
import logging.config
import sys
import unittest
class IDLNodeTestCase(unittest.TestCase):
def _run_test(self, syntax, content, expected):
"""Utility run tests and prints extra contextual information.
Args:
syntax -- IDL grammar to use (either idlparser.WEBKIT_SYNTAX,
WEBIDL_SYNTAX or FREMONTCUT_SYNTAX). If None, will run
multiple tests, each with a different syntax.
content -- input text for the parser.
expected -- expected parse result.
"""
if syntax is None:
self._run_test(idlparser.WEBIDL_SYNTAX, content, expected)
self._run_test(idlparser.WEBKIT_SYNTAX, content, expected)
self._run_test(idlparser.FREMONTCUT_SYNTAX, content, expected)
return
actual = None
error = None
ast = None
parseResult = None
try:
parser = idlparser.IDLParser(syntax)
ast = parser.parse(content)
node = idlnode.IDLFile(ast)
actual = node.to_dict() if node else None
except SyntaxError as e:
error = e
pass
if actual == expected:
return
else:
msg = '''
SYNTAX : %s
CONTENT :
%s
EXPECTED:
%s
ACTUAL :
%s
ERROR : %s
AST :
%s
''' % (syntax, content, expected, actual, error, ast)
self.fail(msg)
def test_empty_module(self):
self._run_test(None, 'module TestModule {};',
{'modules': [{
'id': 'TestModule'
}]})
def test_empty_interface(self):
self._run_test(
None, 'module TestModule { interface Interface1 {}; };', {
'modules': [{
'interfaces': [{
'javascript_binding_name': 'Interface1',
'doc_js_name': 'Interface1',
'id': 'Interface1'
}],
'id':
'TestModule'
}]
})
def test_gcc_preprocessor(self):
self._run_test(idlparser.WEBKIT_SYNTAX,
'#if 1\nmodule TestModule {};\n#endif\n',
{'modules': [{
'id': 'TestModule'
}]})
def test_extended_attributes(self):
self._run_test(
idlparser.WEBKIT_SYNTAX,
'module M { interface [ExAt1, ExAt2] I {};};', {
'modules': [{
'interfaces': [{
'javascript_binding_name': 'I',
'doc_js_name': 'I',
'ext_attrs': {
'ExAt1': None,
'ExAt2': None
},
'id': 'I'
}],
'id':
'M'
}]
})
def test_implements_statement(self):
self._run_test(
idlparser.WEBIDL_SYNTAX, 'module M { X implements Y; };', {
'modules': [{
'implementsStatements': [{
'implementor': {
'id': 'X'
},
'implemented': {
'id': 'Y'
}
}],
'id':
'M'
}]
})
def test_attributes(self):
self._run_test(
idlparser.WEBIDL_SYNTAX, '''interface I {
attribute long a1;
readonly attribute DOMString a2;
attribute any a3;
};''', {
'interfaces': [{
'javascript_binding_name':
'I',
'attributes': [{
'type': {
'id': 'long'
},
'id': 'a1',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'DOMString'
},
'is_read_only': True,
'id': 'a2',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'any'
},
'id': 'a3',
'doc_js_interface_name': 'I'
}],
'id':
'I',
'doc_js_name':
'I'
}]
})
def test_operations(self):
self._run_test(
idlparser.WEBIDL_SYNTAX, '''interface I {
[ExAttr] t1 op1();
t2 op2(in int arg1, in long arg2);
getter any item(in long index);
};''', {
'interfaces': [{
'operations':
[{
'doc_js_interface_name': 'I',
'type': {
'id': 't1'
},
'ext_attrs': {
'ExAttr': None
},
'id': 'op1'
},
{
'doc_js_interface_name':
'I',
'type': {
'id': 't2'
},
'id':
'op2',
'arguments': [{
'type': {
'id': 'int'
},
'id': 'arg1'
}, {
'type': {
'id': 'long'
},
'id': 'arg2'
}]
},
{
'specials': ['getter'],
'doc_js_interface_name': 'I',
'type': {
'id': 'any'
},
'id': 'item',
'arguments': [{
'type': {
'id': 'long'
},
'id': 'index'
}]
},
{
'is_stringifier': True,
'type': {
'id': 'name'
},
'doc_js_interface_name': 'I'
}],
'javascript_binding_name':
'I',
'id':
'I',
'doc_js_name':
'I'
}]
})
def test_constants(self):
self._run_test(
None, '''interface I {
const long c1 = 0;
const long c2 = 1;
const long c3 = 0x01;
const long c4 = 10;
const boolean b1 = false;
const boolean b2 = true;
};''', {
'interfaces': [{
'javascript_binding_name':
'I',
'doc_js_name':
'I',
'id':
'I',
'constants': [{
'type': {
'id': 'long'
},
'id': 'c1',
'value': '0',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'long'
},
'id': 'c2',
'value': '1',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'long'
},
'id': 'c3',
'value': '0x01',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'long'
},
'id': 'c4',
'value': '10',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'boolean'
},
'id': 'b1',
'value': 'false',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'boolean'
},
'id': 'b2',
'value': 'true',
'doc_js_interface_name': 'I'
}]
}]
})
def test_annotations(self):
self._run_test(
idlparser.FREMONTCUT_SYNTAX,
'@Ano1 @Ano2() @Ano3(x=1) @Ano4(x,y=2) interface I {};', {
'interfaces': [{
'javascript_binding_name': 'I',
'doc_js_name': 'I',
'id': 'I',
'annotations': {
'Ano4': {
'y': '2',
'x': None
},
'Ano1': {},
'Ano2': {},
'Ano3': {
'x': '1'
}
}
}]
})
self._run_test(
idlparser.FREMONTCUT_SYNTAX, '''interface I : @Ano1 J {
@Ano2 attribute int someAttr;
@Ano3 void someOp();
@Ano3 const int someConst = 0;
};''', {
'interfaces': [{
'operations': [{
'annotations': {
'Ano3': {}
},
'type': {
'id': 'void'
},
'id': 'someOp',
'doc_js_interface_name': 'I'
}],
'javascript_binding_name':
'I',
'parents': [{
'type': {
'id': 'J'
},
'annotations': {
'Ano1': {}
}
}],
'attributes': [{
'annotations': {
'Ano2': {}
},
'type': {
'id': 'int'
},
'id': 'someAttr',
'doc_js_interface_name': 'I'
}],
'doc_js_name':
'I',
'id':
'I',
'constants': [{
'annotations': {
'Ano3': {}
},
'type': {
'id': 'int'
},
'id': 'someConst',
'value': '0',
'doc_js_interface_name': 'I'
}]
}]
})
def test_inheritance(self):
self._run_test(
None,
'interface Shape {}; interface Rectangle : Shape {}; interface Square : Rectangle, Shape {};',
{
'interfaces': [{
'javascript_binding_name': 'Shape',
'doc_js_name': 'Shape',
'id': 'Shape'
},
{
'javascript_binding_name': 'Rectangle',
'doc_js_name': 'Rectangle',
'parents': [{
'type': {
'id': 'Shape'
}
}],
'id': 'Rectangle'
},
{
'javascript_binding_name':
'Square',
'doc_js_name':
'Square',
'parents': [{
'type': {
'id': 'Rectangle'
}
}, {
'type': {
'id': 'Shape'
}
}],
'id':
'Square'
}]
})
if __name__ == "__main__":
logging.config.fileConfig("logging.conf")
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
from datetime import date, datetime
import time
from sqlite3 import dbapi2 as sqlite
import simplejson as json
import xbmcgui
import common
import connection
import database_common as db_common
from bs4 import BeautifulSoup
def create():
c = _database.cursor()
c.execute('''CREATE TABLE movies
(content_id INTEGER PRIMARY KEY,
media_id TEXT,
url TEXT,
title TEXT,
title_sort TEXT,
plot TEXT,
duration INTEGER,
year INTEGER,
studio TEXT,
mpaa TEXT,
directors TEXT,
actors TEXT,
genres TEXT,
popularity INTEGER,
added_date timestamp,
cc_available BOOLEAN,
is_hd BOOLEAN,
audio_type TEXT,
playcount INTEGER DEFAULT 0,
favor BOOLEAN DEFAULT 0,
in_last_update BOOLEAN DEFAULT 1)''')
_database.commit()
c.close()
def insert(content_id, media_id, url=None, title=None, title_sort=None, plot=None, duration=None, year=None,
studio=None, mpaa=None,
directors=None, actors=None, genres=None, popularity=None, added_date=None, cc_available=False,
is_hd=False, audio_type=None):
c = _database.cursor()
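    # INSERT OR REPLACE would otherwise wipe the whole row, so playcount and
    # favor are re-read from the existing row via correlated subselects to
    # preserve watched/favorite state across catalog refreshes.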
c.execute('''INSERT OR REPLACE INTO movies (
content_id,
media_id,
url,
title,
title_sort,
plot,
duration,
year,
studio,
mpaa,
directors,
actors,
genres,
popularity,
added_date,
cc_available,
is_hd,
audio_type,
playcount,
favor,
in_last_update) VALUES (
:content_id,
:media_id,
:url,
:title,
:title_sort,
:plot,
:duration,
:year,
:studio,
:mpaa,
:directors,
:actors,
:genres,
:popularity,
:added_date,
:cc_available,
:is_hd,
:audio_type,
(SELECT playcount FROM movies WHERE content_id = :content_id),
(SELECT favor FROM movies WHERE content_id = :content_id),
:in_last_update)''', {
'content_id': int(content_id),
'media_id': media_id,
'url': url,
'title': title,
'title_sort': title_sort,
'plot': plot,
'duration': duration,
'year': year,
'studio': studio,
'mpaa': mpaa,
'directors': directors,
'actors': actors,
'genres': genres,
'popularity': popularity,
'added_date': added_date,
'cc_available': cc_available,
'is_hd': is_hd,
'audio_type': audio_type,
'in_last_update': True
})
_database.commit()
c.close()
def get_movie(content_id):
c = _database.cursor()
return c.execute('SELECT DISTINCT * FROM movies WHERE content_id = (?)', (content_id,))
def delete(content_id):
    c = _database.cursor()
    c.execute('DELETE FROM movies WHERE content_id = (?)', (content_id,))
    _database.commit()
    c.close()
def watch(content_id):
# TODO make this actually increment
c = _database.cursor()
c.execute("UPDATE movies SET playcount = 1 WHERE content_id = (?)", (content_id,))
_database.commit()
c.close()
return c.rowcount
def unwatch(content_id):
c = _database.cursor()
c.execute("UPDATE movies SET playcount=? WHERE content_id = (?)", (0, content_id))
_database.commit()
c.close()
return c.rowcount
def favor(content_id):
c = _database.cursor()
c.execute("UPDATE movies SET favor=? WHERE content_id=?", (True, content_id))
_database.commit()
c.close()
return c.rowcount
def unfavor(content_id):
c = _database.cursor()
c.execute("UPDATE movies SET favor=? WHERE content_id=?", (False, content_id))
_database.commit()
c.close()
return c.rowcount
def get_movies(genrefilter=False, actorfilter=False, directorfilter=False, studiofilter=False, yearfilter=False,
mpaafilter=False, watchedfilter=False, favorfilter=False, alphafilter=False):
c = _database.cursor()
if genrefilter:
genrefilter = '%' + genrefilter + '%'
return c.execute('SELECT DISTINCT * FROM movies WHERE genres LIKE (?)',
(genrefilter,))
elif mpaafilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE mpaa = (?)', (mpaafilter,))
elif actorfilter:
actorfilter = '%' + actorfilter + '%'
return c.execute('SELECT DISTINCT * FROM movies WHERE actors LIKE (?)',
(actorfilter,))
elif directorfilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE directors LIKE (?)',
(directorfilter,))
elif studiofilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE studio = (?)', (studiofilter,))
elif yearfilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE year = (?)', (int(yearfilter),))
elif watchedfilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE playcount > 0')
elif favorfilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE favor = 1')
elif alphafilter:
return c.execute('SELECT DISTINCT * FROM movies WHERE title REGEXP (?)',
(alphafilter + '*',))
else:
return c.execute('SELECT DISTINCT * FROM movies')
def get_types(col):
    c = _database.cursor()
    items = c.execute('select distinct %s from movies' % col)
    values = []
    for data in items:
        data = data[0]
        if isinstance(data, str):
            if 'Rated' in data:
                item = data.split('for')[0]
                if item not in values and item != '' and item != 0 and item != 'Inc.' and item != 'LLC.':
                    values.append(item)
            else:
                data = data.decode('utf-8').encode('utf-8').split(',')
                for item in data:
                    item = item.replace('& ', '').strip()
                    if item not in values and item != '' and item != 0 and item != 'Inc.' and item != 'LLC.':
                        values.append(item)
        elif data != 0:
            if data is not None:
                values.append(str(data))
    c.close()
    return values
def update_movies(force=False):
# Check if we've recently updated and skip
global audio_type
if not force and not _needs_update():
return
dialog = xbmcgui.DialogProgress()
dialog.create('Refreshing Movie Database')
dialog.update(0, 'Initializing Movie Scan')
data = connection.get_url(db_common.WEB_DOMAIN + '/Movies')
tree = BeautifulSoup(data, 'html.parser')
movies_html = tree.find(attrs={'id': 'work-items'}).findAll('div', recursive=False,
attrs={'class': 'item', 'context': 'Movies'})
del tree
del data
json_url = '{0}/metadata-service/play/content/partner/Web_{1}.json?contentType=Movie'.format(db_common.API_DOMAIN, db_common.SERVICE)
data = connection.get_url(json_url)
movies_json = json.loads(data)['playContentArray']['playContents']
# Mark all movies as unfound. This will be updated as we go through
c = _database.cursor()
c.execute("UPDATE movies SET in_last_update = 0")
_database.commit()
c.close()
total = len(movies_html)
count = 0
for movie in movies_html:
count += 1
        dialog.update(count * 100 / total, 'Scanned {0} of {1} movies'.format(count, total))
content_id = int(movie['catalogid'])
playLinkElem = movie.find('a', attrs={'class': 'collectionPlay'})
url = db_common.WEB_DOMAIN + playLinkElem['href']
genreList = []
genreLI = movie.find('ul', attrs={'class': 'genres'}).findAll('li', recursive=False)
for genre in genreLI:
genreList.append(genre.string)
genres = ','.join(genreList)
# Find the movie in the json for the remainder of content
for movie_json in movies_json:
if (movie_json['contentId'] == content_id):
media_id = movie_json['mediaId']
title = movie_json['title']
runtime = int(movie_json['runtime'] / 60)
year = int(movie_json['releaseYear'])
plot = movie_json['logLine']
studio = movie_json['studio']
popularity = movie_json['popularity']
title_sort = movie_json['titleSort']
cc_available = movie_json['closedCaption']
audio_type = movie_json['audioType']
is_hd = movie_json['hd']
mpaa = db_common.parse_mpaa(movie_json['mpaaRating'])
try:
date_without_time = movie_json['startDate'][:10]
added_date = datetime.strptime(date_without_time, '%Y-%m-%d')
except TypeError:
added_date = datetime(*(time.strptime(date_without_time, '%Y-%m-%d')[0:6]))
actors_list = []
for actor in movie_json['actors']:
actors_list.append(actor['fullName'])
actors = ','.join(actors_list)
directors_list = []
for director in movie_json['directors']:
directors_list.append(director['fullName'])
directors = ','.join(directors_list)
break
insert(content_id=content_id, media_id=media_id, url=url, title=title, title_sort=title_sort, plot=plot,
duration=runtime, year=year, mpaa=mpaa, popularity=popularity, added_date=added_date,
audio_type=audio_type, actors=actors, directors=directors, genres=genres, studio=studio,
cc_available=cc_available, is_hd=is_hd)
# Preload images
db_common.get_poster(content_id)
db_common.get_thumb(content_id)
_set_last_update()
# Find unfound movies and remove them
c = _database.cursor()
c.execute("DELETE FROM movies WHERE in_last_update = 0")
c.close()
def _needs_update():
# Update every 15 days
if 'last_update' in _database_meta:
# http://forum.kodi.tv/showthread.php?tid=112916
try:
last_update = datetime.strptime(_database_meta['last_update'], '%Y-%m-%d')
except TypeError:
last_update = datetime(*(time.strptime(_database_meta['last_update'], '%Y-%m-%d')[0:6]))
return (date.today() - last_update.date()).days > 15
return True
def _set_last_update():
_database_meta['last_update'] = date.today().strftime('%Y-%m-%d')
_write_meta_file()
def _write_meta_file():
f = open(DB_META_FILE, 'w')
json.dump(_database_meta, f)
f.close()
DB_META_FILE = os.path.join(common.__addonprofile__, 'movies.meta')
_database_meta = False
if os.path.exists(DB_META_FILE):
f = open(DB_META_FILE, 'r')
_database_meta = json.load(f)
f.close()
else:
_database_meta = {}
DB_FILE = os.path.join(common.__addonprofile__, 'movies.db')
if not os.path.exists(DB_FILE):
_database = sqlite.connect(DB_FILE)
_database.text_factory = str
_database.row_factory = sqlite.Row
create()
else:
_database = sqlite.connect(DB_FILE)
_database.text_factory = str
_database.row_factory = sqlite.Row
|
|
#!/usr/bin/env python
"""Performance test to compare the performance of buck between two revisions.
The general algorithm is:
  Check out <revisions_to_go_back - 1>.
  Warm up the cache:
    - Set .buckversion to the old revision, build all targets.
    - Set .buckversion to the new revision, build all targets.
  For each revision to test:
    - Rename the directory being tested.
    - Build all targets, checking that everything pulls from the dir cache.
    - Check out the revision to test.
    - Clean, then build all targets <iterations_per_diff> times, reading from the
      cache but not writing (except for the last iteration, which does write).
    - Run buck build on all targets to verify that a no-op build works.
"""
import argparse
import re
import subprocess
import os
import tempfile
import sys
from collections import defaultdict
from datetime import datetime
def createArgParser():
parser = argparse.ArgumentParser(
description='Run the buck performance test')
parser.add_argument(
'--perftest_id',
action='store',
type=str,
help='The identifier of this performance test')
parser.add_argument(
'--revisions_to_go_back',
action='store',
type=int,
help='The maximum number of revisions to go back when testing')
parser.add_argument(
'--iterations_per_diff',
action='store',
type=int,
help='The number of iterations to run on diff')
parser.add_argument(
'--targets_to_build',
action='append',
type=str,
help='The targets to build')
parser.add_argument(
'--repo_under_test',
action='store',
type=str,
help='Path to the repo under test')
parser.add_argument(
'--path_to_buck',
action='store',
type=str,
help='The path to the buck binary')
parser.add_argument(
'--old_buck_revision',
action='store',
type=str,
help='The original buck revision')
parser.add_argument(
'--new_buck_revision',
action='store',
type=str,
help='The new buck revision')
return parser
def log(message):
print '%s\t%s' % (str(datetime.now()), message)
sys.stdout.flush()
def timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
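# For example, timedelta_total_seconds(timedelta(days=1, seconds=30, microseconds=500000))
# evaluates to 86430.5, matching what timedelta.total_seconds() would return.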
class BuildResult():
def __init__(self, time_delta, cache_results, rule_key_map):
self.time_delta = time_delta
self.cache_results = cache_results
self.rule_key_map = rule_key_map
def git_clean(cwd):
log('Running git clean.')
subprocess.check_call(
['git', 'clean', '--quiet', '-xfd'],
cwd=cwd)
def git_reset(cwd, revision):
subprocess.check_call(
['git', 'reset', '--quiet', '--hard', revision],
cwd=cwd)
def buck_clean(args, cwd):
log('Running buck clean.')
subprocess.check_call(
[args.path_to_buck, 'clean'],
cwd=cwd)
def git_get_revisions(args):
cmd = [
'git', 'log', '--pretty=format:%H', 'HEAD', '-n',
str(args.revisions_to_go_back + 1)]
proc = subprocess.Popen(
cmd,
cwd=args.repo_under_test,
stdout=subprocess.PIPE)
try:
return list(reversed(proc.communicate()[0].splitlines()))
finally:
if proc.wait():
raise subprocess.CalledProcessError(
proc.returncode,
' '.join(cmd))
def git_checkout(revision, cwd):
log('Checking out %s.' % revision)
git_reset(cwd, 'HEAD')
subprocess.check_call(
['git', 'checkout', '--quiet', revision],
cwd=cwd)
BUILD_RESULT_LOG_LINE = re.compile(
r'BuildRuleFinished\((?P<rule_name>[\w_\-:#\/,]+)\): (?P<result>[A-Z_]+) '
r'(?P<cache_result>[A-Z_]+) (?P<success_type>[A-Z_]+) '
r'(?P<rule_key>[0-9a-f]*)')
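# An illustrative, made-up example of the kind of line BUILD_RESULT_LOG_LINE is
# meant to match (the rule name and status words here are placeholders):
#   BuildRuleFinished(//java/com/example:lib): SUCCESS CACHE_MISS BUILT_LOCALLY 0123456789abcdef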
RULEKEY_LINE = re.compile(
r'^INFO: RuleKey (?P<rule_key>[0-9a-f]*)='
r'(?P<rule_key_debug>.*)$')
BUCK_LOG_RULEKEY_LINE = re.compile(
r'.*\[[\w ]+\](?:\[command:[0-9a-f-]+\])?\[tid:\d+\]'
r'\[com.facebook.buck.rules.RuleKey[\$\.]?Builder\] '
r'RuleKey (?P<rule_key>[0-9a-f]+)='
r'(?P<rule_key_debug>.*)$')
def buck_build_target(args, cwd, targets, perftest_side, log_as_perftest=True):
"""Builds a target with buck and returns performance information.
"""
log('Running buck build %s.' % ' '.join(targets))
bucklogging_properties_path = os.path.join(
cwd, '.bucklogging.local.properties')
with open(bucklogging_properties_path, 'w') as bucklogging_properties:
# The default configuration has the root logger and FileHandler
# discard anything below FINE level.
#
# We need RuleKey logging, which uses FINER (verbose), so the
# root logger and file handler both need to be reconfigured
# to enable verbose logging.
bucklogging_properties.write(
'''.level=FINER
java.util.logging.FileHandler.level=FINER''')
env = os.environ.copy()
    # Force buck to pretend its repo is clean.
env.update({
'BUCK_REPOSITORY_DIRTY': '0'
})
if log_as_perftest:
env.update({
'BUCK_EXTRA_JAVA_ARGS':
'-Dbuck.perftest_id=%s, -Dbuck.perftest_side=%s' % (
args.perftest_id, perftest_side)
})
start = datetime.now()
tmpFile = tempfile.TemporaryFile()
try:
subprocess.check_call(
[args.path_to_buck, 'build', '--deep'] + targets + ['-v', '5'],
stdout=tmpFile,
stderr=tmpFile,
cwd=cwd,
env=env)
except:
tmpFile.seek(0)
log('Buck build failed: %s' % tmpFile.read())
raise
tmpFile.seek(0)
finish = datetime.now()
java_utils_log_path = os.path.join(
cwd,
'buck-out', 'log', 'buck-0.log')
if os.path.exists(java_utils_log_path):
pattern = BUCK_LOG_RULEKEY_LINE
build_output_file = open(java_utils_log_path)
else:
pattern = RULEKEY_LINE
build_output_file = tmpFile
rule_debug_map = {}
for line in build_output_file:
match = pattern.match(line)
if match:
rule_debug_map[match.group('rule_key')] = match.group(
'rule_key_debug')
logfile_path = os.path.join(
cwd,
'buck-out', 'bin', 'build.log')
cache_results = defaultdict(list)
rule_key_map = {}
with open(logfile_path, 'r') as logfile:
for line in logfile.readlines():
line = line.strip()
match = BUILD_RESULT_LOG_LINE.search(line)
if match:
rule_name = match.group('rule_name')
rule_key = match.group('rule_key')
                if rule_key not in rule_debug_map:
raise Exception('''ERROR: build.log contains an entry
which was not found in buck build -v 5 output.
Rule: {0}, rule key: {1}'''.format(rule_name, rule_key))
cache_results[match.group('cache_result')].append({
'rule_name': rule_name,
'rule_key': rule_key,
'rule_key_debug': rule_debug_map[rule_key]
})
rule_key_map[match.group('rule_name')] = rule_debug_map[
match.group('rule_key')]
result = BuildResult(finish - start, cache_results, rule_key_map)
cache_counts = {}
for key, value in result.cache_results.iteritems():
cache_counts[key] = len(value)
log('Test Build Finished! Elapsed Seconds: %d, Cache Counts: %s' % (
timedelta_total_seconds(result.time_delta), repr(cache_counts)))
return result
def set_perftest_side(
args,
cwd,
perftest_side,
cache_mode,
dir_cache_only=True):
log('Reconfiguring to test %s version of buck.' % perftest_side)
buckconfig_path = os.path.join(cwd, '.buckconfig.local')
with open(buckconfig_path, 'w') as buckconfig:
buckconfig.write('''[cache]
%s
dir = buck-cache-%s
dir_mode = %s
''' % ('mode = dir' if dir_cache_only else '', perftest_side, cache_mode))
buckconfig.truncate()
buckversion_path = os.path.join(cwd, '.buckversion')
with open(buckversion_path, 'w') as buckversion:
if perftest_side == 'old':
buckversion.write(args.old_buck_revision + os.linesep)
else:
buckversion.write(args.new_buck_revision + os.linesep)
buckversion.truncate()
def build_all_targets(
args,
cwd,
perftest_side,
cache_mode,
run_clean=True,
dir_cache_only=True,
log_as_perftest=True):
set_perftest_side(
args,
cwd,
perftest_side,
cache_mode,
dir_cache_only=dir_cache_only)
targets = []
for target_str in args.targets_to_build:
targets.extend(target_str.split(','))
if run_clean:
buck_clean(args, cwd)
#TODO(rowillia): Do smart things with the results here.
return buck_build_target(
args,
cwd,
targets,
perftest_side,
log_as_perftest=log_as_perftest)
def run_tests_for_diff(args, revisions_to_test, test_index, last_result):
log('=== Running tests at revision %s ===' % revisions_to_test[test_index])
new_directory_name = (os.path.basename(args.repo_under_test) +
'_test_iteration_%d' % test_index)
    # Rename the directory to flush out any cache problems.
cwd = os.path.join(os.path.dirname(args.repo_under_test),
new_directory_name)
log('Renaming %s to %s' % (args.repo_under_test, cwd))
os.rename(args.repo_under_test, cwd)
try:
log('== Checking new revision for problems with absolute paths ==')
result = build_all_targets(args, cwd, 'new', 'readonly')
if (len(result.cache_results.keys()) != 1 or
'DIR_HIT' not in result.cache_results):
# Remove DIR_HITs to make error message cleaner
result.cache_results.pop('DIR_HIT', None)
log('Building at revision %s with the new buck version '
'was unable to reuse the cache from a previous run. '
'This suggests one of the rule keys contains an '
                'absolute path.' % (
revisions_to_test[test_index - 1]))
for rule in result.cache_results['MISS']:
rule_name = rule['rule_name']
old_key = last_result.rule_key_map[rule_name]
log('Rule %s missed.' % rule_name)
log('\tOld Rule Key: %s.' % old_key)
log('\tNew Rule Key: %s.' % result.rule_key_map[rule_name])
raise Exception('Failed to reuse cache across directories!!!')
git_checkout(revisions_to_test[test_index], cwd)
for attempt in xrange(args.iterations_per_diff):
cache_mode = 'readonly'
if attempt == args.iterations_per_diff - 1:
cache_mode = 'readwrite'
build_all_targets(args, cwd, 'old', cache_mode)
build_all_targets(args, cwd, 'new', cache_mode)
log('== Checking new revision to ensure noop build does nothing. ==')
result = build_all_targets(
args,
cwd,
'new',
cache_mode,
run_clean=False)
if (len(result.cache_results.keys()) != 1 or
'LOCAL_KEY_UNCHANGED_HIT' not in result.cache_results):
result.cache_results.pop('DIR_HIT', None)
raise Exception(
'Doing a noop build at revision %s with the new '
                'buck version did not hit all of its keys.\nMissed '
'Rules: %s' % (
revisions_to_test[test_index - 1],
repr(result.cache_results)))
finally:
log('Renaming %s to %s' % (cwd, args.repo_under_test))
os.rename(cwd, args.repo_under_test)
return result
def main():
args = createArgParser().parse_args()
log('Running Performance Test!')
git_clean(args.repo_under_test)
revisions_to_test = git_get_revisions(args)
# Checkout the revision previous to the test and warm up the local dir
# cache.
# git_clean(args.repo_under_test)
log('=== Warming up cache ===')
git_checkout(revisions_to_test[0], args.repo_under_test)
build_all_targets(
args,
args.repo_under_test,
'old',
'readwrite',
dir_cache_only=False,
log_as_perftest=False)
results_for_new = build_all_targets(
args,
args.repo_under_test,
'new',
'readwrite',
dir_cache_only=False,
log_as_perftest=False)
log('=== Cache Warm! Running tests ===')
for i in xrange(1, args.revisions_to_go_back):
results_for_new = run_tests_for_diff(
args,
revisions_to_test,
i,
results_for_new)
if __name__ == '__main__':
main()
|
|
# coding: utf-8
import errno
import os
import sys
import requests
import multiprocessing
import click
import time
import difflib
from os import path, makedirs
from subprocess import PIPE, Popen
from itertools import chain
from multiprocessing import Pool
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
from threading import Thread
from cses.ui import clr, color_prompt
from cses.errors import RunTimeoutError, RunNoSuchProgramError
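# Run wraps subprocess.Popen with a timeout implemented via a worker thread:
# communicate() runs inside the thread, and if it has not finished within
# `timeout` seconds the process is terminated and ("", "TIMEOUT", 1) is returned.
# An OSError for a missing or non-executable program is translated into
# RunNoSuchProgramError.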
class Run(object):
def __init__(self, cmd, cwd, input=None, timeout=3):
self.cmd = cmd
self.cwd = cwd
        if input is not None:
input = input.encode("utf-8")
self.input = input
self.process = None
self.out = b""
self.err = b""
self.timeout = timeout
self.exception = None
def run(self):
def target():
try:
self.process = Popen(self.cmd,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
cwd=self.cwd)
self.out, self.err = self.process.communicate(self.input)
except OSError as e:
                if e.errno in [errno.ENOENT, errno.EACCES]:
self.exception = RunNoSuchProgramError(self.cmd[0])
else:
self.exception = e
thread = Thread(target=target)
thread.start()
thread.join(self.timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return "", "TIMEOUT", 1
if self.exception is not None:
raise self.exception
return (self.out.decode("utf-8"),
self.err.decode("utf-8"),
self.process.returncode)
class Result(object):
def __init__(self, testid, stderr="", input="", got="", expected="",
full=False, diff=False):
self.testid = testid
self.warning = stderr
self.input = input
self.got = got
self.expected = expected
self.full = full
self.success = got == expected
self.message = "\033[32mok\n" if self.success else "\033[31mfail\n"
self.message += "\033[0m"
self.diff = diff
def __str__(self):
def ens(str):
return str if str.endswith("\n") else str + "\n"
def title(str):
return clr(str + "\n")
def show(str):
if not self.full:
nl_num = str[:200].count("\n")
if nl_num >= 15:
return "\n".join(str.split("\n")[:15]) + "\n...\n"
return str[:200] if len(str) < 200 else str[:200] + "\n...\n"
return str
msg = "{}Test #{} {}".format(color_prompt, self.testid, self.message)
if self.warning != "":
msg += ens(self.warning)
if not self.success and len(self.input) > 0:
msg += title("Input")
msg += show(self.input)
msg += title("Correct output")
msg += show(self.expected)
msg += title("Your output")
msg += show(self.got)
if self.diff:
msg += title("Difference")
for line in difflib.context_diff(self.expected.splitlines(True),
self.got.splitlines(True),
fromfile="expected",
tofile="got"):
msg += line
return msg
class Base(object):
def __init__(self, name, file_extensions, template):
self.name = name
self.file_extensions = file_extensions
self.template = template
def getdir(self):
return path.join(path.expanduser("~"), ".cses")
def getfile(self, name="out"):
return path.join(self.getdir(), name)
def makedir(self):
makedirs(self.getdir(), mode=0o700, exist_ok=True)
def download_tests(self, tests):
duplicate_files = list(chain.from_iterable(((1, x["input"]),
(2, x["output"]))
for x in tests["test"]))
files = []
for file in duplicate_files:
if file[1] not in files:
files.append(file)
baseurl = "http://cses.fi/download/"
def load(dir, hash):
fname = self.getfile(hash)
if path.isfile(fname):
return
req = requests.get("{}{}/{}".format(baseurl, dir, hash))
req.raise_for_status()
with open(fname, "w") as fp:
fp.write(req.text)
return True
with ThreadPoolExecutor(max_workers=5) as executor:
futures = {executor.submit(load, f[0], f[1]): f for f in files}
for future in as_completed(futures):
yield future.result()
def run(self, cmd, cwd=None, input=None, timeout=3):
if cwd is None:
cwd = self.getdir()
return Run(cmd=cmd, cwd=cwd, input=input, timeout=timeout).run()
def applies_to(self, filename):
return any([filename.endswith(x) for x in self.file_extensions])
def _prepare(self, filename):
raise NotImplementedError()
def _run_cmd(self, filename):
raise NotImplementedError()
def user_run(self, filename):
self.makedir()
print(clr("Preparing"))
out, err, code = self._prepare(filename)
if len(err) > 0:
sys.stderr.write(err + "\n")
if code != 0:
sys.exit(code)
cmd = self._run_cmd(self.getfile())
print(clr("Running {}".format(" ".join(cmd))))
ret = Popen(cmd, cwd=self.getdir())
code = ret.communicate()
sys.exit(ret.returncode)
def test(self, filename, tests, keep_going, full=False, diff=False):
if tests["result"] != "ok":
sys.stderr.write("Can't test this")
sys.exit(1)
self.makedir()
with click.progressbar(self.download_tests(tests),
length=len(tests["test"]) * 2,
label=clr("Downloading tests")) as downloads:
for dl in downloads:
pass
print(clr("Preparing code"))
out, err, code = self._prepare(filename)
if len(err) > 0:
sys.stderr.write(err + "\n")
if code != 0:
sys.exit(code)
returns = []
with click.progressbar(tests["test"],
label=clr("Running tests")) as tests:
for test in tests:
f_in = self.getfile(test["input"])
f_expected = self.getfile(test["output"])
                input, expected = "", ""
with open(f_in) as fp:
input = fp.read()
with open(f_expected) as fp:
expected = fp.read()
got, err, code = self.run(self._run_cmd(self.getfile()),
input=input)
result = Result(test["order"], err, input, got, expected, full,
diff)
returns.append(result)
if not result.success and not keep_going:
break
ok = True
for res in returns:
if not res.success:
ok = False
print(res)
if not ok:
print("There were some \033[31merrors\033[0m")
sys.exit(1)
else:
print("All \033[32mOK\033[0m!")
def compare(self, expected, got):
return expected == got
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Relu and ReluGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
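# Reference for the check below: for x < 0, elu(x) = exp(x) - 1, so both the first
# and second derivatives equal exp(x); for x > 0 the activation is linear and the
# second derivative is 0.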
def _elu_grad_grad(activation):
if activation < 0:
return np.exp(activation)
return 0
class ReluTest(test.TestCase):
def _npRelu(self, np_features):
return np.maximum(np_features, np.zeros(np_features.shape))
def testNpRelu(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
self._npRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]
])))
def _testRelu(self, np_features, use_gpu=False):
np_relu = self._npRelu(np_features)
with self.test_session(use_gpu=use_gpu):
relu = nn_ops.relu(np_features)
tf_relu = relu.eval()
self.assertAllClose(np_relu, tf_relu)
self.assertShapeEqual(np_relu, relu)
def testNumbers(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
  # The gradient test for ReLU is a bit tricky because the derivative is not
  # well defined at zero, so we keep the input values away from zero.
def testGradientFloat32(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.relu(x, name="relu")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
dtype=dtypes.float64,
name="x")
y = nn_ops.relu(x, name="relu")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradGradFloat32(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.relu(x, name="relu")
z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("relu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
dtype=dtypes.float64,
name="x")
y = nn_ops.relu(x, name="relu")
z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("relu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientScalar(self):
with self.test_session() as sess:
x = variables.Variable(100.)
y = nn_ops.relu(x)
loss = y**2
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
train_op = optimizer.minimize(loss)
sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(x.eval(), 50.0)
class Relu6Test(test.TestCase):
def _npRelu6(self, np_features):
sixes = np.copy(np_features)
sixes.fill(6.0)
return np.minimum(
np.maximum(np_features, np.zeros(np_features.shape)), sixes)
def testNpRelu6(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
self._npRelu6(
np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7, 0.9]
])))
def _testRelu6(self, np_features, use_gpu=False):
np_relu6 = self._npRelu6(np_features)
with self.test_session(use_gpu=use_gpu):
relu6 = nn_ops.relu6(np_features)
tf_relu6 = relu6.eval()
self.assertAllClose(np_relu6, tf_relu6)
self.assertShapeEqual(np_relu6, relu6)
def testNumbers(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
      if t in [np.float16, np.float32, np.float64]:
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
  # The gradient test for ReLU6 is a bit tricky because the derivative is not
  # well defined at zero and at six, so we keep the input values away from
  # those points.
def testGradientFloat32(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
shape=[2, 5],
name="x")
y = nn_ops.relu6(x, name="relu6")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu6 (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
shape=[2, 5],
dtype=dtypes.float64,
name="x")
y = nn_ops.relu6(x, name="relu6")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float64,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("relu6 (float64) gradient err = ", err)
self.assertLess(err, 1e-10)
class EluTest(test.TestCase):
def _npElu(self, np_features):
return np.where(np_features < 0, np.exp(np_features) - 1, np_features)
def testNpElu(self):
self.assertAllClose(
np.array([[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196],
[0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]]),
self._npElu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]
])))
def _testElu(self, np_features, use_gpu=False):
np_elu = self._npElu(np_features)
with self.test_session(use_gpu=use_gpu):
elu = nn_ops.elu(np_features)
tf_elu = elu.eval()
self.assertAllClose(np_elu, tf_elu)
self.assertShapeEqual(np_elu, elu)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testElu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testElu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
def testGradientFloat32(self):
with self.test_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = constant_op.constant(x_val, name="x")
y = nn_ops.elu(x, name="elu")
x_init = np.asarray(x_val, dtype=np.float32, order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("elu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = constant_op.constant(x_val, dtype=dtypes.float64, name="x")
y = nn_ops.elu(x, name="elu")
x_init = np.asarray(x_val, dtype=np.float64, order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("elu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGrad(self):
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
elu = nn_ops.elu(x)
g, = gradients_impl.gradients(elu, x)
gg, = gradients_impl.gradients(g, x)
for x_val in [-1, -0.5, 0.5, 1]:
err = np.abs(gg.eval(feed_dict={x: x_val}) - _elu_grad_grad(x_val))
self.assertLess(err, 1e-4)
def testGradGradFloat32(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.elu(x, name="elu")
z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("elu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
dtype=dtypes.float64,
name="x")
y = nn_ops.elu(x, name="elu")
z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("elu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
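# The SELU constants below are scale ~= 1.0507 and scale * alpha ~= 1.7581
# (alpha ~= 1.6733), the values used by the self-normalizing activation of
# Klambauer et al., "Self-Normalizing Neural Networks".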
class SeluTest(test.TestCase):
def _npSelu(self, np_features):
scale = 1.0507009873554804934193349852946
scale_alpha = 1.7580993408473768599402175208123
return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1),
scale * np_features)
def testNpSelu(self):
self.assertAllClose(
np.array([[-1.0433095, 0.73549069, -0.6917582, 0.3152103 , -0.16730527],
[0.1050701 , -0.45566732, 0.5253505, -0.88505305, 0.9456309]]),
self._npSelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]
])))
def _testSelu(self, np_features, use_gpu=False):
np_selu = self._npSelu(np_features)
with self.test_session(use_gpu=use_gpu):
selu = nn_ops.selu(np_features)
tf_selu = selu.eval()
self.assertAllClose(np_selu, tf_selu)
self.assertShapeEqual(np_selu, selu)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
def testGradientFloat32(self):
with self.test_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = constant_op.constant(x_val, name="x")
y = nn_ops.selu(x, name="selu")
x_init = np.asarray(x_val, dtype=np.float32, order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("selu (float32) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.test_session():
x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
x = constant_op.constant(x_val, dtype=dtypes.float64, name="x")
y = nn_ops.selu(x, name="selu")
x_init = np.asarray(x_val, dtype=np.float64, order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("selu (float64) gradient err = ", err)
self.assertLess(err, 1e-6)
def testGradGradFloat32(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.selu(x, name="selu")
z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("selu (float32) gradient of gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGradFloat64(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
dtype=dtypes.float64,
name="x")
y = nn_ops.selu(x, name="selu")
z = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float64,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], z[0], [2, 5], x_init_value=x_init)
print("selu (float64) gradient of gradient err = ", err)
self.assertLess(err, 1e-6)
class CreluTest(test.TestCase):
def testCreluShape(self):
f = random_ops.random_normal([50, 5, 7, 10])
t = nn_ops.crelu(f)
self.assertEqual([50, 5, 7, 20], t.get_shape())
def _testCrelu(self, np_features, use_gpu=False):
np_relu = np.maximum(np_features, np.zeros_like(np_features))
np_neg_relu = np.maximum(-np_features, np.zeros_like(np_features))
np_crelu = np.concatenate((np_relu, np_neg_relu),
len(np_features.shape) - 1)
with self.test_session(use_gpu=use_gpu):
crelu = nn_ops.crelu(np_features)
tf_relu = crelu.eval()
self.assertAllClose(np_crelu, tf_relu)
self.assertShapeEqual(np_crelu, crelu)
def testNumbers(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
self._testCrelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._testCrelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
def testNumbersWithAxis0(self):
with self.test_session():
crelu = nn_ops.crelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]),
axis=0)
tf_relu = crelu.eval()
np_crelu = np.array([[0, 7, 0, 3, 0],
[1, 0, 5, 0, 9],
[9, 0, 5, 0, 1],
[0, 3, 0, 7, 0]])
self.assertAllEqual(np_crelu, tf_relu)
def testNumbersWithAxis1(self):
with self.test_session():
crelu = nn_ops.crelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]),
axis=1)
tf_relu = crelu.eval()
np_crelu = np.array([[0, 7, 0, 3, 0, 9, 0, 5, 0, 1],
[1, 0, 5, 0, 9, 0, 3, 0, 7, 0]])
self.assertAllEqual(np_crelu, tf_relu)
if __name__ == "__main__":
test.main()
|
|
# Copyright (C) 2013,2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013,2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import sys
import unittest
from nose.tools import eq_
from nose.tools import ok_
from ryu.utils import binary_str
from ryu.lib import pcaplib
from ryu.lib.packet import packet
from ryu.lib.packet import bgp
from ryu.lib.packet import afi
from ryu.lib.packet import safi
LOG = logging.getLogger(__name__)
BGP4_PACKET_DATA_DIR = os.path.join(
os.path.dirname(sys.modules[__name__].__file__), '../../packet_data/bgp4/')
PMSI_TYPE_NO_TUNNEL_INFORMATION_PRESENT = (
bgp.BGPPathAttributePmsiTunnel.TYPE_NO_TUNNEL_INFORMATION_PRESENT
)
PMSI_TYPE_INGRESS_REPLICATION = (
bgp.BGPPathAttributePmsiTunnel.TYPE_INGRESS_REPLICATION
)
class Test_bgp(unittest.TestCase):
""" Test case for ryu.lib.packet.bgp
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_open1(self):
msg = bgp.BGPOpen(my_as=30000, bgp_identifier='192.0.2.1')
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 29)
eq_(rest, b'')
def test_open2(self):
opt_param = [bgp.BGPOptParamCapabilityUnknown(cap_code=200,
cap_value=b'hoge'),
bgp.BGPOptParamCapabilityGracefulRestart(flags=0,
time=120,
tuples=[]),
bgp.BGPOptParamCapabilityRouteRefresh(),
bgp.BGPOptParamCapabilityCiscoRouteRefresh(),
bgp.BGPOptParamCapabilityMultiprotocol(
afi=afi.IP, safi=safi.MPLS_VPN),
bgp.BGPOptParamCapabilityCarryingLabelInfo(),
bgp.BGPOptParamCapabilityFourOctetAsNumber(
as_number=1234567),
bgp.BGPOptParamUnknown(type_=99, value=b'fuga')]
msg = bgp.BGPOpen(my_as=30000, bgp_identifier='192.0.2.2',
opt_param=opt_param)
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
ok_(len(msg) > 29)
eq_(rest, b'')
def test_update1(self):
msg = bgp.BGPUpdate()
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 23)
eq_(rest, b'')
def test_update2(self):
withdrawn_routes = [bgp.BGPWithdrawnRoute(length=0,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=1,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=3,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=7,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=32,
addr='192.0.2.13')]
mp_nlri = [
bgp.LabelledVPNIPAddrPrefix(24, '192.0.9.0',
route_dist='100:100',
labels=[1, 2, 3]),
bgp.LabelledVPNIPAddrPrefix(26, '192.0.10.192',
route_dist='10.0.0.1:10000',
labels=[5, 6, 7, 8]),
]
mp_nlri2 = [
bgp.LabelledIPAddrPrefix(24, '192.168.0.0', labels=[1, 2, 3])
]
mp_nlri_v6 = [
bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:1111::',
route_dist='200:200',
labels=[1, 2, 3]),
bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:2222::',
route_dist='10.0.0.1:10000',
labels=[5, 6, 7, 8]),
]
mp_nlri2_v6 = [
bgp.LabelledIP6AddrPrefix(64, '2001:db8:3333::', labels=[1, 2, 3])
]
communities = [
bgp.BGP_COMMUNITY_NO_EXPORT,
bgp.BGP_COMMUNITY_NO_ADVERTISE,
]
ecommunities = [
bgp.BGPTwoOctetAsSpecificExtendedCommunity(
subtype=1, as_number=65500, local_administrator=3908876543),
bgp.BGPFourOctetAsSpecificExtendedCommunity(
subtype=2, as_number=10000000, local_administrator=59876),
bgp.BGPIPv4AddressSpecificExtendedCommunity(
subtype=3, ipv4_address='192.0.2.1',
local_administrator=65432),
bgp.BGPOpaqueExtendedCommunity(subtype=13, opaque=b'abcdef'),
bgp.BGPEncapsulationExtendedCommunity(
subtype=0x0c, tunnel_type=10),
bgp.BGPEvpnMacMobilityExtendedCommunity(
subtype=0, flags=0xff, sequence_number=0x11223344),
bgp.BGPEvpnEsiLabelExtendedCommunity(
subtype=1, flags=0xff, label=b'\xFF\xFF\xFF'),
bgp.BGPEvpnEsiLabelExtendedCommunity(
subtype=1, flags=0xff, mpls_label=0xfffff),
bgp.BGPEvpnEsiLabelExtendedCommunity(
subtype=1, flags=0xff, vni=0xffffff),
bgp.BGPEvpnEsImportRTExtendedCommunity(
subtype=2, es_import="aa:bb:cc:dd:ee:ff"),
bgp.BGPUnknownExtendedCommunity(type_=99, value=b'abcdefg'),
]
path_attributes = [
bgp.BGPPathAttributeOrigin(value=1),
bgp.BGPPathAttributeAsPath(value=[[1000], {1001, 1002},
[1003, 1004]]),
bgp.BGPPathAttributeNextHop(value='192.0.2.199'),
bgp.BGPPathAttributeMultiExitDisc(value=2000000000),
bgp.BGPPathAttributeLocalPref(value=1000000000),
bgp.BGPPathAttributeAtomicAggregate(),
bgp.BGPPathAttributeAggregator(as_number=40000,
addr='192.0.2.99'),
bgp.BGPPathAttributeCommunities(communities=communities),
bgp.BGPPathAttributeOriginatorId(value='10.1.1.1'),
bgp.BGPPathAttributeClusterList(value=['1.1.1.1', '2.2.2.2']),
bgp.BGPPathAttributeExtendedCommunities(communities=ecommunities),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_NO_TUNNEL_INFORMATION_PRESENT,
label=b'\xFF\xFF\xFF'),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_NO_TUNNEL_INFORMATION_PRESENT,
tunnel_id=None),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_INGRESS_REPLICATION,
mpls_label=0xfffff,
tunnel_id=bgp.PmsiTunnelIdIngressReplication(
tunnel_endpoint_ip="1.1.1.1")),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_INGRESS_REPLICATION,
vni=0xffffff,
tunnel_id=bgp.PmsiTunnelIdIngressReplication(
tunnel_endpoint_ip="aa:bb:cc::dd:ee:ff")),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=2,
label=b'\xFF\xFF\xFF',
tunnel_id=bgp.PmsiTunnelIdUnknown(value=b'test')),
bgp.BGPPathAttributeAs4Path(value=[[1000000], {1000001, 1002},
[1003, 1000004]]),
bgp.BGPPathAttributeAs4Aggregator(as_number=100040000,
addr='192.0.2.99'),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
next_hop='1.1.1.1',
nlri=mp_nlri),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_LABEL,
next_hop='1.1.1.1',
nlri=mp_nlri2),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_VPN,
next_hop=['2001:db8::1'],
nlri=mp_nlri_v6),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_LABEL,
next_hop=['2001:db8::1',
'fe80::1'],
nlri=mp_nlri2_v6),
bgp.BGPPathAttributeMpUnreachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
withdrawn_routes=mp_nlri),
bgp.BGPPathAttributeUnknown(flags=0, type_=100, value=300 * b'bar')
]
nlri = [
bgp.BGPNLRI(length=24, addr='203.0.113.1'),
bgp.BGPNLRI(length=16, addr='203.0.113.0')
]
msg = bgp.BGPUpdate(withdrawn_routes=withdrawn_routes,
path_attributes=path_attributes,
nlri=nlri)
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
ok_(len(msg) > 23)
eq_(rest, b'')
def test_keepalive(self):
msg = bgp.BGPKeepAlive()
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 19)
eq_(rest, b'')
def test_notification(self):
data = b'hoge'
msg = bgp.BGPNotification(error_code=1, error_subcode=2, data=data)
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 21 + len(data))
eq_(rest, b'')
def test_route_refresh(self):
msg = bgp.BGPRouteRefresh(afi=afi.IP, safi=safi.MPLS_VPN)
binmsg = msg.serialize()
msg2, _, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 23)
eq_(rest, b'')
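    # The stream-parser test below feeds the concatenated, serialized messages to
    # bgp.StreamParser one byte at a time, exercising reassembly across message
    # boundaries.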
def test_stream_parser(self):
msgs = [
bgp.BGPNotification(error_code=1, error_subcode=2, data=b'foo'),
bgp.BGPNotification(error_code=3, error_subcode=4, data=b'bar'),
bgp.BGPNotification(error_code=5, error_subcode=6, data=b'baz'),
]
binmsgs = b''.join([bytes(msg.serialize()) for msg in msgs])
sp = bgp.StreamParser()
results = []
for b in binmsgs:
for m in sp.parse(b):
results.append(m)
eq_(str(results), str(msgs))
def test_parser(self):
files = [
'bgp4-open',
'bgp4-update',
'bgp4-update_ipv6',
'bgp4-update_vpnv6',
'bgp4-keepalive',
'evpn_esi_arbitrary',
'evpn_esi_lacp',
'evpn_esi_l2_bridge',
'evpn_esi_mac_base',
'evpn_esi_router_id',
'evpn_esi_as_based',
'evpn_nlri_eth_a-d',
'evpn_nlri_mac_ip_ad',
'evpn_nlri_inc_multi_eth_tag',
'evpn_nlri_eth_seg',
'evpn_nlri_ip_prefix',
]
for f in files:
LOG.debug('*** testing %s ...', f)
for _, buf in pcaplib.Reader(
open(BGP4_PACKET_DATA_DIR + f + '.pcap', 'rb')):
# Checks if BGP message can be parsed as expected.
pkt = packet.Packet(buf)
ok_(isinstance(pkt.protocols[-1], bgp.BGPMessage),
'Failed to parse BGP message: %s' % pkt)
# Checks if BGP message can be serialized as expected.
pkt.serialize()
eq_(buf, pkt.data,
"b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data)))
def test_json1(self):
opt_param = [bgp.BGPOptParamCapabilityUnknown(cap_code=200,
cap_value=b'hoge'),
bgp.BGPOptParamCapabilityRouteRefresh(),
bgp.BGPOptParamCapabilityMultiprotocol(
afi=afi.IP, safi=safi.MPLS_VPN),
bgp.BGPOptParamCapabilityFourOctetAsNumber(
as_number=1234567),
bgp.BGPOptParamUnknown(type_=99, value=b'fuga')]
msg1 = bgp.BGPOpen(my_as=30000, bgp_identifier='192.0.2.2',
opt_param=opt_param)
jsondict = msg1.to_jsondict()
msg2 = bgp.BGPOpen.from_jsondict(jsondict['BGPOpen'])
eq_(str(msg1), str(msg2))
def test_json2(self):
withdrawn_routes = [bgp.BGPWithdrawnRoute(length=0,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=1,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=3,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=7,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=32,
addr='192.0.2.13')]
mp_nlri = [
bgp.LabelledVPNIPAddrPrefix(24, '192.0.9.0',
route_dist='100:100',
labels=[1, 2, 3]),
bgp.LabelledVPNIPAddrPrefix(26, '192.0.10.192',
route_dist='10.0.0.1:10000',
labels=[5, 6, 7, 8]),
]
mp_nlri2 = [
bgp.LabelledIPAddrPrefix(24, '192.168.0.0', labels=[1, 2, 3])
]
mp_nlri_v6 = [
bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:1111::',
route_dist='200:200',
labels=[1, 2, 3]),
bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:2222::',
route_dist='10.0.0.1:10000',
labels=[5, 6, 7, 8]),
]
mp_nlri2_v6 = [
bgp.LabelledIP6AddrPrefix(64, '2001:db8:3333::', labels=[1, 2, 3])
]
communities = [
bgp.BGP_COMMUNITY_NO_EXPORT,
bgp.BGP_COMMUNITY_NO_ADVERTISE,
]
ecommunities = [
bgp.BGPTwoOctetAsSpecificExtendedCommunity(
subtype=1, as_number=65500, local_administrator=3908876543),
bgp.BGPFourOctetAsSpecificExtendedCommunity(
subtype=2, as_number=10000000, local_administrator=59876),
bgp.BGPIPv4AddressSpecificExtendedCommunity(
subtype=3, ipv4_address='192.0.2.1',
local_administrator=65432),
bgp.BGPOpaqueExtendedCommunity(subtype=13, opaque=b'abcdef'),
bgp.BGPEncapsulationExtendedCommunity(
subtype=0x0c, tunnel_type=10),
bgp.BGPEvpnMacMobilityExtendedCommunity(
subtype=0, flags=0xff, sequence_number=0x11223344),
bgp.BGPEvpnEsiLabelExtendedCommunity(
subtype=1, flags=0xff, label=b'\xFF\xFF\xFF'),
bgp.BGPEvpnEsiLabelExtendedCommunity(
subtype=1, flags=0xff, mpls_label=0xfffff),
bgp.BGPEvpnEsiLabelExtendedCommunity(
subtype=1, flags=0xff, vni=0xffffff),
bgp.BGPEvpnEsImportRTExtendedCommunity(
subtype=2, es_import="aa:bb:cc:dd:ee:ff"),
bgp.BGPUnknownExtendedCommunity(type_=99, value=b'abcdefg'),
]
path_attributes = [
bgp.BGPPathAttributeOrigin(value=1),
bgp.BGPPathAttributeAsPath(value=[[1000], {1001, 1002},
[1003, 1004]]),
bgp.BGPPathAttributeNextHop(value='192.0.2.199'),
bgp.BGPPathAttributeMultiExitDisc(value=2000000000),
bgp.BGPPathAttributeLocalPref(value=1000000000),
bgp.BGPPathAttributeAtomicAggregate(),
bgp.BGPPathAttributeAggregator(as_number=40000,
addr='192.0.2.99'),
bgp.BGPPathAttributeCommunities(communities=communities),
bgp.BGPPathAttributeExtendedCommunities(communities=ecommunities),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_NO_TUNNEL_INFORMATION_PRESENT,
label=b'\xFF\xFF\xFF'),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_NO_TUNNEL_INFORMATION_PRESENT,
tunnel_id=None),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_INGRESS_REPLICATION,
mpls_label=0xfffff,
tunnel_id=bgp.PmsiTunnelIdIngressReplication(
tunnel_endpoint_ip="1.1.1.1")),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=PMSI_TYPE_INGRESS_REPLICATION,
vni=0xffffff,
tunnel_id=bgp.PmsiTunnelIdIngressReplication(
tunnel_endpoint_ip="aa:bb:cc::dd:ee:ff")),
bgp.BGPPathAttributePmsiTunnel(
pmsi_flags=1,
tunnel_type=2,
label=b'\xFF\xFF\xFF',
tunnel_id=bgp.PmsiTunnelIdUnknown(value=b'test')),
bgp.BGPPathAttributeAs4Path(value=[[1000000], {1000001, 1002},
[1003, 1000004]]),
bgp.BGPPathAttributeAs4Aggregator(as_number=100040000,
addr='192.0.2.99'),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
next_hop='1.1.1.1',
nlri=mp_nlri),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_LABEL,
next_hop='1.1.1.1',
nlri=mp_nlri2),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_VPN,
next_hop=['2001:db8::1'],
nlri=mp_nlri_v6),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_LABEL,
next_hop=['2001:db8::1',
'fe80::1'],
nlri=mp_nlri2_v6),
bgp.BGPPathAttributeMpUnreachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
withdrawn_routes=mp_nlri),
bgp.BGPPathAttributeUnknown(flags=0, type_=100, value=300 * b'bar')
]
nlri = [
bgp.BGPNLRI(length=24, addr='203.0.113.1'),
bgp.BGPNLRI(length=16, addr='203.0.113.0')
]
msg1 = bgp.BGPUpdate(withdrawn_routes=withdrawn_routes,
path_attributes=path_attributes,
nlri=nlri)
jsondict = msg1.to_jsondict()
msg2 = bgp.BGPUpdate.from_jsondict(jsondict['BGPUpdate'])
eq_(str(msg1), str(msg2))
|
|
import scipy as SP
import numpy as NP
import scipy.linalg as LA
import scipy.optimize as OPT
import scipy.stats as st
import pdb
# log of 2pi
L2pi = 1.8378770664093453
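# nLLeval computes the closed-form negative log-likelihood of the rotated linear
# mixed model: with S the kernel eigenvalues and delta = exp(ldelta),
#   nLL = 0.5 * (n*log(2*pi) + sum(log(S + delta)) + n + n*log(sigg2)),
# where beta is the generalized-least-squares estimate and sigg2 the ML estimate
# of the variance of the 1/(S + delta)-weighted residuals.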
def nLLeval(ldelta,UY,UX,S,MLparams=False):
"""evaluate the negative LL of a LMM with kernel USU.T"""
delta=SP.exp(ldelta);
n,d=UX.shape;
Sdi=S+delta;
ldet=SP.sum(NP.log(Sdi));
Sdi=1.0/Sdi;
XSdi=UX.T*SP.tile(Sdi,(d,1));
XSX=SP.dot(XSdi,UX);
XSY=SP.dot(XSdi,UY);
beta=LA.lstsq(XSX,XSY);
res=UY-SP.dot(UX,beta[0]);
res*=res;
res*=Sdi;
sigg2=SP.sum(res)/n;
nLL=0.5*(n*L2pi+ldet+n+n*NP.log(sigg2));
if MLparams:
return nLL, beta[0], sigg2;
else:
return nLL;
def optdelta(UY,UX,S,ldeltanull=None,numintervals=100,ldeltamin=-10.0,ldeltamax=10.0):
"""find the optimal delta"""
    if ldeltanull is None:
nllgrid=SP.ones(numintervals+1)*SP.inf;
ldeltagrid=SP.arange(numintervals+1)/(numintervals*1.0)*(ldeltamax-ldeltamin)+ldeltamin;
nllmin=SP.inf;
for i in SP.arange(numintervals+1):
nllgrid[i]=nLLeval(ldeltagrid[i],UY,UX,S);
if nllgrid[i]<nllmin:
nllmin=nllgrid[i];
ldeltaopt_glob=ldeltagrid[i];
        foundMin=False
        ee = 1E-8
        for i in SP.arange(numintervals-1)+1:
            #carry out brent optimization within each interior grid interval
            if ((nllgrid[i-1]-nllgrid[i])>ee) and ((nllgrid[i+1]-nllgrid[i])>ee):
                foundMin = True
                ldeltaopt,nllopt,iter,funcalls = OPT.brent(nLLeval,(UY,UX,S),(ldeltagrid[i-1],ldeltagrid[i],ldeltagrid[i+1]),full_output=True);
                if nllopt<nllmin:
                    nllmin=nllopt;
                    ldeltaopt_glob=ldeltaopt;
else:
ldeltaopt_glob=ldeltanull;
return ldeltaopt_glob;
def estimateBeta(X,Y,K,C=None,addBiasTerm=False,numintervals0=100,ldeltamin0=-5.0,ldeltamax0=5.0):
""" compute all pvalues
If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
"""
n,s=X.shape;
n_pheno=Y.shape[1];
S,U=LA.eigh(K);
UY=SP.dot(U.T,Y);
UX=SP.dot(U.T,X);
    if C is None:
Ucovariate=SP.dot(U.T,SP.ones([n,1]));
else:
if (addBiasTerm):
C_=SP.concatenate((C,SP.ones([n,1])),axis=1)
Ucovariate=SP.dot(U.T,C_);
else:
Ucovariate=SP.dot(U.T,C);
n_covar=Ucovariate.shape[1];
beta = SP.empty((n_pheno,s,n_covar+1));
LL=SP.ones((n_pheno,s))*(-SP.inf);
ldelta=SP.empty((n_pheno,s));
sigg2=SP.empty((n_pheno,s));
pval=SP.ones((n_pheno,s))*(-SP.inf);
for phen in SP.arange(n_pheno):
UY_=UY[:,phen];
ldelta[phen]=optdelta(UY_,Ucovariate,S,ldeltanull=None,numintervals=numintervals0,ldeltamin=ldeltamin0,ldeltamax=ldeltamax0);
for snp in SP.arange(s):
UX_=SP.hstack((UX[:,snp:snp+1],Ucovariate));
nLL_, beta_, sigg2_=nLLeval(ldelta[phen,snp],UY_,UX_,S,MLparams=True);
beta[phen,snp,:]=beta_;
sigg2[phen,snp]=sigg2_;
LL[phen,snp]=-nLL_;
return beta, ldelta
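# train_associations fits, for every phenotype/SNP pair, a null model (covariates
# only) and an alternative model (covariates + SNP) and returns the LOD scores
# (log-likelihood differences). The second return value is either the chi2
# survival p-value of 2*LOD with 1 degree of freedom (calc_pval=True) or the
# fitted log(delta) values (calc_pval=False).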
def train_associations(X,Y,K,C=None,addBiasTerm=False,numintervalsAlt=0,ldeltaminAlt=-1.0,ldeltamaxAlt=1.0,numintervals0=100,ldeltamin0=-5.0,ldeltamax0=5.0, calc_pval=True):
""" compute all pvalues
If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
"""
n,s=X.shape;
n_pheno=Y.shape[1];
S,U=LA.eigh(K);
UY=SP.dot(U.T,Y);
UX=SP.dot(U.T,X);
    if C is None:
Ucovariate=SP.dot(U.T,SP.ones([n,1]));
else:
if (addBiasTerm):
C_=SP.concatenate((C,SP.ones([n,1])),axis=1)
Ucovariate=SP.dot(U.T,C_);
else:
Ucovariate=SP.dot(U.T,C);
n_covar=Ucovariate.shape[1];
beta = SP.empty((n_pheno,s,n_covar+1));
beta0 = SP.empty((n_pheno,n_covar));
LL=SP.ones((n_pheno,s))*(-SP.inf);
LL0=SP.ones((n_pheno))*(-SP.inf);
ldelta=SP.empty((n_pheno,s));
ldelta0=SP.empty(n_pheno);
sigg2=SP.empty((n_pheno,s));
sigg20=SP.empty((n_pheno));
pval=SP.ones((n_pheno,s))*(-SP.inf);
for phen in SP.arange(n_pheno):
UY_=UY[:,phen];
ldelta0[phen]=optdelta(UY_,Ucovariate,S,ldeltanull=None,numintervals=numintervals0,ldeltamin=ldeltamin0,ldeltamax=ldeltamax0);
        print('log(delta) was fitted to %s' % ldelta0[phen])
#print ldelta0
#print "ldelta0 \n"
nLL0_, beta0_, sigg20_=nLLeval(ldelta0[phen],UY_,Ucovariate,S,MLparams=True);
beta0[phen,:]=beta0_;
sigg20[phen]=sigg20_;
LL0[phen]=-nLL0_;
for snp in SP.arange(s):
UX_=SP.hstack((UX[:,snp:snp+1],Ucovariate));
            if numintervalsAlt==0: #EMMA-X trick #fast version, no refitting of delta
ldelta[phen,snp]=ldelta0[phen];
else: #fit delta
ldelta[phen,snp]=optdelta(UY_,UX_,S,ldeltanull=None,numintervals=numintervalsAlt,ldeltamin=ldelta0[phen]+ldeltaminAlt,ldeltamax=ldelta0[phen]+ldeltamaxAlt);
nLL_, beta_, sigg2_=nLLeval(ldelta[phen,snp],UY_,UX_,S,MLparams=True);
beta[phen,snp,:]=beta_;
sigg2[phen,snp]=sigg2_;
LL[phen,snp]=-nLL_;
#reshaping of LL0
LL0 = LL0[:,SP.newaxis]
lods = LL-LL0
if calc_pval:
arg2 = st.chi2.sf(2*(lods),1)
else:
arg2 = ldelta
#return LL0, LL, pval, ldelta0, sigg20, beta0, ldelta, sigg2, beta
return lods, arg2
def train_interact(X,Y,K,interactants=None,covariates=None,addBiasTerm=True,numintervalsAlt=0,ldeltaminAlt=-1.0,ldeltamaxAlt=1.0,numintervals0=10,ldeltamin0=-5.0,ldeltamax0=5.0):
""" compute all pvalues
If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
difference to previous model: Ux and Ucovariate are recomputed for every SNP
"""
n,s=X.shape;
n_pheno=Y.shape[1];
S,U=LA.eigh(K);
UY=SP.dot(U.T,Y);
UX=SP.dot(U.T,X);
    if covariates is None:
covariates = SP.ones([n,0])
if (addBiasTerm):
covariates=SP.concatenate((covariates,SP.ones([n,1])),axis=1)
#Ucovariates
Ucovariate=SP.dot(U.T,covariates);
#Uinteractants
Uinteractants = SP.dot(U.T,interactants)
n_covar=covariates.shape[1]
n_inter=interactants.shape[1]
#weights
    #foreground: covariates + SNP + interactions
beta = SP.empty((n_pheno,s,1+n_covar+2*n_inter));
#background: covariates + direct SNP effect
beta0 = SP.empty((n_pheno,s,1+n_covar+n_inter));
LL=SP.ones((n_pheno,s))*(-SP.inf);
LL0=SP.ones((n_pheno,s))*(-SP.inf);
ldelta=SP.empty([n_pheno,s]);
ldelta0=SP.empty([n_pheno,s]);
sigg2=SP.empty((n_pheno,s));
sigg20=SP.empty((n_pheno,s));
pval=SP.ones((n_pheno,s))*(-SP.inf);
for snp in SP.arange(s):
#loop through all SNPs
        #1. snp-specific background model: SNP effect + covariates + interactants
Ucovariates_=SP.hstack((UX[:,snp:snp+1],Uinteractants,Ucovariate))
#2. snp-specific foreground model
#interactions
Xi_ = X[:,snp:snp+1]*interactants
#transform
UXi_ = SP.dot(U.T,Xi_)
        #stack: interactions, interactants (main effects), SNPs (main effects), covariates (if any)
UX_ = SP.hstack((UXi_,Ucovariates_))
for phen in SP.arange(n_pheno):
print(phen)
            #loop through phenotypes
#get transformed Y
UY_=UY[:,phen]
#1. fit background model
ldelta0[phen,snp]=optdelta(UY_,Ucovariates_,S,ldeltanull=None,numintervals=numintervals0,ldeltamin=ldeltamin0,ldeltamax=ldeltamax0);
nLL0_, beta0_, sigg20_=nLLeval(ldelta0[phen,snp],UY_,Ucovariates_,S,MLparams=True);
beta0[phen,snp,:]=beta0_;
sigg20[phen,snp]=sigg20_;
LL0[phen,snp]=-nLL0_;
#2. fit foreground model
            if numintervalsAlt==0: #EMMA-X trick #fast version, no refitting of delta
ldelta[phen,snp]=ldelta0[phen,snp]
else: #fit delta
ldelta[phen,snp]=optdelta(UY_,UX_,S,ldeltanull=None,numintervals=numintervalsAlt,ldeltamin=ldelta0[phen,snp]+ldeltaminAlt,ldeltamax=ldelta0[phen,snp]+ldeltamaxAlt);
nLL_, beta_, sigg2_=nLLeval(ldelta[phen,snp],UY_,UX_,S,MLparams=True);
beta[phen,snp,:]=beta_;
sigg2[phen,snp]=sigg2_;
LL[phen,snp]=-nLL_;
pval = st.chi2.sf(2*(LL-LL0),1)
return LL0, LL, pval, ldelta0, sigg20, beta0, ldelta, sigg2, beta
def train_interactX(X,Y,K,interactants=None,covariates=None,addBiasTerm=True,numintervalsAlt=0,ldeltaminAlt=-1.0,ldeltamaxAlt=1.0,numintervals0=10,ldeltamin0=-5.0,ldeltamax0=5.0):
""" compute all pvalues
If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
difference to previous model: Ux and Ucovariate are recomputed for every SNP
"""
n,s=X.shape;
n_pheno=Y.shape[1];
S,U=LA.eigh(K);
UY=SP.dot(U.T,Y);
UX=SP.dot(U.T,X);
    if covariates is None:
covariates = SP.ones([n,0])
if (addBiasTerm):
covariates=SP.concatenate((covariates,SP.ones([n,1])),axis=1)
#Ucovariates
Ucovariate=SP.dot(U.T,covariates);
#Uinteractants
Uinteractants = SP.dot(U.T,interactants)
n_covar=covariates.shape[1]
n_inter=interactants.shape[1]
#weights
    #foreground: covariates + SNP + interactions
beta = SP.empty((n_pheno,s,1+n_covar+2*n_inter));
#background: covariates + direct SNP effect
beta0 = SP.empty((n_pheno,s,1+n_covar+n_inter));
LL=SP.ones((n_pheno,s))*(-SP.inf);
LL0=SP.ones((n_pheno,s))*(-SP.inf);
ldelta=SP.empty([n_pheno,s]);
ldelta0=SP.empty([n_pheno,s]);
sigg2=SP.empty((n_pheno,s));
sigg20=SP.empty((n_pheno,s));
pval=SP.ones((n_pheno,s))*(-SP.inf);
#0. fit 0 model on phenotypes and covariates alone
for phen in SP.arange(n_pheno):
        #fit the covariate-only background model once, the first time each phenotype is visited
        #loop through phenotypes
#get transformed Y
UY_=UY[:,phen]
#1. fit background model to set delta
ldelta0[phen,:]=optdelta(UY_,Ucovariate,S,ldeltanull=None,numintervals=numintervals0,ldeltamin=ldeltamin0,ldeltamax=ldeltamax0);
#1. loop through all snps
for snp in SP.arange(s):
#loop through all SNPs
        #1. snp-specific background model: SNP effect + covariates + interactants
Ucovariates_=SP.hstack((UX[:,snp:snp+1],Uinteractants,Ucovariate))
#2. snp-specific foreground model
#interactions
Xi_ = X[:,snp:snp+1]*interactants
#transform
UXi_ = SP.dot(U.T,Xi_)
        #stack: interactions, interactants (main effects), SNPs (main effects), covariates (if any)
UX_ = SP.hstack((UXi_,Ucovariates_))
for phen in SP.arange(n_pheno):
UY_=UY[:,phen]
#loop through all phenotypes
#emmaX trick
ldelta[phen,snp]=ldelta0[phen,snp]
            #evaluate background and foreground
#null model
nLL0_, beta0_, sigg20_=nLLeval(ldelta0[phen,snp],UY_,Ucovariates_,S,MLparams=True)
beta0[phen,snp,:]=beta0_
sigg20[phen,snp]=sigg20_
LL0[phen,snp]=-nLL0_
#foreground model
nLL_, beta_, sigg2_=nLLeval(ldelta[phen,snp],UY_,UX_,S,MLparams=True)
beta[phen,snp,:]=beta_
sigg2[phen,snp]=sigg2_
LL[phen,snp]=-nLL_
pval = st.chi2.sf(2*(LL-LL0),1)
return LL0, LL, pval, ldelta0, sigg20, beta0, ldelta, sigg2, beta
def run_interact(Y, intA, intB, covs, K):
""" Calculate pvalues for the nested model of including a multiplicative term between intA and intB into the additive model """
[N, Ny] = Y.shape
Na = intA.shape[1] # number of interaction terms 1
Nb = intB.shape[1] # number of interaction terms 2
S,U=LA.eigh(K);
UY=SP.dot(U.T,Y);
UintA=SP.dot(U.T,intA);
UintB=SP.dot(U.T,intB);
    #add a mean column if no covariates were provided:
    if covs is None:
        covs = SP.ones([N, 1])
    Ucovs=SP.dot(U.T,covs);
    # for each snp/gene/factor combination, run a lod
    # snps need to be diced because of missing values - iterate over them, else use arrays
    lods = SP.zeros([Na, Nb, Ny])
# for each pair of interacting terms
for a in range(Na):
for b in range(Nb):
# calculate additive and interaction terms
            C = SP.concatenate((Ucovs, UintA[:,a:a+1], UintB[:,b:b+1]), axis=1)
            X = intA[:,a:a+1]*intB[:,b:b+1]
            UX = SP.dot(U.T,X);
            UX = SP.concatenate((UX, C), axis=1)
for phen in SP.arange(Ny):
UY_=UY[:,phen];
                ldeltanull = optdelta(UY_,C,S,ldeltanull=None,numintervals=10,ldeltamin=-5.0,ldeltamax=5.0);
                nllnull = nLLeval(ldeltanull,UY_,C,S);
                ldeltaalt = optdelta(UY_,UX,S,ldeltanull=ldeltanull,numintervals=100,ldeltamin=-5.0,ldeltamax=5.0);
                nllalt = nLLeval(ldeltaalt,UY_,UX,S);
                # LOD: improvement of the interaction model over the additive null model
                lods[a,b,phen] = nllnull - nllalt;
return lods
|
|
import math
import time
from hashlib import md5
from itertools import cycle
import random
import numpy as np
from spring.states import STATES, NUM_STATES
from fastdocgen import build_achievements
class Iterator(object):
def __init__(self):
self.prefix = None
def __iter__(self):
return self
def add_prefix(self, key):
if self.prefix:
return '%s-%s' % (self.prefix, key)
else:
return key
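# ExistingKey draws keys for reads/updates from the not-yet-deleted key space,
# which is split into a "cold" prefix and a "hot" tail covering working_set
# percent of the items; working_set_access controls how often a key is drawn
# from the hot range.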
class ExistingKey(Iterator):
def __init__(self, working_set, working_set_access, prefix):
self.working_set = working_set
self.working_set_access = working_set_access
self.prefix = prefix
def next(self, curr_items, curr_deletes):
num_existing_items = curr_items - curr_deletes
num_hot_items = int(num_existing_items * self.working_set / 100.0)
num_cold_items = num_existing_items - num_hot_items
left_limit = 1 + curr_deletes
if self.working_set_access == 100 or \
random.randint(0, 100) <= self.working_set_access:
left_limit += num_cold_items
right_limit = curr_items
else:
right_limit = left_limit + num_cold_items
key = np.random.random_integers(left_limit, right_limit)
key = '%012d' % key
return self.add_prefix(key)
class SequentialHotKey(Iterator):
def __init__(self, sid, ws, prefix):
self.sid = sid
self.ws = ws
self.prefix = prefix
def __iter__(self):
num_hot_keys = int(self.ws.items * self.ws.working_set / 100.0)
num_cold_items = self.ws.items - num_hot_keys
for seq_id in xrange(1 + num_cold_items + self.sid,
1 + self.ws.items,
self.ws.workers):
key = '%012d' % seq_id
key = self.add_prefix(key)
yield key
class NewKey(Iterator):
def __init__(self, prefix, expiration):
self.prefix = prefix
self.expiration = expiration
self.ttls = cycle(range(150, 450, 30))
def next(self, curr_items):
key = '%012d' % curr_items
key = self.add_prefix(key)
ttl = None
if self.expiration and random.randint(1, 100) <= self.expiration:
ttl = self.ttls.next()
return key, ttl
class KeyForRemoval(Iterator):
def __init__(self, prefix):
self.prefix = prefix
def next(self, curr_deletes):
key = '%012d' % curr_deletes
return self.add_prefix(key)
class NewDocument(Iterator):
SIZE_VARIATION = 0.25 # 25%
KEY_LENGTH = 10
def __init__(self, avg_size):
self.avg_size = avg_size
@classmethod
def _get_variation_coeff(cls):
return np.random.uniform(1 - cls.SIZE_VARIATION, 1 + cls.SIZE_VARIATION)
@staticmethod
def _build_alphabet(key):
return md5(key).hexdigest() + md5(key[::-1]).hexdigest()
@staticmethod
def _build_name(alphabet):
return '%s %s' % (alphabet[:6], alphabet[6:12])
@staticmethod
def _build_email(alphabet):
return '%s@%s.com' % (alphabet[12:18], alphabet[18:24])
@staticmethod
def _build_city(alphabet):
return alphabet[24:30]
@staticmethod
def _build_realm(alphabet):
return alphabet[30:36]
@staticmethod
def _build_country(alphabet):
return alphabet[42:48]
@staticmethod
def _build_county(alphabet):
return alphabet[48:54]
@staticmethod
def _build_street(alphabet):
return alphabet[54:62]
@staticmethod
def _build_coins(alphabet):
return max(0.1, int(alphabet[36:40], 16) / 100.0)
@staticmethod
def _build_gmtime(alphabet):
seconds = 396 * 24 * 3600 * (int(alphabet[63], 16) % 12)
return tuple(time.gmtime(seconds))
@staticmethod
def _build_year(alphabet):
return 1985 + int(alphabet[62], 16)
@staticmethod
def _build_state(alphabet):
idx = alphabet.find('7') % NUM_STATES
return STATES[idx][0]
@staticmethod
def _build_full_state(alphabet):
idx = alphabet.find('8') % NUM_STATES
return STATES[idx][1]
@staticmethod
def _build_category(alphabet):
return int(alphabet[41], 16) % 3
@staticmethod
def _build_achievements(alphabet):
return build_achievements(alphabet) or [0]
@staticmethod
def _build_body(alphabet, length):
length_int = int(length)
num_slices = int(math.ceil(length / 64)) # 64 == len(alphabet)
body = num_slices * alphabet
return body[:length_int]
def next(self, key):
next_length = self._get_variation_coeff() * self.avg_size
alphabet = self._build_alphabet(key)
return {
'name': self._build_name(alphabet),
'email': self._build_email(alphabet),
'city': self._build_city(alphabet),
'realm': self._build_realm(alphabet),
'coins': self._build_coins(alphabet),
'category': self._build_category(alphabet),
'achievements': self._build_achievements(alphabet),
'body': self._build_body(alphabet, next_length)
}
class NewNestedDocument(NewDocument):
OVERHEAD = 450 # Minimum size due to fixed fields, body size is variable
def _size(self):
if self.avg_size <= self.OVERHEAD:
return 0
if random.random() < 0.975:
# Normal distribution with mean=self.avg_size
normal = np.random.normal(loc=1.0, scale=0.17)
return (self.avg_size - self.OVERHEAD) * normal
else:
# Beta distribution, 2KB-2MB range
return 2048 / np.random.beta(a=2.2, b=1.0)
def next(self, key):
alphabet = self._build_alphabet(key)
size = self._size()
return {
'name': {'f': {'f': {'f': self._build_name(alphabet)}}},
'email': {'f': {'f': self._build_email(alphabet)}},
'street': {'f': {'f': self._build_street(alphabet)}},
'city': {'f': {'f': self._build_city(alphabet)}},
'county': {'f': {'f': self._build_county(alphabet)}},
'state': {'f': self._build_state(alphabet)},
'full_state': {'f': self._build_full_state(alphabet)},
'country': {'f': self._build_country(alphabet)},
'realm': {'f': self._build_realm(alphabet)},
'coins': {'f': self._build_coins(alphabet)},
'category': self._build_category(alphabet),
'achievements': self._build_achievements(alphabet),
'gmtime': self._build_gmtime(alphabet),
'year': self._build_year(alphabet),
'body': self._build_body(alphabet, size),
}
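# Illustrative usage sketch (not part of the original module): every field is
# derived deterministically from the md5 digest of the key, so the same key
# always produces the same document; only the body length varies around
# avg_size. The prefix and sizes below are arbitrary.
def _example_docgen():
    new_key = NewKey(prefix='test', expiration=0)
    docgen = NewDocument(avg_size=512)
    key, ttl = new_key.next(curr_items=1)   # 'test-000000000001', ttl is None
    doc = docgen.next(key)
    return key, ttl, doc['name'], len(doc['body'])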
|
|
import copy
import itertools
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
MutableSequence,
Optional,
Tuple,
Type,
Union,
)
from ._utils import (
ValueKind,
_is_missing_literal,
_is_none,
_resolve_optional,
format_and_raise,
get_value_kind,
is_int,
is_primitive_list,
is_structured_config,
type_str,
)
from .base import Container, ContainerMetadata, Node
from .basecontainer import BaseContainer
from .errors import (
ConfigAttributeError,
ConfigTypeError,
ConfigValueError,
KeyValidationError,
MissingMandatoryValue,
ReadonlyConfigError,
ValidationError,
)
class ListConfig(BaseContainer, MutableSequence[Any]):
_content: Union[List[Node], None, str]
def __init__(
self,
content: Union[List[Any], Tuple[Any, ...], str, None],
key: Any = None,
parent: Optional[Container] = None,
element_type: Union[Type[Any], Any] = Any,
is_optional: bool = True,
ref_type: Union[Type[Any], Any] = Any,
flags: Optional[Dict[str, bool]] = None,
) -> None:
try:
if isinstance(content, ListConfig):
if flags is None:
flags = content._metadata.flags
super().__init__(
parent=parent,
metadata=ContainerMetadata(
ref_type=ref_type,
object_type=list,
key=key,
optional=is_optional,
element_type=element_type,
key_type=int,
flags=flags,
),
)
self.__dict__["_content"] = None
self._set_value(value=content, flags=flags)
except Exception as ex:
format_and_raise(node=None, key=key, value=None, cause=ex, msg=str(ex))
def _validate_get(self, key: Any, value: Any = None) -> None:
if not isinstance(key, (int, slice)):
raise KeyValidationError(
"ListConfig indices must be integers or slices, not $KEY_TYPE"
)
def _validate_set(self, key: Any, value: Any) -> None:
from omegaconf import OmegaConf
self._validate_get(key, value)
if self._get_flag("readonly"):
raise ReadonlyConfigError("ListConfig is read-only")
if 0 <= key < self.__len__():
target = self._get_node(key)
if target is not None:
assert isinstance(target, Node)
if value is None and not target._is_optional():
raise ValidationError(
"$FULL_KEY is not optional and cannot be assigned None"
)
vk = get_value_kind(value)
if vk == ValueKind.MANDATORY_MISSING:
return
else:
is_optional, target_type = _resolve_optional(self._metadata.element_type)
value_type = OmegaConf.get_type(value)
if (value_type is None and not is_optional) or (
is_structured_config(target_type)
and value_type is not None
and not issubclass(value_type, target_type)
):
msg = (
f"Invalid type assigned: {type_str(value_type)} is not a "
f"subclass of {type_str(target_type)}. value: {value}"
)
raise ValidationError(msg)
def __deepcopy__(self, memo: Dict[int, Any]) -> "ListConfig":
res = ListConfig(None)
res.__dict__["_metadata"] = copy.deepcopy(self.__dict__["_metadata"], memo=memo)
res.__dict__["_flags_cache"] = copy.deepcopy(
self.__dict__["_flags_cache"], memo=memo
)
src_content = self.__dict__["_content"]
if isinstance(src_content, list):
content_copy: List[Optional[Node]] = []
for v in src_content:
old_parent = v.__dict__["_parent"]
try:
v.__dict__["_parent"] = None
vc = copy.deepcopy(v, memo=memo)
vc.__dict__["_parent"] = res
content_copy.append(vc)
finally:
v.__dict__["_parent"] = old_parent
else:
# None and strings can be assigned as is
content_copy = src_content
res.__dict__["_content"] = content_copy
res.__dict__["_parent"] = self.__dict__["_parent"]
return res
def copy(self) -> "ListConfig":
return copy.copy(self)
# hide content while inspecting in debugger
def __dir__(self) -> Iterable[str]:
if self._is_missing() or self._is_none():
return []
return [str(x) for x in range(0, len(self))]
def __setattr__(self, key: str, value: Any) -> None:
self._format_and_raise(
key=key,
value=value,
cause=ConfigAttributeError("ListConfig does not support attribute access"),
)
assert False
def __getattr__(self, key: str) -> Any:
# PyCharm is sometimes inspecting __members__, be sure to tell it we don't have that.
if key == "__members__":
raise AttributeError()
if key == "__name__":
raise AttributeError()
if is_int(key):
return self.__getitem__(int(key))
else:
self._format_and_raise(
key=key,
value=None,
cause=ConfigAttributeError(
"ListConfig does not support attribute access"
),
)
def __getitem__(self, index: Union[int, slice]) -> Any:
try:
if self._is_missing():
raise MissingMandatoryValue("ListConfig is missing")
self._validate_get(index, None)
if self._is_none():
raise TypeError(
"ListConfig object representing None is not subscriptable"
)
assert isinstance(self.__dict__["_content"], list)
if isinstance(index, slice):
result = []
start, stop, step = self._correct_index_params(index)
for slice_idx in itertools.islice(
range(0, len(self)), start, stop, step
):
val = self._resolve_with_default(
key=slice_idx, value=self.__dict__["_content"][slice_idx]
)
result.append(val)
if index.step and index.step < 0:
result.reverse()
return result
else:
return self._resolve_with_default(
key=index, value=self.__dict__["_content"][index]
)
except Exception as e:
self._format_and_raise(key=index, value=None, cause=e)
def _correct_index_params(self, index: slice) -> Tuple[int, int, int]:
start = index.start
stop = index.stop
step = index.step
if index.start and index.start < 0:
start = self.__len__() + index.start
if index.stop and index.stop < 0:
stop = self.__len__() + index.stop
if index.step and index.step < 0:
step = abs(step)
if start and stop:
if start > stop:
start, stop = stop + 1, start + 1
else:
start = stop = 0
elif not start and stop:
start = list(range(self.__len__() - 1, stop, -step))[0]
stop = None
elif start and not stop:
stop = start + 1
start = (stop - 1) % step
else:
start = (self.__len__() - 1) % step
return start, stop, step
def _set_at_index(self, index: Union[int, slice], value: Any) -> None:
self._set_item_impl(index, value)
def __setitem__(self, index: Union[int, slice], value: Any) -> None:
try:
if isinstance(index, slice):
_ = iter(value) # check iterable
self_indices = index.indices(len(self))
indexes = range(*self_indices)
# Ensure lengths match for extended slice assignment
if index.step not in (None, 1):
if len(indexes) != len(value):
raise ValueError(
f"attempt to assign sequence of size {len(value)}"
f" to extended slice of size {len(indexes)}"
)
# Initialize insertion offsets for empty slices
if len(indexes) == 0:
curr_index = self_indices[0] - 1
val_i = -1
# Delete and optionally replace non empty slices
only_removed = 0
for val_i, i in enumerate(indexes):
curr_index = i - only_removed
del self[curr_index]
if val_i < len(value):
self.insert(curr_index, value[val_i])
else:
only_removed += 1
# Insert any remaining input items
for val_i in range(val_i + 1, len(value)):
curr_index += 1
self.insert(curr_index, value[val_i])
else:
self._set_at_index(index, value)
except Exception as e:
self._format_and_raise(key=index, value=value, cause=e)
def append(self, item: Any) -> None:
content = self.__dict__["_content"]
index = len(content)
content.append(None)
try:
self._set_item_impl(index, item)
except Exception as e:
del content[index]
self._format_and_raise(key=index, value=item, cause=e)
assert False
def _update_keys(self) -> None:
for i in range(len(self)):
node = self._get_node(i)
if node is not None:
assert isinstance(node, Node)
node._metadata.key = i
def insert(self, index: int, item: Any) -> None:
from omegaconf.omegaconf import _maybe_wrap
try:
if self._get_flag("readonly"):
raise ReadonlyConfigError("Cannot insert into a read-only ListConfig")
if self._is_none():
raise TypeError(
"Cannot insert into ListConfig object representing None"
)
if self._is_missing():
raise MissingMandatoryValue("Cannot insert into missing ListConfig")
try:
assert isinstance(self.__dict__["_content"], list)
# insert place holder
self.__dict__["_content"].insert(index, None)
is_optional, ref_type = _resolve_optional(self._metadata.element_type)
node = _maybe_wrap(
ref_type=ref_type,
key=index,
value=item,
is_optional=is_optional,
parent=self,
)
self._validate_set(key=index, value=node)
self._set_at_index(index, node)
self._update_keys()
except Exception:
del self.__dict__["_content"][index]
self._update_keys()
raise
except Exception as e:
self._format_and_raise(key=index, value=item, cause=e)
assert False
def extend(self, lst: Iterable[Any]) -> None:
assert isinstance(lst, (tuple, list, ListConfig))
for x in lst:
self.append(x)
def remove(self, x: Any) -> None:
del self[self.index(x)]
def __delitem__(self, key: Union[int, slice]) -> None:
if self._get_flag("readonly"):
self._format_and_raise(
key=key,
value=None,
cause=ReadonlyConfigError(
"Cannot delete item from read-only ListConfig"
),
)
del self.__dict__["_content"][key]
self._update_keys()
def clear(self) -> None:
del self[:]
def index(
self, x: Any, start: Optional[int] = None, end: Optional[int] = None
) -> int:
if start is None:
start = 0
if end is None:
end = len(self)
assert start >= 0
assert end <= len(self)
found_idx = -1
for idx in range(start, end):
item = self[idx]
if x == item:
found_idx = idx
break
if found_idx != -1:
return found_idx
else:
self._format_and_raise(
key=None,
value=None,
cause=ConfigValueError("Item not found in ListConfig"),
)
assert False
def count(self, x: Any) -> int:
c = 0
for item in self:
if item == x:
c = c + 1
return c
def _get_node(
self,
key: Union[int, slice],
validate_access: bool = True,
validate_key: bool = True,
throw_on_missing_value: bool = False,
throw_on_missing_key: bool = False,
) -> Union[Optional[Node], List[Optional[Node]]]:
try:
if self._is_none():
raise TypeError(
"Cannot get_node from a ListConfig object representing None"
)
if self._is_missing():
raise MissingMandatoryValue("Cannot get_node from a missing ListConfig")
assert isinstance(self.__dict__["_content"], list)
if validate_access:
self._validate_get(key)
value = self.__dict__["_content"][key]
if value is not None:
if isinstance(key, slice):
assert isinstance(value, list)
for v in value:
if throw_on_missing_value and v._is_missing():
raise MissingMandatoryValue("Missing mandatory value")
else:
assert isinstance(value, Node)
if throw_on_missing_value and value._is_missing():
raise MissingMandatoryValue("Missing mandatory value: $KEY")
return value
except (IndexError, TypeError, MissingMandatoryValue, KeyValidationError) as e:
if isinstance(e, MissingMandatoryValue) and throw_on_missing_value:
raise
if validate_access:
self._format_and_raise(key=key, value=None, cause=e)
assert False
else:
return None
def get(self, index: int, default_value: Any = None) -> Any:
try:
if self._is_none():
raise TypeError("Cannot get from a ListConfig object representing None")
if self._is_missing():
raise MissingMandatoryValue("Cannot get from a missing ListConfig")
self._validate_get(index, None)
assert isinstance(self.__dict__["_content"], list)
return self._resolve_with_default(
key=index,
value=self.__dict__["_content"][index],
default_value=default_value,
)
except Exception as e:
self._format_and_raise(key=index, value=None, cause=e)
assert False
def pop(self, index: int = -1) -> Any:
try:
if self._get_flag("readonly"):
raise ReadonlyConfigError("Cannot pop from read-only ListConfig")
if self._is_none():
raise TypeError("Cannot pop from a ListConfig object representing None")
if self._is_missing():
raise MissingMandatoryValue("Cannot pop from a missing ListConfig")
assert isinstance(self.__dict__["_content"], list)
node = self._get_node(index)
assert isinstance(node, Node)
ret = self._resolve_with_default(key=index, value=node, default_value=None)
del self.__dict__["_content"][index]
self._update_keys()
return ret
except KeyValidationError as e:
self._format_and_raise(
key=index, value=None, cause=e, type_override=ConfigTypeError
)
assert False
except Exception as e:
self._format_and_raise(key=index, value=None, cause=e)
assert False
def sort(
self, key: Optional[Callable[[Any], Any]] = None, reverse: bool = False
) -> None:
try:
if self._get_flag("readonly"):
raise ReadonlyConfigError("Cannot sort a read-only ListConfig")
if self._is_none():
raise TypeError("Cannot sort a ListConfig object representing None")
if self._is_missing():
raise MissingMandatoryValue("Cannot sort a missing ListConfig")
if key is None:
def key1(x: Any) -> Any:
return x._value()
else:
def key1(x: Any) -> Any:
return key(x._value()) # type: ignore
assert isinstance(self.__dict__["_content"], list)
self.__dict__["_content"].sort(key=key1, reverse=reverse)
except Exception as e:
self._format_and_raise(key=None, value=None, cause=e)
assert False
def __eq__(self, other: Any) -> bool:
if isinstance(other, (list, tuple)) or other is None:
other = ListConfig(other, flags={"allow_objects": True})
return ListConfig._list_eq(self, other)
if other is None or isinstance(other, ListConfig):
return ListConfig._list_eq(self, other)
if self._is_missing():
return _is_missing_literal(other)
return NotImplemented
def __ne__(self, other: Any) -> bool:
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self) -> int:
return hash(str(self))
def __iter__(self) -> Iterator[Any]:
return self._iter_ex(resolve=True)
class ListIterator(Iterator[Any]):
def __init__(self, lst: Any, resolve: bool) -> None:
self.resolve = resolve
self.iterator = iter(lst.__dict__["_content"])
self.index = 0
from .nodes import ValueNode
self.ValueNode = ValueNode
def __next__(self) -> Any:
x = next(self.iterator)
if self.resolve:
x = x._dereference_node()
if x._is_missing():
raise MissingMandatoryValue(f"Missing value at index {self.index}")
self.index = self.index + 1
if isinstance(x, self.ValueNode):
return x._value()
else:
# Must be omegaconf.Container. not checking for perf reasons.
if x._is_none():
return None
return x
def __repr__(self) -> str: # pragma: no cover
return f"ListConfig.ListIterator(resolve={self.resolve})"
def _iter_ex(self, resolve: bool) -> Iterator[Any]:
try:
if self._is_none():
raise TypeError("Cannot iterate a ListConfig object representing None")
if self._is_missing():
raise MissingMandatoryValue("Cannot iterate a missing ListConfig")
return ListConfig.ListIterator(self, resolve)
except (TypeError, MissingMandatoryValue) as e:
self._format_and_raise(key=None, value=None, cause=e)
assert False
def __add__(self, other: Union[List[Any], "ListConfig"]) -> "ListConfig":
# res is sharing this list's parent to allow interpolation to work as expected
res = ListConfig(parent=self._get_parent(), content=[])
res.extend(self)
res.extend(other)
return res
def __radd__(self, other: Union[List[Any], "ListConfig"]) -> "ListConfig":
# res is sharing this list's parent to allow interpolation to work as expected
res = ListConfig(parent=self._get_parent(), content=[])
res.extend(other)
res.extend(self)
return res
def __iadd__(self, other: Iterable[Any]) -> "ListConfig":
self.extend(other)
return self
def __contains__(self, item: Any) -> bool:
if self._is_none():
raise TypeError(
"Cannot check if an item is in a ListConfig object representing None"
)
if self._is_missing():
raise MissingMandatoryValue(
"Cannot check if an item is in missing ListConfig"
)
lst = self.__dict__["_content"]
for x in lst:
x = x._dereference_node()
if x == item:
return True
return False
def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None:
try:
previous_content = self.__dict__["_content"]
self._set_value_impl(value, flags)
except Exception as e:
self.__dict__["_content"] = previous_content
raise e
def _set_value_impl(
self, value: Any, flags: Optional[Dict[str, bool]] = None
) -> None:
from omegaconf import MISSING, flag_override
if flags is None:
flags = {}
vk = get_value_kind(value, strict_interpolation_validation=True)
if _is_none(value, resolve=True):
if not self._is_optional():
raise ValidationError(
"Non optional ListConfig cannot be constructed from None"
)
self.__dict__["_content"] = None
elif vk is ValueKind.MANDATORY_MISSING:
self.__dict__["_content"] = MISSING
elif vk == ValueKind.INTERPOLATION:
self.__dict__["_content"] = value
else:
if not (is_primitive_list(value) or isinstance(value, ListConfig)):
type_ = type(value)
msg = f"Invalid value assigned: {type_.__name__} is not a ListConfig, list or tuple."
raise ValidationError(msg)
self.__dict__["_content"] = []
if isinstance(value, ListConfig):
self.__dict__["_metadata"] = copy.deepcopy(value._metadata)
self._metadata.flags = copy.deepcopy(flags)
# disable struct and readonly for the construction phase
# retaining other flags like allow_objects. The real flags are restored at the end of this function
with flag_override(self, ["struct", "readonly"], False):
for item in value._iter_ex(resolve=False):
self.append(item)
elif is_primitive_list(value):
with flag_override(self, ["struct", "readonly"], False):
for item in value:
self.append(item)
@staticmethod
def _list_eq(l1: Optional["ListConfig"], l2: Optional["ListConfig"]) -> bool:
l1_none = l1.__dict__["_content"] is None
l2_none = l2.__dict__["_content"] is None
if l1_none and l2_none:
return True
if l1_none != l2_none:
return False
assert isinstance(l1, ListConfig)
assert isinstance(l2, ListConfig)
if len(l1) != len(l2):
return False
for i in range(len(l1)):
if not BaseContainer._item_eq(l1, i, l2, i):
return False
return True
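# Illustrative usage sketch (not part of the library source): a ListConfig is
# normally obtained through OmegaConf.create() rather than constructed
# directly. The snippet exercises append, slice assignment and equality with a
# plain list, which mirror the semantics implemented above.
def _example_listconfig():
    from omegaconf import OmegaConf
    lst = OmegaConf.create([1, 2, 3, 4])
    lst.append(5)                # -> [1, 2, 3, 4, 5]
    lst[0:2] = [10, 20, 30]      # basic slice: the list may grow or shrink
    lst[::2] = [0, 0, 0]         # extended slice: lengths must match
    assert lst == [0, 20, 0, 3, 0, 5]
    return lst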
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0 + node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
self.setup_nodes()
def run_test(self):
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
[self.generate(n, 1, sync_fun=self.no_op) for n in self.nodes]
tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_and_ping(msg_block(blocks_h2[0]))
min_work_node.send_and_ping(msg_block(blocks_h2[1]))
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_and_ping(msg_block(block_h1f))
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_and_ping(msg_block(block_h2f))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_and_ping(msg_block(block_h3))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it is not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_and_ping(msg_block(all_blocks[1]))
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_and_ping(msg_block(all_blocks[1]))
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.send_and_ping(msg_block(block_h1f))
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with p2p_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))
test_node.sync_with_ping()
with p2p_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_and_ping(msg_block(block_h1f))
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
# block_291 spends a coinbase below maturity!
tx_to_add = create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1)
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1, txlist=[tx_to_add])
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_and_ping(headers_message)
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_and_ping(msg_block(block_290f))
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
self.connect_nodes(0, 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
|
import unittest
import functools
import math
import numpy
from operator import mul
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPoolingND(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
        # Avoid instability of the numerical gradient
x_shape = (2, 3) + self.dims
self.x = numpy.arange(
functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)
self.x = 2 * self.x / self.x.size - 1
outs = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p)
in six.moves.zip(
self.dims, self.ksize, self.stride, self.pad))
gy_shape = (2, 3) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, x_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, x_data, use_cudnn='always'):
dims = self.dims
ksize = self.ksize
stride = self.stride
pad = self.pad
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
patches = pooling_nd_helper.pooling_patches(
dims, ksize, stride, pad, self.cover_all)
for i in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[i, c]
expect = numpy.array([x[idx].max() for idx in patches])
expect = expect.reshape(y_data.shape[2:])
testing.assert_allclose(expect, y_data[i, c])
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, use_cudnn='never')
def test_forward_cpu_wide(self): # see #120
ndim = self.ndim
x_shape = (2, 3) + (15,) * ndim
x_data = numpy.random.rand(*x_shape).astype(self.dtype)
x = chainer.Variable(x_data)
ksize = stride = int(math.ceil(pow(32, 1.0 / ndim)))
functions.max_pooling_nd(x, ksize, stride=stride, pad=0)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.cudnn
@condition.retry(3)
def test_forward_gpu_non_contiguous(self):
self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
# Regression test to max_pooling_2d.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = functions.max_pooling_nd(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_2d = functions.max_pooling_2d(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
testing.assert_allclose(y_nd.data, y_2d.data)
@condition.retry(3)
def test_forward_consistency_regression_cpu(self):
self.check_forward_consistency_regression(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_consistency_regression_gpu(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_consistency_regression_no_cudnn(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, y_grad, use_cudnn='always'):
def f(x):
return functions.max_pooling_nd(
x, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
f, x_data, y_grad, dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.cudnn
@condition.retry(3)
def test_backward_gpu_non_contiguous(self):
self.check_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_backward_consistency_regression(self, x_data, gy_data,
use_cudnn='always'):
# Regression test to two-dimensional max pooling layer.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = cuda.get_array_module(x_data)
# Backward computation for N-dimensional max pooling layer.
x_nd = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = functions.max_pooling_nd(
x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional max pooling layer.
x_2d = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
y_2d = functions.max_pooling_2d(
x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
testing.assert_allclose(x_nd.grad, x_2d.grad)
@condition.retry(3)
def test_backward_consistency_regression_cpu(self):
self.check_backward_consistency_regression(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_consistency_regression_gpu(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_consistency_regression_no_cudnn(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), use_cudnn='never')
def test_backward_cpu_more_than_once(self):
func = functions.pooling.max_pooling_nd.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
func.apply((self.x,))
func.backward((self.x,), (self.gy,))
func.backward((self.x,), (self.gy,))
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
def f(x):
return functions.max_pooling_nd(
x, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
dtype='d',
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx, 'never')
@attr.cudnn
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@attr.cudnn
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
'never')
@testing.parameterize(*testing.product({
'dims': [(4, 3, 2), (3, 2), (2,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPoolingNDCudnnCall(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),
dtype=self.dtype).reshape(x_shape)
gy_shape = (2, 3) + tuple(
conv.get_conv_outsize(d, k, s, p)
for (d, k, s, p)
in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cuda.cudnn.poolingForward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto') and
self.ndim > 1)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
y = self.forward()
# should be consistent to forward regardless of use_cudnn config
y.grad = self.gy
with testing.patch('cupy.cuda.cudnn.poolingBackward') as func:
y.backward()
self.assertEqual(func.called, expect)
class TestMaxPoolingNDIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(
2 * 3 * 4 * 4, dtype=numpy.float32).reshape(2, 3, 4, 4)
def _check(self, x):
out, indices = functions.max_pooling_nd(
x, 2, cover_all=False, return_indices=True)
assert isinstance(out, chainer.Variable)
assert isinstance(out.array, type(x))
assert isinstance(indices, type(x))
assert indices.shape == out.array.shape
# Calculate expected indices.
expect = numpy.zeros(indices.shape, dtype=indices.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
expect[i, c] = numpy.array([
[xx[0:2, 0:2].ravel().argmax(),
xx[0:2, 2:4].ravel().argmax()],
[xx[2:4, 0:2].ravel().argmax(),
xx[2:4, 2:4].ravel().argmax()],
])
if out.xp is not numpy:
expect = cuda.to_gpu(expect)
assert (expect == indices).all()
def test_cpu(self):
self._check(self.x)
@attr.gpu
@attr.cudnn
def test_gpu(self):
x = cuda.to_gpu(self.x)
with chainer.using_config('use_cudnn', 'never'):
self._check(x)
with chainer.using_config('use_cudnn', 'always'):
self._check(x)
testing.run_module(__name__, __file__)
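# Worked example of the output-size arithmetic used in setUp above
# (illustrative): for one input extent d=4 with ksize k=3, stride s=2 and
# pad p=1, conv.get_conv_outsize gives (d + 2*p - k)//s + 1 = 2 when
# cover_all=False and (d + 2*p - k + s - 1)//s + 1 = 3 when cover_all=True,
# which is how the expected gradient shape gy_shape is built per dimension.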
|
|
#!/usr/bin/python
"""
Communication module for interfacing with Polly, a deterministic Bitcoin hardware wallet adhering to BIP32.
Requires the HID API for USB communications.
The MIT License (MIT)
Copyright (c) 2014 by Nathaniel Burke
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import hid
import serial
from serial.tools import list_ports
import time
from struct import pack, unpack
# Polly USB vendor and device ID
POLLY_VID = 0x0451
POLLY_DID = 0x16C9
# Commands # Response (assuming properly formed command)
CMD_RESET = 1 # SUCCESS
CMD_IDENTIFY = 2 # SUCCESS
CMD_GET_PUBLIC_KEY = 3 # SUCCESS, INVALID
CMD_SIGN_TX = 4 # SUCCESS, INVALID
CMD_PREV_TX = 5 # SUCCESS, INVALID
CMD_GET_SIGNED_TX = 6 # SUCCESS, INVALID, USER, DENIED, BUSY
CMD_FW_DOWNLOAD = 8 # SUCCESS, INVALID
CMD_SET_MASTER_SEED = 11 # SUCCESS, INVALID
CMD_ACK_SUCCESS = 32
CMD_ACK_INVALID = 33
CMD_ACK_DENIED = 34
CMD_ACK_USER = 35
CMD_ACK_BUSY = 36
# Command payloads
CMD_SIMPLE_BYTES = 1
CMD_IDENTIFY_RESP_BYTES = 17
CMD_GET_PUBLIC_KEY_RESP_BYTES = 65 + 32
CMD_GET_PUBLIC_KEY_BYTES = 8
CMD_SET_MASTER_SEED_MAX_BYTES = ((18 * 8) + 7) # 18 words, max 8 chars per word, 7 spaces
# Packet size
PACKET_BYTES = 64
# Control flow
CTRL_START = 0x80
CTRL_CONT = 0x88
CTRL_START_STREAM = 0xC0
CTRL_CONT_STREAM = 0xC8
# Default command timeout
READ_TIMEOUT_MS = 100000
class PollyCom:
"""
Class for communication with the Polly hardware Bitcoin wallet.
"""
# General device handle, could be USB or Bluetooth serial
dev = None
# String for the handle type ('usb' or 'bluetooth')
devtype = None
KEY_MASTER = 0
KEY_ACCOUNT = 1
KEY_CHAIN = 2
KEY_ADDRESS = 3
def __init__(self, usbscan = True):
# Tracks time to execute commands on Polly
self.t = 0
# Make a connection with Polly
if None == PollyCom.dev :
print ()
print ("Connecting to Polly")
print ("-------------------")
print ()
print ("Trying USB : ", end = '')
PollyCom.dev = hid.device()
            try:
                if True == usbscan:
self.__usb_scan()
PollyCom.dev.open(POLLY_VID, POLLY_DID)
PollyCom.devtype = 'usb'
# TODO flush out any previous command data
model = self.send_identify()
if 'Polly' in model:
print ("found")
print ()
print (" Manufacturer : %s" % PollyCom.dev.get_manufacturer_string())
print (" Product : %s" % PollyCom.dev.get_product_string())
print (" Serial No : %s" % PollyCom.dev.get_serial_number_string())
print (" Model : %s" % model)
return
else:
raise IOError()
except IOError:
print ("not found")
# Look at all the Bluetooth serial ports
ports = list_ports.comports()
PollyCom.devtype = 'bluetooth'
bt_com = False
for port, name, _ in ports:
if 'BLUETOOTH' in name.upper():
bt_com = True
print ("Trying Bluetooth serial", port, ": ", end = '')
try:
PollyCom.dev = serial.Serial(port, 115200, timeout = 3, writeTimeout = 3)
# TODO flush out any previous command data
model = self.send_identify()
if 'Polly' in model:
print ("found")
print ()
print (" Model : %s" % model)
PollyCom.devtype = 'bluetooth'
return
except IOError:
# Unable to connect
print ("not found")
if False == bt_com:
print ("Trying Bluetooth serial : no Bluetooth COM ports found")
print ("\n ERROR: Polly not found")
raise Exception('Polly not found')
def send_reset(self):
"""
Sends the reset command and waits for an ACK.
"""
# Send
data = pack('<IB', CMD_SIMPLE_BYTES, CMD_RESET)
self.send_data(data)
# Receive
data = self.get_data()
cmd_bytes, cmd = unpack('<IB', bytes(data))
assert cmd_bytes == CMD_SIMPLE_BYTES and\
cmd == CMD_ACK_SUCCESS, "send_reset : FAILED"
def send_identify(self):
"""
Sends the identify command and returns the ID string.
"""
# Send
data = pack('<IB', CMD_SIMPLE_BYTES, CMD_IDENTIFY)
self.send_data(data)
# Receive
data = self.get_data()
cmd_bytes, cmd, idstr = unpack('<IB16s', bytes(data))
assert cmd_bytes == CMD_IDENTIFY_RESP_BYTES and\
cmd == CMD_ACK_SUCCESS, "send_get_id : FAILED"
return ''.join(map(chr,idstr))
def send_set_master_seed(self, wordlist):
"""
Sends the set master seed command and waits for an ACK.
wordlist - a space separated string of 18 mnemonic words from the Polly wordlist.
Note: the checksum must be correct (part of the 18th word) - see BIP0039.
gen_wordlist can be used to generate a wordlist including the proper checksum.
"""
assert len(wordlist.split(" ")) == 18, "expecting 18 words"
assert len(wordlist) <= CMD_SET_MASTER_SEED_MAX_BYTES, "seed too long, must have invalid words"
# Send
data = pack('<IB' + str(len(wordlist)) + 's', 1 + len(wordlist), CMD_SET_MASTER_SEED, bytes(wordlist, 'utf-8'))
self.send_data(data)
# Receive
data = self.get_data()
cmd_bytes, cmd = unpack('<IB', bytes(data))
assert cmd_bytes == CMD_SIMPLE_BYTES and\
cmd == CMD_ACK_SUCCESS, "send_set_master_seed : FAILED"
def send_get_public_key(self, keytype, account, chain, address):
"""
Sends the get public key command and waits for the key.
keytype - Type of key to retrieve, valid values are KEY_MASTER, KEY_ACCOUNT, KEY_CHAIN, or KEY_ADDRESS.
account - Account to use for type KEY_ACCOUNT|CHAIN|ADDRESS.
chain - Chain to use for type KEY_CHAIN|ADDRESS.
address - Index (0 - 0x7FFF_FFFF) to use for type KEY_ADDRESS.
Returns a extended public key in the form (x,y,chaincode). (0,0) indicates a failure occured.
"""
assert address < 0x80000000, "hardened address keys are not supported"
# Send
data = pack('<IBBBBL', CMD_GET_PUBLIC_KEY_BYTES, CMD_GET_PUBLIC_KEY, keytype, account, chain, address)
self.send_data(data)
# Receive
data = self.get_data()
cmd_bytes, cmd, pub_x, pub_y, chaincode = unpack('IB32s32s32s', bytes(data))
assert cmd_bytes == CMD_GET_PUBLIC_KEY_RESP_BYTES, "send_get_public_key : FAILED"
if cmd == CMD_ACK_SUCCESS:
return int.from_bytes(pub_x, 'big'), int.from_bytes(pub_y, 'big'), chaincode
return 0, 0, 0
def send_sign_tx(self, in_key_num_pubkey, out_addr_160, out_satoshi, change_key_num, change_satoshi):
"""
Sends the initial information needed to sign a tx and waits for an ACK.
Note: This command must be followed by one or more send_prev_tx() calls to support
the key nums used to fund the payment. Finally, send_get_signed_tx() must be called
to get the signed tx.
in_key_num_pubkey - tuple list of the form (in_key_num, in_key_pubkey). Each entry contains the key
number to fund payment (0 - 0x7FFF_FFFF) and the key's compressed public
key (33 bytes).
out_addr_160 - output address to pay in a RIPEMD-160 form.
out_satoshi - satoshis to output.
change_key_num - send change to this key num (0 - 0x7FFF_FFFF). Pass 'None' for no change.
change_satoshi - satoshis to change.
"""
# Number of inputs
data = pack('B', len(in_key_num_pubkey))
# Input key ids and their public keys, assuming a m/0h/0/in_key_num path
for in_key_num, in_key_pubkey in in_key_num_pubkey:
data = data + pack('<BBI33s', 0, 0, in_key_num, in_key_pubkey)
# Output address
data = data + pack('<20sQ', out_addr_160, out_satoshi)
# Change address (optional), assuming an m/0h/1/change_key_num path
if change_key_num != None:
data = data + pack('<BBIQ', 0, 1, change_key_num, change_satoshi)
# Command id and number of bytes
data = pack('<IB', len(data) + 1, CMD_SIGN_TX) + data
# Send
self.send_data(data)
# Receive
data = self.get_data()
cmd_bytes, cmd = unpack('<IB', bytes(data))
assert cmd_bytes == CMD_SIMPLE_BYTES and\
cmd == CMD_ACK_SUCCESS, "send_sign_tx : FAILED"
def send_prev_tx(self, in_idx_out_idx, prev_tx_data):
"""
Sends a previous tx for one or more input keys sent in send_sign_tx, and waits for an ACK.
Note: This command must be preceded by send_sign_tx() and followed by send_get_signed_tx()
to get the signed tx.
Each input key sent in send_sign_tx() must have an associated previous transaction to indicate
how many unspent coins it has. This function is used to send these supporting transactions.
These can be faked (e.g. set the input values very high), and Polly will sign the tx. However,
if the previous tx is not found in the blockchain the network will reject the signed tx.
in_idx_out_idx - tuple list in the form (in_idx, out_idx). Input keys are indexed by the
order they were presented to the device in send_sign_tx(). in_idx is
this 0-based index. Each input key num (associated with in_idx) must have
unspent coins. The out_idx is the output index from this previous tx
that matches the input key num and indicates its unspent coins.
prev_tx_data - a byte stream of the complete previous tx.
"""
# Compile the out index information
data = pack('<B', len(in_idx_out_idx))
for in_idx, out_idx in in_idx_out_idx :
data += pack('<BL', in_idx, out_idx)
# Pack the command header and prev tx
send_len = len(prev_tx_data) + len(data) + 1
data = pack('<IB' , send_len, CMD_PREV_TX) + data + pack(str(len(prev_tx_data)) + 's', prev_tx_data)
# Send
self.send_data(data, stream = True)
# Receive
data = self.get_data()
cmd_bytes, cmd = unpack('<IB', bytes(data))
assert cmd_bytes == CMD_SIMPLE_BYTES and\
cmd == CMD_ACK_SUCCESS, "send_prev_tx : FAILED"
def send_get_signed_tx(self):
"""
Sends the get signed tx command and waits for a response.
Note: This command must be preceded by send_sign_tx() and then by one or more
send_prev_tx() calls to support the key nums used to fund the payment.
Returns a complete signed tx.
"""
while True:
# Send
data = pack('<IB', CMD_SIMPLE_BYTES, CMD_GET_SIGNED_TX)
self.send_data(data)
# Receive
data = self.get_data()
cmd_bytes, cmd = unpack('<IB', bytes(data[0:5]))
# SUCCESS, INVALID, USER, DENIED, BUSY
if cmd == CMD_ACK_SUCCESS:
# Strip away the command and command bytes, just return the signed tx
return bytes(data[5:(5 + cmd_bytes)])
elif cmd == CMD_ACK_INVALID:
assert 0, "send_get_signed_tx: invalid response, command incorrect"
elif cmd == CMD_ACK_USER:
pass
elif cmd == CMD_ACK_BUSY:
pass
elif cmd == CMD_ACK_DENIED:
assert 0, "send_get_signed_tx: user denied the signing"
time.sleep(0.5)
def send_fw_download(self, fwfile):
"""
Sends the FW download command to update the device FW.
Throws FileNotFoundError if the file passed in does not exist
file_handle - path to and file name of the FW image to download.
Returns True if succeeded, False otherwise.
"""
with open(fwfile, "rb") as f:
fw_id = unpack('I', f.read(4))
if fw_id[0] != 0xdeadbeef :
print(hex(fw_id[0]))
return False;
f.seek(0);
fwdata = f.read()
# Pack the command header and prev tx
data = pack('<IB' , len(fwdata) + 1, CMD_FW_DOWNLOAD) + fwdata
# Send
self.send_data(data, stream = True)
# Receive
data = self.get_data()
cmd_bytes, cmd = unpack('<IB', bytes(data))
assert cmd_bytes == CMD_SIMPLE_BYTES and\
cmd == CMD_ACK_SUCCESS, "send_prev_tx : FAILED"
def get_cmd_time(self):
"""
Returns the time in seconds to execute the last command.
"""
return "{0:.3f}s".format(self.t)
def send_data(self, data, stream = False):
"""
Sends raw data to Polly via USB, typically the command specific functions are used instead of this.
data - raw byte array to packet. Packetization and padding is done by this routine.
stream - use stream flow control if True, or normal control if False
"""
# Commands to Polly are always send_data/get_data pairs
# Start the timer here, it will be stopped in get_data
self.t = time.clock()
if not stream :
ctrl_start = CTRL_START
ctrl_cont = CTRL_CONT
else:
ctrl_start = CTRL_START_STREAM
ctrl_cont = CTRL_CONT_STREAM
ctrl_byte = ctrl_start
# The command byte count in the data does not include the count field itself, hence the +4
data_bytes_remain = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0] + 4;
data_offset = 0
# Send out the data
while (data_bytes_remain > 0):
# Room must be left for the control flow byte, hence PACKET_BYTES - 1
data_bytes = min(data_bytes_remain, PACKET_BYTES - 1)
packet = bytes([ctrl_byte]) + data[data_offset : data_offset + data_bytes]
# Pad out the packet if it is < PACKET_BYTES
if len(packet) < PACKET_BYTES:
packet = packet + bytes(PACKET_BYTES - len(packet))
# USB needs the preamble byte, it is stripped off by Polly upon reception
if PollyCom.devtype == 'usb':
packet = b'\x00' + packet
if PollyCom.dev.write(packet) != 64: raise IOError
time.sleep(0.01)
data_offset += data_bytes
data_bytes_remain -= data_bytes
ctrl_byte = ctrl_cont
def get_data(self, timeout = READ_TIMEOUT_MS):
"""
Gets raw data from Polly via USB, typically the command specific functions are used instead of this.
Returns a single raw byte array with flow control bytes stripped.
"""
data = []
# Read in the first chunk
if (PollyCom.devtype == 'usb'):
tmp = PollyCom.dev.read(PACKET_BYTES, timeout)
else:
tmp = PollyCom.dev.read(PACKET_BYTES)
assert tmp, "read timeout"
assert tmp[0] == CTRL_START, "invalid control token, expecting CTRL_START"
# The command bytes count, plus the command bytes count field itself
data_bytes = (tmp[4] << 24) + (tmp[3] << 16) + (tmp[2] << 8) + tmp[1] + 4
# Read in the rest
while True:
# Stripping off the control byte, hence PACKET_BYTES - 1
read_bytes = min(data_bytes, PACKET_BYTES - 1)
# Strip off the control byte
data += tmp[1 : read_bytes + 1]
data_bytes -= read_bytes
if data_bytes < 1 :
break
if (PollyCom.devtype == 'usb'):
tmp = PollyCom.dev.read(PACKET_BYTES, timeout)
else:
tmp = PollyCom.dev.read(PACKET_BYTES)
assert tmp, "read timeout"
assert tmp[0] == CTRL_CONT, "invalid control token, expecting CTRL_CONT"
# Calculate the time delta between send_data and get_data (the total command time)
self.t = time.clock() - self.t;
return data
def __usb_scan(self):
"""
Diagnostic scan of available USB devices.
"""
for d in hid.enumerate(0, 0):
keys = d.keys()
for key in keys:
print ("%s : %s" % (key, d[key]))
print ("")
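# Illustrative command-flow sketch (not part of the original module): per the
# docstrings above, a signing session is send_sign_tx(), then one send_prev_tx()
# call per funding input, then send_get_signed_tx(). All values passed in here
# (key number, public key, previous tx bytes, output hash, amount) are
# placeholders supplied by the caller.
def _example_sign_flow(polly, in_key_num, in_pubkey, prev_tx_bytes,
                       out_addr_160, out_satoshi):
    # declare the single funding input and the output to pay, with no change
    polly.send_sign_tx([(in_key_num, in_pubkey)], out_addr_160, out_satoshi,
                       change_key_num=None, change_satoshi=0)
    # supply the previous tx whose output index 0 funds input index 0
    polly.send_prev_tx([(0, 0)], prev_tx_bytes)
    # polls until the user confirms on the device; returns the signed raw tx
    return polly.send_get_signed_tx()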
def blueserial():
ser = serial.Serial("COM3", 115200)
ser.write(bytearray("$$$", 'ascii'))
ser.timeout = 1
data = ser.read(1000)
print(str(data))
ser.close()
def main():
PollyCom()
if __name__ == '__main__':
status = main()
sys.exit(status)
|
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import uuid
from ply import lex
from ply import yacc
from yaql.language import exceptions
from yaql.language import expressions
from yaql.language import lexer
from yaql.language import parser
from yaql.language import utils
OperatorType = collections.namedtuple('OperatorType', [
'PREFIX_UNARY', 'SUFFIX_UNARY',
'BINARY_LEFT_ASSOCIATIVE', 'BINARY_RIGHT_ASSOCIATIVE',
'NAME_VALUE_PAIR'
])(
PREFIX_UNARY='PREFIX_UNARY',
SUFFIX_UNARY='SUFFIX_UNARY',
BINARY_LEFT_ASSOCIATIVE='BINARY_LEFT_ASSOCIATIVE',
BINARY_RIGHT_ASSOCIATIVE='BINARY_RIGHT_ASSOCIATIVE',
NAME_VALUE_PAIR='NAME_VALUE_PAIR'
)
class YaqlOperators(object):
def __init__(self, operators, name_value_op=None):
self.operators = operators
self.name_value_op = name_value_op
class YaqlEngine(object):
def __init__(self, ply_lexer, ply_parser, options, factory):
self._lexer = ply_lexer
self._parser = ply_parser
self._options = utils.FrozenDict(options or {})
self._factory = factory
@property
def lexer(self):
return self._lexer
@property
def parser(self):
return self._parser
@property
def options(self):
return self._options
@property
def factory(self):
return self._factory
def __call__(self, expression, options=None):
if options:
return self.copy(options)(expression)
return expressions.Statement(
self.parser.parse(expression, lexer=self.lexer), self)
def copy(self, options):
opt = dict(self._options)
opt.update(options)
return YaqlEngine(self._lexer, self._parser, opt, self._factory)
class YaqlFactory(object):
def __init__(self, keyword_operator='=>', allow_delegates=False):
self._keyword_operator = keyword_operator
self._allow_delegates = allow_delegates
self.operators = self._standard_operators()
if keyword_operator:
self.operators.insert(0, (keyword_operator,
OperatorType.NAME_VALUE_PAIR))
@property
def keyword_operator(self):
return self._keyword_operator
@property
def allow_delegates(self):
return self._allow_delegates
# noinspection PyMethodMayBeStatic
def _standard_operators(self):
return [
('.', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('?.', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('[]', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('{}', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('+', OperatorType.PREFIX_UNARY),
('-', OperatorType.PREFIX_UNARY),
(),
('=~', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('!~', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('*', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('/', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('mod', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('+', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('-', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('>', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('<', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('>=', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('<=', OperatorType.BINARY_LEFT_ASSOCIATIVE),
('!=', OperatorType.BINARY_LEFT_ASSOCIATIVE, 'not_equal'),
('=', OperatorType.BINARY_LEFT_ASSOCIATIVE, 'equal'),
('in', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('not', OperatorType.PREFIX_UNARY),
(),
('and', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('or', OperatorType.BINARY_LEFT_ASSOCIATIVE),
(),
('->', OperatorType.BINARY_RIGHT_ASSOCIATIVE),
]
def insert_operator(self, existing_operator, existing_operator_binary,
new_operator, new_operator_type, create_group,
new_operator_alias=None):
binary_types = (OperatorType.BINARY_RIGHT_ASSOCIATIVE,
OperatorType.BINARY_LEFT_ASSOCIATIVE)
unary_types = (OperatorType.PREFIX_UNARY, OperatorType.SUFFIX_UNARY)
position = 0
if existing_operator is not None:
position = -1
for i, t in enumerate(self.operators):
if len(t) < 2 or t[0] != existing_operator:
continue
if existing_operator_binary and t[1] not in binary_types:
continue
if not existing_operator_binary and t[1] not in unary_types:
continue
position = i
break
if position < 0:
raise ValueError('Operator {0} is not found'.format(
existing_operator))
while position < len(self.operators) and len(
self.operators[position]) > 1:
position += 1
if create_group:
if position == len(self.operators):
self.operators.append(())
position += 1
else:
while position < len(self.operators) and len(
self.operators[position]) < 2:
position += 1
self.operators.insert(position, ())
self.operators.insert(
position, (new_operator, new_operator_type, new_operator_alias))
@staticmethod
def _name_generator():
value = 1
while True:
t = value
chars = []
while t:
chars.append(chr(ord('A') + t % 26))
t //= 26
yield ''.join(chars)
value += 1
def _build_operator_table(self, name_generator):
operators = {}
name_value_op = None
precedence = 1
for record in self.operators:
if not record:
precedence += 1
continue
up, bp, name, alias = operators.get(record[0], (0, 0, '', None))
if record[1] == OperatorType.NAME_VALUE_PAIR:
if name_value_op is not None:
raise exceptions.InvalidOperatorTableException(record[0])
name_value_op = record[0]
continue
if record[1] == OperatorType.PREFIX_UNARY:
if up:
raise exceptions.InvalidOperatorTableException(record[0])
up = precedence
elif record[1] == OperatorType.SUFFIX_UNARY:
if up:
raise exceptions.InvalidOperatorTableException(record[0])
up = -precedence
elif record[1] == OperatorType.BINARY_LEFT_ASSOCIATIVE:
if bp:
raise exceptions.InvalidOperatorTableException(record[0])
bp = precedence
elif record[1] == OperatorType.BINARY_RIGHT_ASSOCIATIVE:
if bp:
raise exceptions.InvalidOperatorTableException(record[0])
bp = -precedence
if record[0] == '[]':
name = 'INDEXER'
elif record[0] == '{}':
name = 'MAP'
else:
name = name or 'OP_' + next(name_generator)
operators[record[0]] = (
up, bp, name, record[2] if len(record) > 2 else None)
return YaqlOperators(operators, name_value_op)
# noinspection PyMethodMayBeStatic
def _create_lexer(self, operators):
return lexer.Lexer(operators)
# noinspection PyMethodMayBeStatic
def _create_parser(self, lexer_rules, operators):
return parser.Parser(lexer_rules, operators, self)
def create(self, options=None):
names = self._name_generator()
operators = self._build_operator_table(names)
lexer_rules = self._create_lexer(operators)
ply_lexer = lex.lex(object=lexer_rules,
reflags=re.UNICODE | re.VERBOSE)
ply_parser = yacc.yacc(
module=self._create_parser(lexer_rules, operators),
debug=False if not options else options.get('yaql.debug', False),
tabmodule='m' + uuid.uuid4().hex, write_tables=False)
return YaqlEngine(ply_lexer, ply_parser, options, self)
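# A minimal illustrative sketch (not part of yaql itself): typical use of
# YaqlFactory is to build an engine once and reuse it to parse expressions.
# Calling the engine returns an expressions.Statement (see YaqlEngine.__call__
# above); evaluating a statement additionally requires a yaql context, which is
# outside the scope of this module. The function below is hypothetical and is
# not used elsewhere.
def _example_engine_usage():
    factory = YaqlFactory()
    engine = factory.create(options={'yaql.debug': False})
    return engine('2 + 3 * $x')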
|
|
# pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import six
from time import sleep
import platform
import errno
from .interface import Interface
from .common import (
USB_CLASS_HID,
filter_device_by_class,
is_known_cmsis_dap_vid_pid,
check_ep,
)
from ..dap_access_api import DAPAccessIntf
from ... import common
LOG = logging.getLogger(__name__)
try:
import usb.core
import usb.util
except ImportError:
if platform.system() == "Linux":
LOG.error("PyUSB is required for CMSIS-DAP support on Linux")
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyUSB(Interface):
"""! @brief CMSIS-DAP USB interface class using pyusb for the backend.
"""
isAvailable = IS_AVAILABLE
did_show_no_libusb_warning = False
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
self.packet_size = 64
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=FindDap(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get count of HID interfaces and create the matcher object
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
matcher = MatchCmsisDapv1Interface(hid_interface_count)
# Get CMSIS-DAPv1 interface
interface = usb.util.find_descriptor(config, custom_match=matcher)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv1 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints
ep_in, ep_out = None, None
for endpoint in interface:
if endpoint.bEndpointAddress & usb.util.ENDPOINT_IN:
ep_in = endpoint
else:
ep_out = endpoint
# Detach kernel driver
self.kernel_driver_was_attached = False
try:
if dev.is_kernel_driver_active(interface_number):
LOG.debug("Detaching Kernel Driver of Interface %d from USB device (VID=%04x PID=%04x).", interface_number, dev.idVendor, dev.idProduct)
dev.detach_kernel_driver(interface_number)
self.kernel_driver_was_attached = True
except (NotImplementedError, usb.core.USBError) as e:
            # Some implementations don't have kernel attach/detach
LOG.warning("USB Kernel Driver Detach Failed ([%s] %s). Attached driver may interfere with pyOCD operations.", e.errno, e.strerror)
pass
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
try:
while not self.closed:
self.read_sem.acquire()
if not self.closed:
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected CMSIS-DAP devices.
returns an array of PyUSB (Interface) objects
"""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=FindDap())
except usb.core.NoBackendError:
if not PyUSB.did_show_no_libusb_warning:
LOG.warning("CMSIS-DAPv1 probes may not be detected because no libusb library was found.")
PyUSB.did_show_no_libusb_warning = True
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSB()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint associated to the HID interface
"""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
self.ep_out.write(data)
def read(self):
"""! @brief Read data on the IN endpoint associated to the HID interface
"""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
self.serial_number)
return self.rcv_data.pop(0)
def close(self):
"""! @brief Close the interface
"""
assert self.closed is False
LOG.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
usb.util.release_interface(self.dev, self.intf_number)
if self.kernel_driver_was_attached:
try:
self.dev.attach_kernel_driver(self.intf_number)
except Exception as exception:
LOG.warning('Exception attaching kernel driver: %s',
str(exception))
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.kernel_driver_was_attached = False
self.thread = None
class MatchCmsisDapv1Interface(object):
"""! @brief Match class for finding CMSIS-DAPv1 interface.
This match class performs several tests on the provided USB interface descriptor, to
determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the
interface:
1. If there is more than one HID interface on the device, the interface must have an interface
name string containing "CMSIS-DAP".
2. bInterfaceClass must be 0x03 (HID).
3. bInterfaceSubClass must be 0.
4. Must have interrupt in endpoint, with an optional interrupt out endpoint, in that order.
"""
def __init__(self, hid_interface_count):
"""! @brief Constructor."""
self._hid_count = hid_interface_count
def __call__(self, interface):
"""! @brief Return True if this is a CMSIS-DAPv1 interface."""
try:
if self._hid_count > 1:
interface_name = usb.util.get_string(interface.device, interface.iInterface)
# This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2.
if (interface_name is None) or ("CMSIS-DAP" not in interface_name):
return False
# Now check the interface class to distinguish v1 from v2.
if (interface.bInterfaceClass != USB_CLASS_HID) \
or (interface.bInterfaceSubClass != 0):
return False
# Must have either 1 or 2 endpoints.
if interface.bNumEndpoints not in (1, 2):
return False
# Endpoint 0 must be interrupt in.
if not check_ep(interface, 0, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR):
return False
# Endpoint 1 is optional. If present it must be interrupt out.
if (interface.bNumEndpoints == 2) \
and not check_ep(interface, 1, usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR):
return False
            # All checks passed, this is a CMSIS-DAPv1 interface!
return True
except (UnicodeDecodeError, IndexError):
            # UnicodeDecodeError exception can be raised if the device has a corrupted interface name.
            # Certain versions of STLinkV2 are known to have this problem. If we can't read the
            # interface name, there's no way to tell whether it's a CMSIS-DAPv1 interface.
#
# IndexError can be raised if an endpoint is missing.
return False
class FindDap(object):
"""! @brief CMSIS-DAP match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""! @brief Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""! @brief Return True if this is a DAP device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
# First attempt to get the active config. This produces a more direct error
# when you don't have device permissions on Linux
config = dev.get_active_configuration()
# Now read the product name string.
device_string = dev.product
if (device_string is None) or ("CMSIS-DAP" not in device_string):
return False
# Get count of HID interfaces.
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
# Find the CMSIS-DAPv1 interface.
matcher = MatchCmsisDapv1Interface(hid_interface_count)
cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=matcher)
except usb.core.USBError as error:
if error.errno == errno.EACCES and platform.system() == "Linux":
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
LOG.warning(msg)
else:
LOG.debug(msg)
else:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
dev.idVendor, dev.idProduct, error)
return False
except (IndexError, NotImplementedError, ValueError, UnicodeDecodeError) as error:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor, dev.idProduct, error)
return False
if cmsis_dap_interface is None:
return False
if self._serial is not None:
if self._serial != dev.serial_number:
return False
return True
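# A minimal illustrative sketch (not part of pyOCD): FindDap is designed to be
# passed as the custom_match callable to usb.core.find(), exactly as done in
# PyUSB.get_all_connected_interfaces() above. The hypothetical helper below
# assumes pyusb with a working libusb backend.
def _list_cmsis_dap_serials():
    """Return the serial numbers of all connected CMSIS-DAPv1 probes."""
    try:
        devices = usb.core.find(find_all=True, custom_match=FindDap())
    except usb.core.NoBackendError:
        return []
    return [dev.serial_number for dev in devices]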
|
|
#
# Cassandra Cluster Management lib
#
import os, common, shutil, re, sys, cluster, socket, stat
USER_HOME = os.path.expanduser('~')
CASSANDRA_BIN_DIR = "bin"
CASSANDRA_CONF_DIR = "conf"
CASSANDRA_CONF = "cassandra.yaml"
LOG4J_CONF = "log4j-server.properties"
LOG4J_TOOL_CONF = "log4j-tools.properties"
LOGBACK_CONF = "logback.xml"
CASSANDRA_ENV = "cassandra-env.sh"
CASSANDRA_SH = "cassandra.in.sh"
class CCMError(Exception):
pass
class LoadError(CCMError):
pass
class ArgumentError(CCMError):
pass
class UnavailableSocketError(CCMError):
pass
def get_default_path():
default_path = os.path.join(USER_HOME, '.ccm')
if not os.path.exists(default_path):
os.mkdir(default_path)
return default_path
def parse_interface(itf, default_port):
i = itf.split(':')
if len(i) == 1:
return (i[0].strip(), default_port)
elif len(i) == 2:
return (i[0].strip(), int(i[1].strip()))
else:
raise ValueError("Invalid interface definition: " + itf)
def current_cluster_name(path):
try:
with open(os.path.join(path, 'CURRENT'), 'r') as f:
return f.readline().strip()
except IOError:
return None
def load_current_cluster(path):
name = current_cluster_name(path)
if name is None:
print 'No currently active cluster (use ccm cluster switch)'
exit(1)
try:
return cluster.Cluster.load(path, name)
except common.LoadError as e:
print str(e)
exit(1)
def switch_cluster(path, new_name):
with open(os.path.join(path, 'CURRENT'), 'w') as f:
f.write(new_name + '\n')
def replace_in_file(file, regexp, replace):
replaces_in_file(file, [(regexp, replace)])
def replaces_in_file(file, replacement_list):
rs = [ (re.compile(regexp), repl) for (regexp, repl) in replacement_list]
file_tmp = file + ".tmp"
with open(file, 'r') as f:
with open(file_tmp, 'w') as f_tmp:
for line in f:
for r, replace in rs:
match = r.search(line)
if match:
line = replace + "\n"
f_tmp.write(line)
shutil.move(file_tmp, file)
def replace_or_add_into_file_tail(file, regexp, replace):
replaces_or_add_into_file_tail(file, [(regexp, replace)])
def replaces_or_add_into_file_tail(file, replacement_list):
rs = [ (re.compile(regexp), repl) for (regexp, repl) in replacement_list]
is_line_found = False
file_tmp = file + ".tmp"
with open(file, 'r') as f:
with open(file_tmp, 'w') as f_tmp:
for line in f:
for r, replace in rs:
match = r.search(line)
if match:
line = replace + "\n"
is_line_found = True
f_tmp.write(line)
            # If the entry was not found, append it to the end of the file
            if not is_line_found:
f_tmp.write('\n'+ replace + "\n")
shutil.move(file_tmp, file)
def make_cassandra_env(cassandra_dir, node_path):
sh_file = os.path.join(CASSANDRA_BIN_DIR, CASSANDRA_SH)
orig = os.path.join(cassandra_dir, sh_file)
dst = os.path.join(node_path, sh_file)
shutil.copy(orig, dst)
replacements = [
('CASSANDRA_HOME=', '\tCASSANDRA_HOME=%s' % cassandra_dir),
('CASSANDRA_CONF=', '\tCASSANDRA_CONF=%s' % os.path.join(node_path, 'conf'))
]
common.replaces_in_file(dst, replacements)
# If a cluster-wide cassandra.in.sh file exists in the parent
# directory, append it to the node specific one:
cluster_sh_file = os.path.join(node_path, os.path.pardir, 'cassandra.in.sh')
if os.path.exists(cluster_sh_file):
append = open(cluster_sh_file).read()
with open(dst, 'a') as f:
f.write('\n\n### Start Cluster wide config ###\n')
f.write(append)
f.write('\n### End Cluster wide config ###\n\n')
env = os.environ.copy()
env['CASSANDRA_INCLUDE'] = os.path.join(dst)
return env
def get_stress_bin(cassandra_dir):
candidates = [
os.path.join(cassandra_dir, 'contrib', 'stress', 'bin', 'stress'),
os.path.join(cassandra_dir, 'tools', 'stress', 'bin', 'stress'),
os.path.join(cassandra_dir, 'tools', 'bin', 'stress'),
os.path.join(cassandra_dir, 'tools', 'bin', 'cassandra-stress')
]
for candidate in candidates:
if os.path.exists(candidate):
stress = candidate
break
else:
raise Exception("Cannot find stress binary (maybe it isn't compiled)")
# make sure it's executable
if not os.access(stress, os.X_OK):
try:
# try to add user execute permissions
os.chmod(stress, os.stat(stress).st_mode | stat.S_IXUSR)
except:
raise Exception("stress binary is not executable: %s" % (stress,))
return stress
def validate_cassandra_dir(cassandra_dir):
if cassandra_dir is None:
raise ArgumentError('Undefined cassandra directory')
bin_dir = os.path.join(cassandra_dir, CASSANDRA_BIN_DIR)
conf_dir = os.path.join(cassandra_dir, CASSANDRA_CONF_DIR)
cnd = os.path.exists(bin_dir)
cnd = cnd and os.path.exists(conf_dir)
cnd = cnd and os.path.exists(os.path.join(conf_dir, CASSANDRA_CONF))
if not cnd:
raise ArgumentError('%s does not appear to be a cassandra source directory' % cassandra_dir)
def check_socket_available(itf):
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(itf)
s.close()
except socket.error, msg:
s.close()
addr, port = itf
raise UnavailableSocketError("Inet address %s:%s is not available: %s" % (addr, port, msg))
def parse_settings(args):
settings = {}
for s in args:
splitted = s.split(':')
if len(splitted) != 2:
            raise ArgumentError("A new setting should be of the form 'key: value', got " + s)
val = splitted[1].strip()
# ok, that's not super beautiful
        if val.lower() == "true":
            val = True
        elif val.lower() == "false":
            val = False
        else:
            try:
                val = int(val)
            except ValueError:
                pass
        settings[splitted[0].strip()] = val
return settings
#
# Copy file from source to destination with reasonable error handling
#
def copy_file(src_file, dst_file):
try:
shutil.copy2(src_file, dst_file)
except (IOError, shutil.Error) as e:
print >> sys.stderr, str(e)
exit(1)
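# A minimal illustrative self-check (not used by ccm itself) showing how
# parse_interface() and parse_settings() above interpret their inputs; the
# function below is hypothetical and must be called explicitly.
def _example_parsing():
    assert parse_interface('127.0.0.1', 9160) == ('127.0.0.1', 9160)
    assert parse_interface('127.0.0.1:7000', 9160) == ('127.0.0.1', 7000)
    settings = parse_settings(['num_tokens: 256', 'hinted_handoff_enabled: false'])
    assert settings == {'num_tokens': 256, 'hinted_handoff_enabled': False}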
|
|
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""Tests concerning XML documents"""
import unittest
import os
import io
import pathlib
import tempfile
from decimal import Decimal
try:
import lxml.etree as lxml_etree
except ImportError:
lxml_etree = None
from xmlschema import XMLSchema10, XMLSchema11, XmlDocument, \
XMLResourceError, XMLSchemaValidationError, XMLSchemaDecodeError, \
to_json, from_json
from xmlschema.etree import ElementTree
from xmlschema.names import XSD_NAMESPACE, XSI_NAMESPACE
from xmlschema.helpers import is_etree_element, is_etree_document
from xmlschema.resources import XMLResource
from xmlschema.documents import get_context
TEST_CASES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_cases/')
def casepath(relative_path):
return os.path.join(TEST_CASES_DIR, relative_path)
class TestXmlDocuments(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.vh_dir = casepath('examples/vehicles')
cls.vh_xsd_file = casepath('examples/vehicles/vehicles.xsd')
cls.vh_xml_file = casepath('examples/vehicles/vehicles.xml')
cls.col_dir = casepath('examples/collection')
cls.col_xsd_file = casepath('examples/collection/collection.xsd')
cls.col_xml_file = casepath('examples/collection/collection.xml')
def test_to_json_api(self):
json_data = to_json(self.col_xml_file, lazy=True)
self.assertIsInstance(json_data, str)
self.assertIn('"@xmlns:col"', json_data)
self.assertIn(r'"name": "Joan Mir\u00f3"', json_data)
with self.assertRaises(TypeError) as ctx:
to_json(self.col_xml_file, lazy=True, decimal_type=Decimal)
self.assertIn("is not JSON serializable", str(ctx.exception))
col_1_error_xml_file = casepath('examples/collection/collection-1_error.xml')
json_data, errors = to_json(col_1_error_xml_file, validation='lax', lazy=True)
self.assertEqual(len(errors), 1)
self.assertIsInstance(errors[0], XMLSchemaDecodeError)
self.assertIn('"position": null', json_data)
json_data, errors = to_json(col_1_error_xml_file, validation='lax', lazy=True,
json_options={'default': lambda x: None})
self.assertEqual(len(errors), 0)
self.assertIn('"object": [null, null]', json_data)
def test_from_json_api(self):
json_data = to_json(self.col_xml_file, lazy=True)
with self.assertRaises(TypeError) as ctx:
from_json(json_data, self.col_xsd_file)
self.assertIn("invalid type <class 'str'> for argument 'schema'", str(ctx.exception))
col_schema = XMLSchema10(self.col_xsd_file)
collection = from_json(json_data, schema=col_schema)
self.assertEqual(collection.tag, '{http://example.com/ns/collection}collection')
col_schema = XMLSchema10(self.col_xsd_file)
collection = from_json(json_data, col_schema, json_options={'parse_float': Decimal})
self.assertEqual(collection.tag, '{http://example.com/ns/collection}collection')
def test_get_context_with_schema(self):
source, schema = get_context(self.col_xml_file)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
source, schema = get_context(self.col_xml_file, self.col_xsd_file)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
col_schema = XMLSchema10(self.col_xsd_file)
source, schema = get_context(self.col_xml_file, col_schema)
self.assertIsInstance(source, XMLResource)
self.assertIs(schema, col_schema)
source, schema = get_context(self.vh_xml_file, cls=XMLSchema10)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
source, schema = get_context(self.col_xml_file, cls=XMLSchema11)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema11)
source, schema = get_context(XMLResource(self.vh_xml_file))
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
xml_document = XmlDocument(self.vh_xml_file)
source, schema = get_context(xml_document)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
self.assertIs(xml_document.schema, schema)
# Issue #145
with open(self.vh_xml_file) as f:
source, schema = get_context(f, schema=self.vh_xsd_file)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
with open(self.vh_xml_file) as f:
source, schema = get_context(XMLResource(f), schema=self.vh_xsd_file)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
with open(self.vh_xml_file) as f:
source, schema = get_context(f, base_url=self.vh_dir)
self.assertIsInstance(source, XMLResource)
self.assertIsInstance(schema, XMLSchema10)
def test_get_context_without_schema(self):
xml_data = '<text xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xmlns:xs="http://www.w3.org/2001/XMLSchema"\n' \
' xsi:type="xs:string">foo</text>'
source, schema = get_context(xml_data)
self.assertIsInstance(source, XMLResource)
self.assertIs(schema, XMLSchema10.meta_schema)
self.assertEqual(source.root.tag, 'text')
self.assertTrue(schema.is_valid(source))
with self.assertRaises(ValueError) as ctx:
get_context('<empty/>')
self.assertEqual(str(ctx.exception),
"no schema can be retrieved for the provided XML data")
source, schema = get_context('<empty/>', dummy_schema=True)
self.assertEqual(source.root.tag, 'empty')
self.assertIsInstance(schema, XMLSchema10)
col_xml_resource = XMLResource(self.col_xml_file)
col_xml_resource.root.attrib.clear()
self.assertEqual(col_xml_resource.get_locations(), [])
source, schema = get_context(col_xml_resource, self.col_xsd_file)
self.assertIs(source, col_xml_resource)
self.assertIsInstance(schema, XMLSchema10)
self.assertEqual(schema.target_namespace, 'http://example.com/ns/collection')
# Schema target namespace doesn't match source namespace
vh_schema = XMLSchema10(self.vh_xsd_file)
source, schema = get_context(col_xml_resource, vh_schema)
self.assertIs(source, col_xml_resource)
self.assertIs(schema, vh_schema)
self.assertFalse(schema.is_valid(source))
vh_schema.import_schema('http://example.com/ns/collection', self.col_xsd_file)
vh_schema.build()
source, schema = get_context(col_xml_resource, vh_schema)
self.assertIs(source, col_xml_resource)
self.assertIs(schema, vh_schema)
self.assertTrue(schema.is_valid(source))
def test_xml_document_init_with_schema(self):
xml_document = XmlDocument(self.vh_xml_file)
self.assertEqual(os.path.basename(xml_document.url), 'vehicles.xml')
self.assertEqual(xml_document.errors, ())
self.assertIsInstance(xml_document.schema, XMLSchema10)
xml_document = XmlDocument(self.vh_xml_file, cls=XMLSchema11)
self.assertIsInstance(xml_document.schema, XMLSchema11)
xml_document = XmlDocument(self.vh_xml_file, self.vh_xsd_file)
self.assertIsInstance(xml_document.schema, XMLSchema10)
vh_schema = XMLSchema10(self.vh_xsd_file)
xml_document = XmlDocument(self.vh_xml_file, vh_schema)
self.assertIsInstance(xml_document.schema, XMLSchema10)
with self.assertRaises(XMLSchemaValidationError) as ctx:
XmlDocument(self.vh_xml_file, self.col_xsd_file)
self.assertIn('is not an element of the schema', str(ctx.exception))
xml_document = XmlDocument(self.col_xml_file)
self.assertEqual(os.path.basename(xml_document.url), 'collection.xml')
self.assertIsInstance(xml_document.schema, XMLSchema10)
xml_file = casepath('examples/collection/collection-1_error.xml')
with self.assertRaises(XMLSchemaValidationError) as ctx:
XmlDocument(xml_file)
self.assertIn('invalid literal for int() with base 10', str(ctx.exception))
xml_document = XmlDocument(xml_file, validation='lax')
self.assertEqual(os.path.basename(xml_document.url), 'collection-1_error.xml')
self.assertIsInstance(xml_document.schema, XMLSchema10)
        self.assertEqual(len(xml_document.errors), 1)
with self.assertRaises(ValueError) as ctx:
XmlDocument(xml_file, validation='foo')
self.assertEqual(str(ctx.exception), "'foo': not a validation mode")
def test_xml_document_init_without_schema(self):
with self.assertRaises(ValueError) as ctx:
XmlDocument('<empty/>')
self.assertIn('no schema can be retrieved for the XML resource', str(ctx.exception))
xml_document = XmlDocument('<empty/>', validation='skip')
self.assertIsNone(xml_document.schema)
self.assertIsInstance(xml_document._fallback_schema, XMLSchema10)
self.assertEqual(xml_document._fallback_schema.target_namespace, '')
xml_document = XmlDocument(
'<tns:empty xmlns:tns="http://example.com/ns" />', validation='skip'
)
self.assertIsNone(xml_document.schema)
self.assertIsInstance(xml_document._fallback_schema, XMLSchema10)
self.assertEqual(xml_document._fallback_schema.target_namespace, xml_document.namespace)
def test_xml_document_parse(self):
xml_document = XmlDocument(self.vh_xml_file)
self.assertEqual(os.path.basename(xml_document.url), 'vehicles.xml')
self.assertFalse(xml_document.is_lazy())
xml_file = casepath('examples/vehicles/vehicles-1_error.xml')
with self.assertRaises(XMLSchemaValidationError):
xml_document.parse(xml_file)
xml_document.parse(self.vh_xml_file, lazy=True)
self.assertEqual(os.path.basename(xml_document.url), 'vehicles.xml')
self.assertTrue(xml_document.is_lazy())
xml_document = XmlDocument(self.vh_xml_file, validation='lax')
xml_document.parse(xml_file)
self.assertEqual(len(xml_document.errors), 1)
def test_xml_document_decode_with_schema(self):
xml_document = XmlDocument(self.vh_xml_file)
vh_schema = XMLSchema10(self.vh_xsd_file)
self.assertEqual(xml_document.decode(), vh_schema.decode(self.vh_xml_file))
namespaces = {'vh': 'http://example.com/ns'}
self.assertEqual(xml_document.decode(namespaces=namespaces),
vh_schema.decode(self.vh_xml_file, namespaces=namespaces))
self.assertNotEqual(xml_document.decode(namespaces=namespaces),
vh_schema.decode(self.vh_xml_file))
xml_file = casepath('examples/collection/collection-1_error.xml')
xml_document = XmlDocument(xml_file, validation='lax')
col_schema = XMLSchema10(self.col_xsd_file)
self.assertEqual(xml_document.decode(), col_schema.decode(xml_file, validation='lax')[0])
xml_document = XmlDocument(xml_file, validation='skip')
self.assertEqual(xml_document.decode(), col_schema.decode(xml_file, validation='skip'))
self.assertEqual(xml_document.decode(validation='lax'),
col_schema.decode(xml_file, validation='lax')[0])
def test_xml_document_decode_without_schema(self):
xml_document = XmlDocument('<x:root xmlns:x="ns" />', validation='skip')
self.assertIsNone(xml_document.decode())
xml_document = XmlDocument(
'<x:root xmlns:x="ns" a="true"><b1>10</b1><b2/></x:root>', validation='skip'
)
self.assertEqual(xml_document.decode(), {'@a': 'true', 'b1': ['10'], 'b2': [None]})
def test_xml_document_decode_with_xsi_type(self):
xml_data = '<root xmlns:xsi="{}" xmlns:xs="{}" ' \
'xsi:type="xs:integer">10</root>'.format(XSI_NAMESPACE, XSD_NAMESPACE)
xml_document = XmlDocument(xml_data)
self.assertEqual(xml_document.decode(),
{'@xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'@xmlns:xs': 'http://www.w3.org/2001/XMLSchema',
'@xsi:type': 'xs:integer', '$': 10})
def test_xml_document_to_json(self):
xml_document = XmlDocument(self.col_xml_file, lazy=True)
json_data = xml_document.to_json()
self.assertIsInstance(json_data, str)
self.assertIn('"@xmlns:col"', json_data)
self.assertIn(r'"name": "Joan Mir\u00f3"', json_data)
self.assertEqual(xml_document.to_json(validation='lax')[0], json_data)
self.assertEqual(xml_document.to_json(namespaces=None), json_data)
with self.assertRaises(TypeError) as ctx:
xml_document.to_json(decimal_type=Decimal)
self.assertIn("is not JSON serializable", str(ctx.exception))
fp = io.StringIO()
xml_document.to_json(fp=fp)
self.assertEqual(fp.getvalue(), json_data)
fp.close()
fp = io.StringIO()
self.assertEqual(xml_document.to_json(fp=fp, validation='lax'), ())
self.assertEqual(fp.getvalue(), json_data)
fp.close()
col_1_error_xml_file = casepath('examples/collection/collection-1_error.xml')
xml_document = XmlDocument(col_1_error_xml_file, validation='lax')
json_data, errors = xml_document.to_json()
self.assertEqual(len(errors), 1)
self.assertIsInstance(errors[0], XMLSchemaDecodeError)
self.assertIn('"position": null', json_data)
xml_document = XmlDocument(col_1_error_xml_file, validation='lax', lazy=True)
json_data, errors = xml_document.to_json(json_options={'default': lambda x: None})
self.assertEqual(len(errors), 0)
self.assertIn('"object": [null, null]', json_data)
def test_xml_document_write(self):
with tempfile.TemporaryDirectory() as dirname:
col_file_path = pathlib.Path(dirname).joinpath('collection.xml')
xml_document = XmlDocument(self.col_xml_file)
with col_file_path.open(mode='wb') as fp:
xml_document.write(fp)
schema = XMLSchema10(self.col_xsd_file)
xml_document = XmlDocument(str(col_file_path), schema=schema)
self.assertEqual(xml_document.root.tag,
'{http://example.com/ns/collection}collection')
self.assertIs(xml_document.schema, schema)
col_file_path.unlink()
xml_document.write(str(col_file_path))
xml_document = XmlDocument(str(col_file_path), schema=schema)
self.assertIs(xml_document.schema, schema)
col_file_path.unlink()
xml_document.write(str(col_file_path), encoding='unicode')
xml_document = XmlDocument(str(col_file_path), schema=schema)
self.assertIs(xml_document.schema, schema)
col_file_path.unlink()
xml_document.write(str(col_file_path),
default_namespace="http://example.com/ns/collection")
xml_document = XmlDocument(str(col_file_path), schema=schema)
self.assertIs(xml_document.schema, schema)
if lxml_etree is not None:
col_file_path.unlink()
col_etree_document = lxml_etree.parse(self.col_xml_file)
xml_document = XmlDocument(col_etree_document, base_url=self.col_dir)
xml_document.write(str(col_file_path),
default_namespace="http://example.com/ns/collection")
xml_document = XmlDocument(str(col_file_path), schema=schema)
self.assertIs(xml_document.schema, schema)
col_file_path.unlink()
xml_document = XmlDocument(self.col_xml_file, lazy=True)
with self.assertRaises(XMLResourceError) as ctx:
xml_document.write(str(col_file_path))
self.assertEqual(str(ctx.exception), "cannot serialize a lazy XML document")
def test_xml_document_etree_interface(self):
xml_document = XmlDocument(self.vh_xml_file)
self.assertIs(xml_document.getroot(), xml_document._root)
self.assertTrue(is_etree_element(xml_document.getroot()))
self.assertTrue(is_etree_document(xml_document.get_etree_document()))
xml_document = XmlDocument(self.vh_xml_file, lazy=1)
with self.assertRaises(XMLResourceError) as ctx:
xml_document.get_etree_document()
self.assertIn('cannot create an ElementTree from a lazy resource', str(ctx.exception))
vh_tree = ElementTree.parse(self.vh_xml_file)
xml_document = XmlDocument(vh_tree, base_url=self.vh_dir)
self.assertIs(xml_document.source, vh_tree)
self.assertIs(xml_document.get_etree_document(), vh_tree)
@unittest.skipIf(lxml_etree is None, "Skip: lxml is not available.")
def test_xml_document_with_lxml(self):
vh_tree = lxml_etree.parse(self.vh_xml_file)
xml_document = XmlDocument(vh_tree, base_url=self.vh_dir)
self.assertIs(xml_document.get_etree_document(), vh_tree)
xml_document = XmlDocument(vh_tree.getroot(), base_url=self.vh_dir)
etree_document = xml_document.get_etree_document()
self.assertIsNot(etree_document, vh_tree)
self.assertTrue(is_etree_document(etree_document))
self.assertTrue(hasattr(etree_document, 'xpath'))
self.assertTrue(hasattr(etree_document, 'xslt'))
def test_xml_document_tostring(self):
xml_document = XmlDocument(self.vh_xml_file)
self.assertTrue(xml_document.tostring().startswith('<vh:vehicles'))
with self.assertRaises(XMLResourceError):
XmlDocument(self.vh_xml_file, lazy=True).tostring()
if __name__ == '__main__':
import platform
header_template = "Test xmlschema's XML documents with Python {} on {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
|
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import re
from datetime import datetime
from packaging.version import parse
from pathlib import Path
from io import StringIO
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve
import sphinx_gallery
import matplotlib as mpl
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"numpydoc",
"sphinx.ext.linkcode",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.imgconverter",
"sphinx_gallery.gen_gallery",
"sphinx_issues",
"add_toctree_functions",
"sphinx-prompt",
"sphinxext.opengraph",
"doi_role",
]
# Support for `plot::` directives in sphinx 3.2 requires matplotlib 3.1.0 or newer
if parse(mpl.__version__) >= parse("3.1.0"):
extensions.append("matplotlib.sphinxext.plot_directive")
# Produce `plot::` directives for examples that contain `import matplotlib` or
# `from matplotlib import`.
numpydoc_use_plots = True
# Options for the `plot::` directive:
# https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html
plot_formats = ["png"]
plot_include_source = True
plot_html_show_formats = False
plot_html_show_source_link = False
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get("NO_MATHJAX"):
extensions.append("sphinx.ext.imgmath")
imgmath_image_format = "svg"
mathjax_path = ""
else:
extensions.append("sphinx.ext.mathjax")
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"
autodoc_default_options = {"members": True, "inherited-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8'
# The main toctree document.
main_doc = "contents"
# General information about the project.
project = "scikit-learn"
copyright = f"2007 - {datetime.now().year}, scikit-learn developers (BSD License)"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
parsed_version = parse(sklearn.__version__)
version = ".".join(parsed_version.base_version.split(".")[:2])
# The full version, including alpha/beta/rc tags.
# Removes post from release name
if parsed_version.is_postrelease:
release = parsed_version.base_version
else:
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "templates", "includes", "themes"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "literal"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "scikit-learn-modern"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"google_analytics": True, "mathjax_path": mathjax_path}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "scikit-learn"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logos/scikit-learn-logo-small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "logos/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["images"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "index.html"}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "scikit-learndoc"
# If true, the reST sources are included in the HTML build as _sources/name.
html_copy_source = True
# Adds variables into templates
html_context = {}
# finds latest release highlights and places it into HTML context for
# index.html
release_highlights_dir = Path("..") / "examples" / "release_highlights"
# Finds the highlight with the latest version number
latest_highlights = sorted(release_highlights_dir.glob("plot_release_highlights_*.py"))[
-1
]
latest_highlights = latest_highlights.with_suffix("").name
html_context[
"release_highlights"
] = f"auto_examples/release_highlights/{latest_highlights}"
# get version from highlight name assuming highlights have the form
# plot_release_highlights_0_22_0
highlight_version = ".".join(latest_highlights.split("_")[-3:-1])
html_context["release_highlights_version"] = highlight_version
# redirects dictionary maps from old links to new links
redirects = {
"documentation": "index",
"auto_examples/feature_selection/plot_permutation_test_for_classification": (
"auto_examples/model_selection/plot_permutation_tests_for_classification"
),
}
html_context["redirects"] = redirects
for old_link in redirects:
html_additional_pages[old_link] = "redirects.html"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
"preamble": r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
\let\oldhref\href
\renewcommand{\href}[2]{\oldhref{#1}{\hbox{#2}}}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
(
"contents",
"user_guide.tex",
"scikit-learn user guide",
"scikit-learn developers",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"matplotlib": ("https://matplotlib.org/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"joblib": ("https://joblib.readthedocs.io/en/latest/", None),
"seaborn": ("https://seaborn.pydata.org/", None),
}
v = parse(release)
if v.release is None:
raise ValueError(
"Ill-formed version: {!r}. Version should follow PEP440".format(version)
)
if v.is_devrelease:
binder_branch = "main"
else:
major, minor = v.release[:2]
binder_branch = "{}.{}.X".format(major, minor)
class SubSectionTitleOrder:
"""Sort example gallery by title of subsection.
    Assumes README.txt exists for all subsections and uses the section title
    underlined with dashes, '---', as the adornment.
"""
def __init__(self, src_dir):
self.src_dir = src_dir
self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)
def __repr__(self):
return "<%s>" % (self.__class__.__name__,)
def __call__(self, directory):
src_path = os.path.normpath(os.path.join(self.src_dir, directory))
# Forces Release Highlights to the top
if os.path.basename(src_path) == "release_highlights":
return "0"
readme = os.path.join(src_path, "README.txt")
try:
with open(readme, "r") as f:
content = f.read()
except FileNotFoundError:
return directory
title_match = self.regex.search(content)
if title_match is not None:
return title_match.group(1)
return directory
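# A small illustrative check (not required by the Sphinx build): the regex in
# SubSectionTitleOrder extracts the first dash-underlined title from a
# subsection README. The README text below is a made-up example.
_example_readme = "Calibration\n-----------\n\nExamples about calibration."
_example_order = SubSectionTitleOrder("../examples")
assert _example_order.regex.search(_example_readme).group(1) == "Calibration"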
sphinx_gallery_conf = {
"doc_module": "sklearn",
"backreferences_dir": os.path.join("modules", "generated"),
"show_memory": False,
"reference_url": {"sklearn": None},
"examples_dirs": ["../examples"],
"gallery_dirs": ["auto_examples"],
"subsection_order": SubSectionTitleOrder("../examples"),
"binder": {
"org": "scikit-learn",
"repo": "scikit-learn",
"binderhub_url": "https://mybinder.org",
"branch": binder_branch,
"dependencies": "./binder/requirements.txt",
"use_jupyter_lab": True,
},
# avoid generating too many cross links
"inspect_global_variables": False,
"remove_config_comments": True,
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {"sphx_glr_plot_classifier_comparison_001.png": 600}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.experimental import enable_halving_search_cv # noqa
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print("Preparing carousel images")
image_dir = os.path.join(app.builder.outdir, "_images")
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + "_carousel.png")
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
def filter_search_index(app, exception):
if exception is not None:
return
    # searchindex only exists when generating html
if app.builder.name != "html":
return
print("Removing methods from search index")
searchindex_path = os.path.join(app.builder.outdir, "searchindex.js")
with open(searchindex_path, "r") as f:
searchindex_text = f.read()
searchindex_text = re.sub(r"{__init__.+?}", "{}", searchindex_text)
searchindex_text = re.sub(r"{__call__.+?}", "{}", searchindex_text)
with open(searchindex_path, "w") as f:
f.write(searchindex_text)
def generate_min_dependency_table(app):
"""Generate min dependency table for docs."""
from sklearn._min_dependencies import dependent_packages
# get length of header
package_header_len = max(len(package) for package in dependent_packages) + 4
version_header_len = len("Minimum Version") + 4
tags_header_len = max(len(tags) for _, tags in dependent_packages.values()) + 4
output = StringIO()
output.write(
" ".join(
["=" * package_header_len, "=" * version_header_len, "=" * tags_header_len]
)
)
output.write("\n")
dependency_title = "Dependency"
version_title = "Minimum Version"
tags_title = "Purpose"
output.write(
f"{dependency_title:<{package_header_len}} "
f"{version_title:<{version_header_len}} "
f"{tags_title}\n"
)
output.write(
" ".join(
["=" * package_header_len, "=" * version_header_len, "=" * tags_header_len]
)
)
output.write("\n")
for package, (version, tags) in dependent_packages.items():
output.write(
f"{package:<{package_header_len}} {version:<{version_header_len}} {tags}\n"
)
output.write(
" ".join(
["=" * package_header_len, "=" * version_header_len, "=" * tags_header_len]
)
)
output.write("\n")
output = output.getvalue()
with (Path(".") / "min_dependency_table.rst").open("w") as f:
f.write(output)
def generate_min_dependency_substitutions(app):
"""Generate min dependency substitutions for docs."""
from sklearn._min_dependencies import dependent_packages
output = StringIO()
for package, (version, _) in dependent_packages.items():
package = package.capitalize()
output.write(f".. |{package}MinVersion| replace:: {version}")
output.write("\n")
output = output.getvalue()
with (Path(".") / "min_dependency_substitutions.rst").open("w") as f:
f.write(output)
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
issues_github_path = "scikit-learn/scikit-learn"
def setup(app):
app.connect("builder-inited", generate_min_dependency_table)
app.connect("builder-inited", generate_min_dependency_substitutions)
# to hide/show the prompt in code examples:
app.connect("build-finished", make_carousel_thumbs)
app.connect("build-finished", filter_search_index)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
"sklearn",
"https://github.com/scikit-learn/"
"scikit-learn/blob/{revision}/"
"{package}/{path}#L{lineno}",
)
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
# maps functions with a class name that is indistinguishable when case is
# ignored to another filename
autosummary_filename_map = {
"sklearn.cluster.dbscan": "dbscan-function",
"sklearn.covariance.oas": "oas-function",
"sklearn.decomposition.fastica": "fastica-function",
}
# Config for sphinxext.opengraph
ogp_site_url = "https://scikit-learn.org/stable/"
ogp_image = "https://scikit-learn.org/stable/_static/scikit-learn-logo-small.png"
ogp_use_first_image = True
ogp_site_name = "scikit-learn"
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions that can be thrown by calliope tools.
The exceptions in this file, and those that extend them, can be thrown by
the Run() function in calliope tools without worrying about stack traces
littering the screen in CLI mode. In interpreter mode, they are not caught
from within calliope.
"""
from functools import wraps
import os
import sys
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_attr_os
class ToolException(core_exceptions.Error):
"""ToolException is for Run methods to throw for non-code-bug errors.
Attributes:
command_name: The dotted group and command name for the command that threw
this exception. This value is set by calliope.
"""
@staticmethod
def FromCurrent(*args):
"""Creates a new ToolException based on the current exception being handled.
If no exception is being handled, a new ToolException with the given args
is created. If there is a current exception, the original exception is
first logged (to file only). A new ToolException is then created with the
same args as the current one.
Args:
*args: The standard args taken by the constructor of Exception for the new
exception that is created. If None, the args from the exception
currently being handled will be used.
Returns:
The generated ToolException.
"""
(_, current_exception, _) = sys.exc_info()
# Log original exception details and traceback to the log file if we are
# currently handling an exception.
if current_exception:
file_logger = log.file_only_logger
file_logger.error('Handling the source of a tool exception, '
'original details follow.')
file_logger.exception(current_exception)
if args:
return ToolException(*args)
elif current_exception:
return ToolException(*current_exception.args)
return ToolException('An unknown error has occurred')
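# Illustrative usage sketch (not part of the original module): a typical call
# site converts whatever low-level error is being handled into a ToolException,
# with FromCurrent logging the original details to the file-only log. The
# helper name and the callable argument below are hypothetical.
def _ExampleReraiseAsToolException(risky_callable):
  """Runs risky_callable, converting any failure into a ToolException."""
  try:
    return risky_callable()
  except Exception:  # pylint: disable=broad-except
    raise ToolException.FromCurrent()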
class ExitCodeNoError(core_exceptions.Error):
"""A special exception for exit codes without error messages.
If this exception is raised, it's identical in behavior to returning from
the command code, except the overall exit code will be different.
"""
class FailedSubCommand(core_exceptions.Error):
"""Exception capturing a subcommand which did sys.exit(code)."""
def __init__(self, cmd, code):
super(FailedSubCommand, self).__init__(
'Failed command: [{0}] with exit code [{1}]'.format(
' '.join(cmd), code),
exit_code=code)
def RaiseToolExceptionInsteadOf(*error_types):
"""RaiseToolExceptionInsteadOf is a decorator that re-raises as ToolException.
If any of the error_types are raised in the decorated function, this decorator
  will re-raise it as a ToolException.
Args:
*error_types: [Exception], A list of exception types that this decorator
will watch for.
Returns:
The decorated function.
"""
def Wrap(func):
"""Wrapper function for the decorator."""
@wraps(func)
def TryFunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except error_types:
(_, _, exc_traceback) = sys.exc_info()
# The 3 element form takes (type, instance, traceback). If the first
# element is an instance, it is used as the type and instance and the
# second element must be None. This preserves the original traceback.
# pylint:disable=nonstandard-exception, ToolException is an Exception.
raise ToolException.FromCurrent(), None, exc_traceback
return TryFunc
return Wrap
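# Illustrative usage sketch (not part of the original module): decorating a
# helper so that IOError and OSError raised inside it surface as ToolException
# instead of raw tracebacks. The function below is hypothetical.
@RaiseToolExceptionInsteadOf(IOError, OSError)
def _ExampleReadConfig(path):
  """Reads a config file, raising ToolException on I/O failures."""
  with open(path) as config_file:
    return config_file.read()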
def _TruncateToLineWidth(string, align, width, fill=''):
"""Truncate string to line width, right aligning at align.
Examples (assuming a screen width of 10):
  >>> _TruncateToLineWidth('foo', 0, 10)
  'foo'
  >>> # Align to the beginning. Should truncate the end.
  ... _TruncateToLineWidth('0123456789abcdef', 0, 10)
  '0123456789'
  >>> _TruncateToLineWidth('0123456789abcdef', 0, 10, fill='...')
  '0123456...'
  >>> # Align to the end. Should truncate the beginning.
  ... _TruncateToLineWidth('0123456789abcdef', 16, 10)
  '6789abcdef'
  >>> _TruncateToLineWidth('0123456789abcdef', 16, 10, fill='...')
  '...9abcdef'
  >>> # Align to the middle (note: the index is toward the end of the string,
  ... # because this function right-aligns to the given index).
  ... # Should truncate the beginning and end.
  ... _TruncateToLineWidth('0123456789abcdef', 12, 10)
  '23456789ab'
  >>> _TruncateToLineWidth('0123456789abcdef', 12, 10, fill='...')
  '...5678...'
Args:
string: string to truncate
align: index to right-align to
width: maximum length for the resulting string
fill: if given, indicate truncation with this string. Must be shorter than
terminal width / 2.
Returns:
str, the truncated string
Raises:
ValueError, if provided fill is too long for the terminal.
"""
if len(fill) >= width / 2:
# Either the caller provided a fill that's way too long, or the user has a
# terminal that's way too narrow. In either case, we aren't going to be able
# to make this look nice, but we don't want to throw an error because that
# will mask the original error.
log.warn('Screen not wide enough to display correct error message.')
return string
if len(string) <= width:
return string
if align > width:
string = fill + string[align-width+len(fill):]
if len(string) <= width:
return string
string = string[:width-len(fill)] + fill
return string
_MARKER = '^ invalid character'
# pylint: disable=g-doc-bad-indent
def _FormatNonAsciiMarkerString(args_string):
u"""Format a string that will mark the first non-ASCII character it contains.
Example:
>>> args = 'command.py --foo=\xce\x94'
>>> _FormatNonAsciiMarkerString(args) == (
... 'command.py --foo=\u0394\n'
... ' ^ invalid character'
... )
True
Args:
args_string: str representing the command executed
Returns:
unicode, a properly formatted string with two lines, the second of which
indicates the non-ASCII character in the first.
Raises:
ValueError: if the given string is all ASCII characters
"""
# idx is the first index of the first non-ASCII character in args_string
idx = None
for idx, char in enumerate(args_string):
try:
char.decode('ascii')
# idx gets set by enumerate; unset it to indicate that the last character
# was successfully decoded as ASCII
idx = None
except UnicodeError:
# idx will remain set, indicating the first non-ASCII character
break
if idx is None:
raise ValueError('Given string is composed entirely of ASCII characters.')
# Make a string that, when printed in parallel, will point to the non-ASCII
# character
marker_string = ' ' * idx + _MARKER
# Make sure that this will still print out nicely on an odd-sized screen
align = len(marker_string)
args_string = args_string.decode('utf-8', 'replace')
width, _ = console_attr_os.GetTermSize()
fill = '...'
if width < len(_MARKER) + len(fill):
# It's hopeless to try to wrap this and make it look nice. Preserve it in
# full for logs and so on.
return '\n'.join((args_string, marker_string))
# If len(args_string) < width < len(marker_string) (ex:)
#
# args_string = 'command BAD'
# marker_string = ' ^ invalid character'
# width = len('----------------')
#
# then the truncation can give a result like the following:
#
# args_string = 'command BAD'
# marker_string = ' ^ invalid character'
#
# (This occurs when args_string is short enough to not be truncated, but
# marker_string is long enough to be truncated.)
#
# ljust args_string to make it as long as marker_string before passing to
# _TruncateToLineWidth, which will yield compatible truncations. rstrip at the
# end to get rid of the new trailing spaces.
formatted_args_string = _TruncateToLineWidth(args_string.ljust(align), align,
width, fill=fill).rstrip()
formatted_marker_string = _TruncateToLineWidth(marker_string, align, width)
return '\n'.join((formatted_args_string, formatted_marker_string))
class InvalidCharacterInArgException(ToolException):
"""InvalidCharacterInArgException is for non-ASCII CLI arguments."""
def __init__(self, args, invalid_arg):
self.invalid_arg = invalid_arg
args = [os.path.basename(args[0])] + args[1:]
super(InvalidCharacterInArgException, self).__init__(
u'Failed to read command line argument [{0}] because it does '
u'not appear to be valid 7-bit ASCII. (Argument is composed of '
u'bytes [{1}].)\n\n'
u'{2}'.format(
self.invalid_arg.decode('utf-8', 'replace'),
str(repr(invalid_arg))[1:-1], # get rid of surrounding quotes
_FormatNonAsciiMarkerString(' '.join(args))))
class HttpException(ToolException):
"""HttpException is raised whenever the Http response status code != 200."""
def __init__(self, error):
super(HttpException, self).__init__(error)
self.error = error
class InvalidArgumentException(ToolException):
"""InvalidArgumentException is for malformed arguments."""
def __init__(self, parameter_name, message):
super(InvalidArgumentException, self).__init__(
'Invalid value for [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class UnknownArgumentException(ToolException):
"""UnknownArgumentException is for arguments with unexpected values."""
def __init__(self, parameter_name, message):
super(UnknownArgumentException, self).__init__(
'Unknown value for [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class RequiredArgumentException(ToolException):
"""An exception for when a usually optional argument is required in this case.
"""
def __init__(self, parameter_name, message):
super(RequiredArgumentException, self).__init__(
'Missing required argument [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class BadFileException(ToolException):
"""BadFileException is for problems reading or writing a file."""
|
|
from decimal import Decimal
from datetime import date
from go.vumitools.tests.helpers import djangotest_imports
with djangotest_imports(globals()):
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
from go.billing.models import (
Account, TagPool, MessageCost, Transaction, TransactionArchive,
Statement, LineItem)
from go.billing.tests.helpers import (
start_of_month, end_of_month, this_month, maybe_decimal,
get_billing_account, mk_tagpool, mk_message_cost,
mk_transaction, mk_transaction_archive,
mk_statement, get_session_length_cost,
get_message_credits, get_session_credits,
get_storage_credits, get_session_length_credits, get_line_items)
class TestHelpers(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.user_helper = self.vumi_helper.make_django_user()
self.user = self.user_helper.get_django_user()
self.account = Account.objects.get(user=self.user)
def test_start_of_month(self):
self.assertEqual(start_of_month(date(2015, 3, 23)), date(2015, 3, 1))
self.assertEqual(start_of_month(date(2015, 4, 28)), date(2015, 4, 1))
self.assertEqual(start_of_month(date(2015, 5, 31)), date(2015, 5, 1))
def test_start_of_month_default(self):
self.assertEqual(start_of_month(), start_of_month(date.today()))
def test_end_of_month(self):
self.assertEqual(end_of_month(date(2015, 3, 23)), date(2015, 3, 31))
self.assertEqual(end_of_month(date(2015, 4, 28)), date(2015, 4, 30))
self.assertEqual(end_of_month(date(2015, 5, 31)), date(2015, 5, 31))
def test_end_of_month_default(self):
self.assertEqual(end_of_month(), end_of_month(date.today()))
def test_this_month(self):
self.assertEqual(
this_month(date(2015, 3, 23)),
(date(2015, 3, 1), date(2015, 3, 31)))
self.assertEqual(
this_month(date(2015, 4, 28)),
(date(2015, 4, 1), date(2015, 4, 30)))
self.assertEqual(
this_month(date(2015, 5, 31)),
(date(2015, 5, 1), date(2015, 5, 31)))
def test_this_month_today(self):
self.assertEqual(this_month(), this_month(date.today()))
def test_maybe_decimal_none(self):
self.assertEqual(maybe_decimal(None), None)
def test_maybe_decimal_float(self):
self.assertEqual(maybe_decimal(23.23), Decimal('23.23'))
def test_maybe_decimal_str(self):
self.assertEqual(maybe_decimal('23.23'), Decimal('23.23'))
def test_maybe_decimal_int(self):
self.assertEqual(maybe_decimal(23), Decimal('23.0'))
def test_maybe_decimal_decimal(self):
self.assertEqual(maybe_decimal(Decimal('23.23')), Decimal('23.23'))
def test_get_billing_account(self):
self.assertEqual(get_billing_account(self.user), self.account)
def test_mk_tagpool(self):
pool = mk_tagpool('pool1')
[found_pool] = TagPool.objects.filter(name='pool1')
self.assertEqual(pool, found_pool)
def test_mk_message_cost(self):
pool = mk_tagpool('pool1')
cost = mk_message_cost(
tag_pool=pool,
message_direction=MessageCost.DIRECTION_INBOUND,
message_cost=0.1,
storage_cost=0.2,
session_cost=0.3,
markup_percent=10.0)
[found_cost] = MessageCost.objects.filter(
tag_pool=pool,
message_direction=MessageCost.DIRECTION_INBOUND,
message_cost=Decimal('0.1'),
storage_cost=Decimal('0.2'),
session_cost=Decimal('0.3'),
markup_percent=Decimal('10.0'))
self.assertEqual(cost, found_cost)
def test_mk_transaction(self):
transaction = mk_transaction(
account=self.account,
transaction_type=Transaction.TRANSACTION_TYPE_MESSAGE,
tag_pool_name='pool1',
tag_name='tag1',
provider='mtn',
message_direction=MessageCost.DIRECTION_INBOUND,
message_cost=0.1,
storage_cost=0.2,
session_cost=0.3,
session_unit_cost=0.4,
session_length_cost=0.4,
markup_percent=10.0,
credit_factor=11.0,
credit_amount=28,
session_length=23,
created=date(2015, 3, 23),
status=Transaction.STATUS_COMPLETED)
[found_transaction] = Transaction.objects.filter(
account_number=self.account.account_number,
transaction_type=Transaction.TRANSACTION_TYPE_MESSAGE,
provider='mtn',
tag_pool_name='pool1',
tag_name='tag1',
message_direction=MessageCost.DIRECTION_INBOUND,
message_cost=Decimal('0.1'),
storage_cost=Decimal('0.2'),
session_cost=Decimal('0.3'),
session_unit_cost=Decimal('0.4'),
session_length_cost=Decimal('0.4'),
message_credits=get_message_credits(0.1, 10.0),
storage_credits=get_storage_credits(0.2, 10.0),
session_credits=get_session_credits(0.3, 10.0),
session_length_credits=get_session_length_credits(0.4, 10.0),
markup_percent=Decimal('10.0'),
credit_factor=Decimal('11.0'),
credit_amount=28,
session_length=Decimal('23.0'),
created=date(2015, 3, 23),
status=Transaction.STATUS_COMPLETED)
self.assertEqual(transaction, found_transaction)
def test_mk_transaction_archive(self):
archive = mk_transaction_archive(
account=self.account,
from_date=date(2015, 3, 21),
to_date=date(2015, 3, 22),
status=TransactionArchive.STATUS_ARCHIVE_COMPLETED)
[found_archive] = TransactionArchive.objects.filter(
account=self.account,
from_date=date(2015, 3, 21),
to_date=date(2015, 3, 22),
status=TransactionArchive.STATUS_ARCHIVE_COMPLETED)
self.assertEqual(archive, found_archive)
def test_mk_statement(self):
statement = mk_statement(
account=self.account,
title='Foo',
statement_type=Statement.TYPE_MONTHLY,
from_date=date(2015, 3, 23),
to_date=date(2015, 4, 23),
items=[{
'billed_by': 'Pool 1',
'channel_type': 'USSD',
'channel': 'Tag 1.1',
'description': 'Messages Received',
'cost': Decimal('150.0'),
'credits': Decimal('200.0'),
}, {
'billed_by': 'Pool 2',
'channel_type': 'SMS',
'channel': 'Tag 2.1',
'description': 'Messages Received',
'cost': Decimal('200.0'),
'credits': None,
}])
[found_statement] = Statement.objects.filter(
account=self.account,
title='Foo',
type=Statement.TYPE_MONTHLY,
from_date=date(2015, 3, 23),
to_date=date(2015, 4, 23))
self.assertEqual(statement, found_statement)
self.assertEqual(1, len(LineItem.objects.filter(
statement=statement,
billed_by='Pool 1',
channel_type='USSD',
channel='Tag 1.1',
description='Messages Received',
cost=Decimal('150.0'),
credits=Decimal('200.0'))))
self.assertEqual(1, len(LineItem.objects.filter(
statement=statement,
billed_by='Pool 2',
channel_type='SMS',
channel='Tag 2.1',
description='Messages Received',
cost=Decimal('200.0'),
credits=None)))
def test_get_session_length_cost(self):
self.assertEqual(
get_session_length_cost(1, 2, 3),
MessageCost.calculate_session_length_cost(
Decimal('1.0'),
Decimal('2.0'),
Decimal('3.0')))
def test_get_message_credits(self):
self.assertEqual(
get_message_credits(0.1, 10.0),
MessageCost.calculate_message_credit_cost(
Decimal('0.1'),
Decimal('10.0')))
def test_get_message_credits_none_cost(self):
self.assertEqual(get_message_credits(0.1, None), None)
def test_get_message_credits_none_markup(self):
self.assertEqual(get_message_credits(None, 10.0), None)
def test_get_storage_credits(self):
self.assertEqual(
get_storage_credits(0.1, 10.0),
MessageCost.calculate_storage_credit_cost(
Decimal('0.1'),
Decimal('10.0')))
def test_get_storage_credits_none_cost(self):
self.assertEqual(get_storage_credits(0.1, None), None)
def test_get_storage_credits_none_markup(self):
self.assertEqual(get_storage_credits(None, 10.0), None)
def test_get_session_credits(self):
self.assertEqual(
get_session_credits(0.1, 10.0),
MessageCost.calculate_session_credit_cost(
Decimal('0.1'),
Decimal('10.0')))
def test_get_session_credits_none_cost(self):
self.assertEqual(get_session_credits(0.1, None), None)
def test_get_session_credits_none_markup(self):
self.assertEqual(get_session_credits(None, 10.0), None)
def test_get_session_length_credits(self):
self.assertEqual(
get_session_length_credits(0.1, 10.0),
MessageCost.calculate_session_length_credit_cost(
Decimal('0.1'),
Decimal('10.0')))
def test_get_session_length_credits_none_cost(self):
self.assertEqual(get_session_length_credits(0.1, None), None)
def test_get_session_length_credits_none_markup(self):
self.assertEqual(get_session_length_credits(None, 10.0), None)
def test_get_line_items(self):
statement = mk_statement(
account=self.account,
items=[{
'billed_by': 'Pool 1',
'description': 'A',
'credits': Decimal('23.23')
}, {
'billed_by': 'Pool 2',
'description': 'B',
'credits': Decimal('23.23')
}, {
'billed_by': 'Pool 3',
'description': 'A',
'credits': Decimal('3.3')
}])
self.assertEqual(list(get_line_items(statement)), [
LineItem.objects.get(
description='A',
credits=Decimal('3.3')),
LineItem.objects.get(
description='A',
credits=Decimal('23.23')),
LineItem.objects.get(
description='B',
credits=Decimal('23.23')),
])
|
|
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import itertools
import platform
import sys
import time
import attr
import pluggy
import py
import six
from more_itertools import collapse
import pytest
from _pytest import nodes
from _pytest.main import EXIT_INTERRUPTED
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_OK
from _pytest.main import EXIT_TESTSFAILED
from _pytest.main import EXIT_USAGEERROR
class MoreQuietAction(argparse.Action):
"""
a modified copy of the argparse count action which counts down and updates
    the legacy quiet attribute at the same time;
used to unify verbosity handling
"""
def __init__(self, option_strings, dest, default=None, required=False, help=None):
super(MoreQuietAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
new_count = getattr(namespace, self.dest, 0) - 1
setattr(namespace, self.dest, new_count)
# todo Deprecate config.quiet
namespace.quiet = getattr(namespace, "quiet", 0) + 1
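# Illustrative sketch (not part of the original module): wiring MoreQuietAction
# into a plain argparse parser. Each "-q" decrements ``verbose`` while the
# legacy ``quiet`` attribute is incremented; the parser and option names used
# here are arbitrary.
def _example_more_quiet_action():
    parser = argparse.ArgumentParser()
    parser.add_argument("-q", "--quiet", action=MoreQuietAction, default=0, dest="verbose")
    namespace = parser.parse_args(["-qq"])
    return namespace.verbose, namespace.quiet  # -> (-2, 2)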
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="increase verbosity.",
    )
group._addoption(
"-q",
"--quiet",
action=MoreQuietAction,
default=0,
dest="verbose",
help="decrease verbosity.",
    )
group._addoption(
"--verbosity", dest="verbose", type=int, default=0, help="set verbosity"
)
group._addoption(
"-r",
action="store",
dest="reportchars",
default="",
metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed, "
"(p)passed, (P)passed with output, (a)all except pP. "
"Warnings are displayed at all times except when "
"--disable-warnings is set",
)
group._addoption(
"--disable-warnings",
"--disable-pytest-warnings",
default=False,
dest="disable_warnings",
action="store_true",
help="disable warnings summary",
)
group._addoption(
"-l",
"--showlocals",
action="store_true",
dest="showlocals",
default=False,
help="show locals in tracebacks (disabled by default).",
)
group._addoption(
"--tb",
metavar="style",
action="store",
dest="tbstyle",
default="auto",
choices=["auto", "long", "short", "no", "line", "native"],
help="traceback print mode (auto/long/short/line/native/no).",
)
group._addoption(
"--show-capture",
action="store",
dest="showcapture",
choices=["no", "stdout", "stderr", "log", "all"],
default="all",
help="Controls how captured stdout/stderr/log is shown on failed tests. "
"Default is 'all'.",
)
group._addoption(
"--fulltrace",
"--full-trace",
action="store_true",
default=False,
help="don't cut any tracebacks (default is to cut).",
)
group._addoption(
"--color",
metavar="color",
action="store",
dest="color",
default="auto",
choices=["yes", "no", "auto"],
help="color terminal output (yes/no/auto).",
)
parser.addini(
"console_output_style",
help="console output: classic or with additional progress information (classic|progress).",
default="progress",
)
def pytest_configure(config):
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, "terminalreporter")
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
reportchars = config.option.reportchars
if not config.option.disable_warnings and "w" not in reportchars:
reportchars += "w"
elif config.option.disable_warnings and "w" in reportchars:
reportchars = reportchars.replace("w", "")
if reportchars:
for char in reportchars:
if char not in reportopts and char != "a":
reportopts += char
elif char == "a":
reportopts = "fEsxXw"
return reportopts
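# Illustrative sketch (not part of the original module): minimal stand-ins for
# the config/option objects show how reportchars "a" expands to the full set
# of report characters, with "w" added because warnings are not disabled. The
# fake classes below are purely for illustration.
def _example_getreportopt():
    class _FakeOption(object):
        reportchars = "a"
        disable_warnings = False
    class _FakeConfig(object):
        option = _FakeOption()
    return getreportopt(_FakeConfig())  # -> "fEsxXw"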
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
@attr.s
class WarningReport(object):
"""
Simple structure to hold warnings information captured by ``pytest_logwarning`` and ``pytest_warning_captured``.
:ivar str message: user friendly message about the warning
:ivar str|None nodeid: node id that generated the warning (see ``get_location``).
:ivar tuple|py.path.local fslocation:
file system location of the source of the warning (see ``get_location``).
:ivar bool legacy: if this warning report was generated from the deprecated ``pytest_logwarning`` hook.
"""
message = attr.ib()
nodeid = attr.ib(default=None)
fslocation = attr.ib(default=None)
legacy = attr.ib(default=False)
def get_location(self, config):
"""
Returns the more user-friendly information about the location
of a warning, or None.
"""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = py.path.local(filename).relto(config.invocation_dir)
if not relpath:
relpath = str(filename)
return "%s:%s" % (relpath, linenum)
else:
return str(self.fslocation)
return None
class TerminalReporter(object):
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self._session = None
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
# self.writer will be deprecated in pytest-3.4
self.writer = self._tw
self._screen_width = self._tw.fullwidth
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
self._progress_nodeids_reported = set()
self._show_progress_info = self._determine_show_progress_info()
def _determine_show_progress_info(self):
"""Return True if we should display progress information based on the current config"""
# do not show progress if we are not capturing output (#3038)
if self.config.getoption("capture") == "no":
return False
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow"):
return False
return self.config.getini("console_output_style") in ("progress", "count")
def hasopt(self, char):
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
if self.currentfspath is not None and self._show_progress_info:
self._write_progress_information_filling_space()
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not isinstance(line, six.text_type):
line = six.text_type(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
"""
Rewinds the terminal cursor to the beginning and writes the given line.
:kwarg erase: if True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop("erase", False)
if erase:
fill_count = self._tw.fullwidth - len(line) - 1
fill = " " * fill_count
else:
fill = ""
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in six.text_type(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
warning = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid, legacy=True
)
warnings.append(warning)
def pytest_warning_captured(self, warning_message, item):
# from _pytest.nodes import get_fslocation_from_item
from _pytest.warnings import warning_record_to_str
warnings = self.stats.setdefault("warnings", [])
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)
nodeid = item.nodeid if item is not None else ""
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
warnings.append(warning_report)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault("deselected", []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
category, letter, word = res
if isinstance(word, tuple):
word, markup = word
else:
markup = None
self.stats.setdefault(category, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
running_xdist = hasattr(rep, "node")
if self.verbosity <= 0:
if not running_xdist and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
self._progress_nodeids_reported.add(rep.nodeid)
if markup is None:
if rep.passed:
markup = {"green": True}
elif rep.failed:
markup = {"red": True}
elif rep.skipped:
markup = {"yellow": True}
else:
markup = {}
line = self._locationline(rep.nodeid, *rep.location)
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
self.ensure_newline()
self._tw.write("[%s]" % rep.node.gateway.id)
if self._show_progress_info:
self._tw.write(
self._get_progress_information_message() + " ", cyan=True
)
else:
self._tw.write(" ")
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_runtest_logfinish(self, nodeid):
if self.config.getini("console_output_style") == "count":
num_tests = self._session.testscollected
progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests)))
else:
progress_length = len(" [100%]")
if self.verbosity <= 0 and self._show_progress_info:
self._progress_nodeids_reported.add(nodeid)
last_item = (
len(self._progress_nodeids_reported) == self._session.testscollected
)
if last_item:
self._write_progress_information_filling_space()
else:
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
msg = self._get_progress_information_message()
self._tw.write(msg + "\n", cyan=True)
def _get_progress_information_message(self):
if self.config.getoption("capture") == "no":
return ""
collected = self._session.testscollected
if self.config.getini("console_output_style") == "count":
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
format_string = " [{}/{{}}]".format(counter_format)
return format_string.format(len(progress), collected)
return " [ {} / {} ]".format(collected, collected)
else:
if collected:
progress = len(self._progress_nodeids_reported) * 100 // collected
return " [{:3d}%]".format(progress)
return " [100%]"
def _write_progress_information_filling_space(self):
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
self.write(msg.rjust(fill), cyan=True)
@property
def _width_of_current_line(self):
"""Return the width of current line, using the superior implementation of py-1.6 when available"""
try:
return self._tw.width_of_current_line
except AttributeError:
# py < 1.6.0
return self._tw.chars_on_current_line
def pytest_collection(self):
if not self.isatty and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
# self.write_fspath_result(report.nodeid, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
if final:
line = "collected "
else:
line = "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d errors" % errors
if deselected:
line += " / %d deselected" % deselected
if skipped:
line += " / %d skipped" % skipped
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._session = session
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, "pypy_version_info"):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__,
py.__version__,
pluggy.__version__,
)
if (
self.verbosity > 0
or self.config.option.debug
or getattr(self.config.option, "pastebin", None)
):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir
)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(self, lines):
lines.reverse()
for line in collapse(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = " " + config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get("failed"):
self._tw.sep("!", "collection failures")
for rep in self.stats.get("failed"):
rep.toterminal(self._tw)
return 1
return 0
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config, startdir=self.startdir, items=session.items
)
self._write_report_lines_from_hooks(lines)
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split("::", 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[: len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack) :]:
stack.append(col)
# if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK,
EXIT_TESTSFAILED,
EXIT_INTERRUPTED,
EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED,
)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(
terminalreporter=self, exitstatus=exitstatus
)
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_stats()
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(self):
self.summary_errors()
self.summary_failures()
yield
self.summary_warnings()
self.summary_passes()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, "_keyboardinterrupt_memo"):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
self._tw.line(
"(to show a full traceback on KeyboardInterrupt use --fulltrace)",
yellow=True,
)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[: -len(domain)]
values = domain.split("[")
values[0] = values[0].replace(".", "::") # don't replace '.' in params
line += "[".join(values)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
"\\", nodes.SEP
):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, "location"):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, "_pdbshown"):
values.append(x)
return values
def summary_warnings(self):
if self.hasopt("w"):
all_warnings = self.stats.get("warnings")
if not all_warnings:
return
grouped = itertools.groupby(
all_warnings, key=lambda wr: wr.get_location(self.config)
)
self.write_sep("=", "warnings summary", yellow=True, bold=False)
for location, warning_records in grouped:
# legacy warnings show their location explicitly, while standard warnings look better without
# it because the location is already formatted into the message
warning_records = list(warning_records)
if location:
self._tw.line(str(location))
for w in warning_records:
if location:
lines = w.message.splitlines()
indented = "\n".join(" " + x for x in lines)
message = indented.rstrip()
else:
message = w.message.rstrip()
self._tw.line(message)
self._tw.line()
self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html")
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports("passed")
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
if rep.sections:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def print_teardown_sections(self, rep):
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
if "teardown" in secname:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports("failed")
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
markup = {"red": True, "bold": True}
self.write_sep("_", msg, **markup)
self._outrep_summary(rep)
for report in self.getreports(""):
if report.nodeid == rep.nodeid and report.when == "teardown":
self.print_teardown_sections(report)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if not hasattr(rep, "when"):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, "bold": True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def build_summary_stats_line(stats):
keys = ("failed passed skipped deselected xfailed xpassed warnings error").split()
unknown_key_seen = False
for key in stats.keys():
if key not in keys:
if key: # setup/teardown reports have an empty key, ignore them
keys.append(key)
unknown_key_seen = True
parts = []
for key in keys:
val = stats.get(key, None)
if val:
parts.append("%d %s" % (len(val), key))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if "failed" in stats or "error" in stats:
color = "red"
elif "warnings" in stats or unknown_key_seen:
color = "yellow"
elif "passed" in stats:
color = "green"
else:
color = "yellow"
return (line, color)
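# Illustrative sketch (not part of the original module): only the lengths of
# the per-outcome report lists matter here, so placeholder lists are enough to
# show the summary line and its colour.
def _example_build_summary_stats_line():
    line, color = build_summary_stats_line({"failed": [None], "passed": [None, None]})
    return line, color  # -> ("1 failed, 2 passed", "red")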
def _plugin_nameversions(plugininfo):
values = []
for plugin, dist in plugininfo:
# gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in values:
values.append(name)
return values
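# Illustrative sketch (not part of the original module): any objects exposing
# ``project_name`` and ``version`` work as distributions; the plugin object in
# each pair is ignored and duplicates are collapsed. The plugin name and
# version below are made up.
def _example_plugin_nameversions():
    import collections
    _Dist = collections.namedtuple("_Dist", "project_name version")
    plugininfo = [(None, _Dist("pytest-xdist", "1.29.0")),
                  (None, _Dist("pytest-xdist", "1.29.0"))]
    return _plugin_nameversions(plugininfo)  # -> ["xdist-1.29.0"]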
|
|
# -*- coding: utf-8 -*-
import cgi, web
from datetime import datetime
from django.utils import simplejson
from geohash import Geohash
from google.appengine.api import urlfetch
from models import Place
from urllib import quote, urlencode
from web.contrib.template import render_mako
from web import form, seeother
__all__ = ['index', 'about', 'privacy', 'api', 'location', 'place', 'recent',
'within', 'abusive', 'addtags', 'tag']
render_mako2 = render_mako(directories=['templates'],
input_encoding='utf-8',
output_encoding='utf-8')
def render(path, **kwargs):
return getattr(render_mako2, path)(**kwargs)
class index(object):
def GET(self):
return render('main/index')
class about(object):
def GET(self):
return render('main/about')
class privacy(object):
def GET(self):
return render('main/privacy')
class api(object):
def GET(self):
return render('main/api')
class location(object):
def GET(self, loc_hash):
coords = Geohash(loc_hash).point()
return render('main/location', coords=coords)
class place(object):
myform = form.Form(
form.Textbox('name'),
form.Textbox('address'),
form.Textbox('description'),
form.Textbox('bitly_login'),
form.Textbox('bitly_apikey'),
form.Textbox('longitude'),
form.Textbox('latitude'))
mylogin = 'jacintos'
myapikey = 'R_2555944778356e8a2fa1c15f33b8e3f9'
def GET(self, model_id):
place = Place.get_by_id(int(model_id))
if place is None:
raise web.webapi.notfound()
else:
place.id = model_id
coords = Geohash(place.geohash).point()
tags = ['<a href="/tag/' + quote(t) + '">' + t + '</a>' for t in place.tags]
return render('main/place', place=place, coords=coords, tags=tags)
def POST(self):
f = place.myform()
if f.validates():
coords = float(f.d.get('longitude')), float(f.d.get('latitude'))
login = f.d.get('bitly_login') or place.mylogin
apikey = f.d.get('bitly_apikey') or place.myapikey
p = Place()
p.name = cgi.escape(f.d.get('name'))
p.address = cgi.escape(f.d.get('address'))
p.description = cgi.escape(f.d.get('description'))
p.geohash = str(Geohash(coords))
p.put()
model_id = p.key().id()
url = web.ctx.home + '/place/' + str(model_id)
p.bitly_hash = self._bitly_hash(url, login, apikey)
if p.bitly_hash is None:
# Try again in case user's account was junk
p.bitly_hash = self._bitly_hash(url, place.mylogin,
place.myapikey)
p.put()
else:
raise web.webapi.badrequest()
raise seeother(url)
def _bitly_hash(self, url, login, apikey):
opts = {
'longUrl' : url,
'login' : login,
'apiKey' : apikey,
'format' : 'json',
'version' : '2.0.1'
}
base = 'http://api.bit.ly/shorten?'
resp = urlfetch.fetch(base + urlencode(opts))
if resp.status_code == 200:
content = simplejson.loads(resp.content)
if content.get('statusCode') == 'OK':
return content['results'][url].get('userHash')
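# Illustrative note (not part of the original module): based on the parsing
# above, a successful bit.ly response is expected to look roughly like
# {"statusCode": "OK", "results": {<long url>: {"userHash": "abc123"}}};
# anything else makes _bitly_hash return None. The hash value is made up.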
class recent(object):
def GET(self):
query = Place.all()
places = query.order('-created_at').fetch(limit=100)
base_url = web.ctx.home + '/place/'
for place in places:
place.url = base_url + str(place.key().id())
place.long, place.lat = Geohash(place.geohash).point()
place.updated_at = place.created_at
web.header('Content-Type', 'application/atom+xml')
updated_at = datetime.now()
return render('atom/recent', updated_at=updated_at, places=places)
class within(object):
def GET(self):
i = web.webapi.input(bbox=None)
if i.bbox is None:
raise web.badrequest()
try:
coords = [float(s) for s in i.bbox.split(',')]
except Exception:
raise web.badrequest()
if len(coords) != 4:
raise web.badrequest()
tl = str(Geohash((coords[0], coords[1])))
br = str(Geohash((coords[2], coords[3])))
query = Place.all()
query.filter('geohash >', tl).filter('geohash <', br)
res = {
'type' : 'FeatureCollection',
'crs' : {
'type' : 'EPSG',
'properties' : {
'code' : 4326,
'coordinate_order' : [1, 0]
}
}
}
features = []
for p in query:
coords = Geohash(p.geohash).point()
features.append({
'type' : 'Feature',
'geometry' : {
'type' : 'Point',
'coordinates' : coords
},
'properties' : {
'name' : p.name,
'address' : p.address,
'id' : p.key().id(),
'hash' : p.bitly_hash,
}
})
res['features'] = features
web.header('Content-Type', 'application/json')
return simplejson.dumps(res)
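# Illustrative note (not part of the original module): the filter above appears
# to rely on geohash strings sorting roughly like the coordinates they encode,
# so a bbox value such as "-122.5,37.7,-122.3,37.8" (four comma-separated
# coordinates; values made up) becomes a plain string-range query between the
# geohashes of the two corners.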
class abusive(object):
def POST(self, model_id):
place = Place.get_by_id(int(model_id))
if place is None:
raise web.notfound()
place.abusive = True
place.put()
return '{"code":200}'
class addtags(object):
def POST(self, model_id):
place = Place.get_by_id(int(model_id))
if place is None:
raise web.notfound()
i = web.webapi.input(tags=None)
ts = [t.strip() for t in i.tags.split(',')]
place.tags.extend(ts)
place.put()
return '{"code":200}'
class tag(object):
def GET(self, name):
query = Place.all()
query.filter('tags =', name).order('-created_at')
places = query.fetch(100)
return render('main/tagged', name=name, places=places)
def my_internal_error():
return web.internalerror(render('error/500'))
def my_not_found():
return web.notfound(render('error/404'))
def my_bad_request():
return web.badrequest(render('error/400'))
web.webapi.internalerror = my_internal_error
web.webapi.notfound = my_not_found
web.webapi.badrequest = my_bad_request
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import sys
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
    return dict((k, v) for k, v in original.iteritems() if "_pass" not in k)
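# Illustrative sketch (not part of the original module): any key containing
# "_pass" is dropped and everything else is kept; the values used here are
# made up.
def _example_cleanse_dict():
    return _cleanse_dict({'admin_password': 'secret', 'flavor': 'm1.tiny'})  # -> {'flavor': 'm1.tiny'}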
def wrap_exception(notifier=None, get_notifier=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It logs the exception as well as optionally sending
it to the notification system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier or get_notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, context,
*args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
event_type = f.__name__
(notifier or get_notifier()).error(context,
event_type,
payload)
return functools.wraps(f)(wrapped)
return inner
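# Illustrative sketch (not part of the original module): a hypothetical manager
# method decorated with wrap_exception(); any exception it raises is logged and
# re-raised. If a notifier were supplied, the (cleansed) call arguments would
# also be sent to the notification system.
class _ExampleManager(object):
    @wrap_exception()
    def example_action(self, context, instance_id):
        raise NotImplementedError()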
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise exc_info[0], exc_info[1], exc_info[2]
else:
# at least get the core message out if something happened
message = self.msg_fmt
super(NovaException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
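# Illustrative sketch (not part of the original module): a subclass only needs
# its own msg_fmt (and optionally a code); constructor keyword arguments fill
# the format placeholders. The exception below is hypothetical.
class _ExampleQuotaError(NovaException):
    msg_fmt = _("Quota exceeded for %(resource)s: %(reason)s")
    code = 413
# _ExampleQuotaError(resource='instances', reason='limit reached').format_message()
# returns "Quota exceeded for instances: limit reached".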
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class Forbidden(NovaException):
ec2_code = 'AuthFailure'
msg_fmt = _("Not authorized.")
code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
ec2_code = 'IncorrectState'
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts.")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode") + ": %(access_mode)s"
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
msg_fmt = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
msg_fmt = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Cannot perform action '%(action)s' on aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
msg_fmt = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolumeID.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshotID.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
msg_fmt = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to,")
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed ip %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed ips could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Cannot disassociate auto assigned floating ip")
class KeypairNotFound(NotFound):
ec2_code = 'InvalidKeyPair.NotFound'
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
ec2_code = 'InvalidGroup.Duplicate'
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
msg_fmt = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class SecurityGroupRuleExists(Invalid):
ec2_code = 'InvalidPermission.Duplicate'
msg_fmt = _("Rule already exists in group: %(rule)s")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
msg_fmt = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
msg_fmt = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
msg_fmt = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
msg_fmt = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(NovaException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("Flavor's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
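# Illustrative note (not in the original source): the subclasses below
# inherit this HTTP 413 status, the Retry-After header and safe=True, so a
# quota failure reaches the API client with its message intact, e.g.:
#
#     raise TooManyInstances(overs='instances', req=2, used=10,
#                            allowed=10, resource='instances')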
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(NovaException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
class CidrConflict(NovaException):
msg_fmt = _("There was a conflict when trying to complete your request.")
code = 409
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
msg_fmt = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
msg_fmt = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
ec2_code = 'InvalidInstanceID.Malformed'
msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
msg_fmt = _("Unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class UnexpectedVMStateError(NovaException):
msg_fmt = _("Unexpected VM state: expecting %(expected)s but "
"the actual state is %(actual)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class ServiceGroupUnavailable(NovaException):
msg_fmt = _("The service from servicegroup driver %(driver)s is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
class UnsupportedObjectError(NovaException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(NovaException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ObjectFieldInvalid(NovaException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class CoreAPIMissing(NovaException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMetadataNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no metadata with "
"key %(metadata_key)s.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class PluginRetriesExceeded(NovaException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NovaException):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(NovaException):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(NovaException):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request (%requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(NovaException):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(NovaException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
class PciTrackerInvalidNodeId(NovaException):
msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoBlockMigrationForConfigDriveInLibVirt(NovaException):
msg_fmt = _("Block migration of instances with config drives is not "
"supported in libvirt.")
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import customer_label
from google.ads.googleads.v9.services.types import customer_label_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import CustomerLabelServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CustomerLabelServiceGrpcTransport
class CustomerLabelServiceClientMeta(type):
"""Metaclass for the CustomerLabelService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CustomerLabelServiceTransport]]
_transport_registry["grpc"] = CustomerLabelServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[CustomerLabelServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CustomerLabelServiceClient(metaclass=CustomerLabelServiceClientMeta):
"""Service to manage labels on customers."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomerLabelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomerLabelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
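# Illustrative sketch (not part of the generated client); the key file path
# below is hypothetical:
#
#     client = CustomerLabelServiceClient.from_service_account_file(
#         "/path/to/service_account.json"
#     )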
from_service_account_json = from_service_account_file
@property
def transport(self) -> CustomerLabelServiceTransport:
"""Return the transport used by the client instance.
Returns:
CustomerLabelServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def customer_path(customer_id: str,) -> str:
"""Return a fully-qualified customer string."""
return "customers/{customer_id}".format(customer_id=customer_id,)
@staticmethod
def parse_customer_path(path: str) -> Dict[str, str]:
"""Parse a customer path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def customer_label_path(customer_id: str, label_id: str,) -> str:
"""Return a fully-qualified customer_label string."""
return "customers/{customer_id}/customerLabels/{label_id}".format(
customer_id=customer_id, label_id=label_id,
)
@staticmethod
def parse_customer_label_path(path: str) -> Dict[str, str]:
"""Parse a customer_label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/customerLabels/(?P<label_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def label_path(customer_id: str, label_id: str,) -> str:
"""Return a fully-qualified label string."""
return "customers/{customer_id}/labels/{label_id}".format(
customer_id=customer_id, label_id=label_id,
)
@staticmethod
def parse_label_path(path: str) -> Dict[str, str]:
"""Parse a label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/labels/(?P<label_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
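# Illustrative sketch (not part of the generated client): the path helpers
# above are simple format/parse pairs, e.g.:
#
#     path = CustomerLabelServiceClient.customer_label_path("123", "456")
#     # -> "customers/123/customerLabels/456"
#     CustomerLabelServiceClient.parse_customer_label_path(path)
#     # -> {"customer_id": "123", "label_id": "456"}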
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CustomerLabelServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the customer label service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CustomerLabelServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CustomerLabelServiceTransport):
# transport is a CustomerLabelServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CustomerLabelServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_customer_label(
self,
request: Union[
customer_label_service.GetCustomerLabelRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> customer_label.CustomerLabel:
r"""Returns the requested customer-label relationship in full
detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetCustomerLabelRequest, dict]):
The request object. Request message for
[CustomerLabelService.GetCustomerLabel][google.ads.googleads.v9.services.CustomerLabelService.GetCustomerLabel].
resource_name (:class:`str`):
Required. The resource name of the
customer-label relationship to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.CustomerLabel:
Represents a relationship between a
customer and a label. This customer may
not have access to all the labels
attached to it. Additional
CustomerLabels may be returned by
increasing permissions with login-customer-id.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a customer_label_service.GetCustomerLabelRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, customer_label_service.GetCustomerLabelRequest
):
request = customer_label_service.GetCustomerLabelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_customer_label
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
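# Illustrative usage sketch (not part of the generated client); the
# credentials object and the customer/label ids are hypothetical:
#
#     client = CustomerLabelServiceClient(credentials=creds)
#     customer_label = client.get_customer_label(
#         resource_name=CustomerLabelServiceClient.customer_label_path(
#             "1234567890", "111"
#         )
#     )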
def mutate_customer_labels(
self,
request: Union[
customer_label_service.MutateCustomerLabelsRequest, dict
] = None,
*,
customer_id: str = None,
operations: Sequence[
customer_label_service.CustomerLabelOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> customer_label_service.MutateCustomerLabelsResponse:
r"""Creates and removes customer-label relationships. Operation
statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__
`HeaderError <>`__ `InternalError <>`__ `LabelError <>`__
`MutateError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.MutateCustomerLabelsRequest, dict]):
The request object. Request message for
[CustomerLabelService.MutateCustomerLabels][google.ads.googleads.v9.services.CustomerLabelService.MutateCustomerLabels].
customer_id (:class:`str`):
Required. ID of the customer whose
customer-label relationships are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v9.services.types.CustomerLabelOperation]`):
Required. The list of operations to
perform on customer-label relationships.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.services.types.MutateCustomerLabelsResponse:
Response message for a customer
labels mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a customer_label_service.MutateCustomerLabelsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, customer_label_service.MutateCustomerLabelsRequest
):
request = customer_label_service.MutateCustomerLabelsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_customer_labels
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
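# Illustrative usage sketch (not part of the generated client); the ids are
# hypothetical and it is assumed that CustomerLabelOperation exposes a
# 'create' field whose 'label' attribute takes a label resource name:
#
#     operation = customer_label_service.CustomerLabelOperation()
#     operation.create.label = CustomerLabelServiceClient.label_path(
#         "1234567890", "2222"
#     )
#     response = client.mutate_customer_labels(
#         customer_id="1234567890", operations=[operation]
#     )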
__all__ = ("CustomerLabelServiceClient",)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import generate_protocol_externs
import re
import shutil
import subprocess
import sys
import tempfile
scripts_path = os.path.dirname(os.path.abspath(__file__))
devtools_path = os.path.dirname(scripts_path)
inspector_path = os.path.dirname(devtools_path) + "/core/inspector"
devtools_frontend_path = devtools_path + "/front_end"
protocol_externs_path = devtools_frontend_path + "/protocol_externs.js"
webgl_rendering_context_idl_path = os.path.dirname(devtools_path) + "/core/html/canvas/WebGLRenderingContext.idl"
closure_compiler_jar = scripts_path + "/closure/compiler.jar"
jsdoc_validator_jar = scripts_path + "/jsdoc-validator/jsdoc-validator.jar"
java_exec = "java -Xms512m -server -XX:+TieredCompilation"
generate_protocol_externs.generate_protocol_externs(protocol_externs_path, devtools_path + "/protocol.json")
jsmodule_name_prefix = "jsmodule_"
modules = [
{
"name": "common",
"dependencies": [],
"sources": [
"Color.js",
"DOMExtension.js",
"Object.js",
"ParsedURL.js",
"Progress.js",
"Settings.js",
"TextRange.js",
"UIString.js",
"UserMetrics.js",
"utilities.js",
"Geometry.js",
]
},
{
"name": "sdk",
"dependencies": ["common"],
"sources": [
"ApplicationCacheModel.js",
"CompilerScriptMapping.js",
"ConsoleModel.js",
"ContentProvider.js",
"ContentProviderBasedProjectDelegate.js",
"ContentProviders.js",
"CookieParser.js",
"CSSFormatter.js",
"CSSMetadata.js",
"CSSStyleModel.js",
"CSSStyleSheetMapping.js",
"BreakpointManager.js",
"Database.js",
"DOMAgent.js",
"DOMStorage.js",
"DebuggerModel.js",
"DebuggerScriptMapping.js",
"FileManager.js",
"FileSystemMapping.js",
"FileSystemModel.js",
"FileSystemProjectDelegate.js",
"FileUtils.js",
"HAREntry.js",
"IndexedDBModel.js",
"InspectorBackend.js",
"IsolatedFileSystemManager.js",
"IsolatedFileSystem.js",
"JavaScriptFormatter.js",
"Linkifier.js",
"NetworkLog.js",
"NetworkUISourceCodeProvider.js",
"OverridesSupport.js",
"PresentationConsoleMessageHelper.js",
"RuntimeModel.js",
"SASSSourceMapping.js",
"Script.js",
"ScriptFormatter.js",
"ScriptFormatterWorker.js",
"ScriptSnippetModel.js",
"SimpleWorkspaceProvider.js",
"SnippetStorage.js",
"SourceMapping.js",
"StylesSourceMapping.js",
"TempFile.js",
"TimelineManager.js",
"RemoteObject.js",
"Resource.js",
"DefaultScriptMapping.js",
"ResourceScriptMapping.js",
"LiveEditSupport.js",
"ResourceTreeModel.js",
"ResourceType.js",
"ResourceUtils.js",
"SourceMap.js",
"TracingAgent.js",
"NetworkManager.js",
"NetworkRequest.js",
"UISourceCode.js",
"Workspace.js",
"WorkspaceController.js",
]
},
{
"name": "ui",
"dependencies": ["common"],
"sources": [
"Checkbox.js",
"ContextMenu.js",
"CompletionDictionary.js",
"DOMSyntaxHighlighter.js",
"DataGrid.js",
"Dialog.js",
"DockController.js",
"Drawer.js",
"EmptyView.js",
"FilterBar.js",
"GoToLineDialog.js",
"HelpScreen.js",
"InspectorView.js",
"KeyboardShortcut.js",
"OverviewGrid.js",
"Panel.js",
"Placard.js",
"Popover.js",
"ProgressIndicator.js",
"PropertiesSection.js",
"SearchableView.js",
"Section.js",
"SidebarPane.js",
"SidebarTreeElement.js",
"ShortcutsScreen.js",
"ShowMoreDataGridNode.js",
"SidebarOverlay.js",
"SoftContextMenu.js",
"Spectrum.js",
"SplitView.js",
"SidebarView.js",
"StatusBarButton.js",
"SuggestBox.js",
"TabbedPane.js",
"TextEditor.js",
"TextPrompt.js",
"TextUtils.js",
"TimelineGrid.js",
"UIUtils.js",
"View.js",
"ViewportControl.js",
"treeoutline.js",
]
},
{
"name": "components",
"dependencies": ["sdk", "ui"],
"sources": [
"AdvancedSearchController.js",
"HandlerRegistry.js",
"ConsoleMessage.js",
"CookiesTable.js",
"DOMBreakpointsSidebarPane.js",
"DOMPresentationUtils.js",
"ElementsTreeOutline.js",
"FontView.js",
"ImageView.js",
"NativeBreakpointsSidebarPane.js",
"InspectElementModeController.js",
"ObjectPopoverHelper.js",
"ObjectPropertiesSection.js",
"ScreencastView.js",
"SourceFrame.js",
"ResourceView.js",
]
},
{
"name": "elements",
"dependencies": ["components"],
"sources": [
"CSSNamedFlowCollectionsView.js",
"CSSNamedFlowView.js",
"ElementsPanel.js",
"ElementsPanelDescriptor.js",
"EventListenersSidebarPane.js",
"MetricsSidebarPane.js",
"OverridesView.js",
"PlatformFontsSidebarPane.js",
"PropertiesSidebarPane.js",
"StylesSidebarPane.js",
"RenderingOptionsView.js",
]
},
{
"name": "network",
"dependencies": ["components"],
"sources": [
"NetworkItemView.js",
"RequestCookiesView.js",
"RequestHeadersView.js",
"RequestHTMLView.js",
"RequestJSONView.js",
"RequestPreviewView.js",
"RequestResponseView.js",
"RequestTimingView.js",
"RequestView.js",
"ResourceWebSocketFrameView.js",
"NetworkPanel.js",
"NetworkPanelDescriptor.js",
]
},
{
"name": "resources",
"dependencies": ["components"],
"sources": [
"ApplicationCacheItemsView.js",
"CookieItemsView.js",
"DatabaseQueryView.js",
"DatabaseTableView.js",
"DirectoryContentView.js",
"DOMStorageItemsView.js",
"FileContentView.js",
"FileSystemView.js",
"IndexedDBViews.js",
"ResourcesPanel.js",
]
},
{
"name": "workers",
"dependencies": ["components"],
"sources": [
"WorkerManager.js",
]
},
{
"name": "scripts",
"dependencies": ["components", "workers"],
"sources": [
"BreakpointsSidebarPane.js",
"CSSSourceFrame.js",
"CallStackSidebarPane.js",
"FilePathScoreFunction.js",
"FilteredItemSelectionDialog.js",
"JavaScriptSourceFrame.js",
"NavigatorOverlayController.js",
"NavigatorView.js",
"RevisionHistoryView.js",
"ScopeChainSidebarPane.js",
"SourcesNavigator.js",
"SourcesPanel.js",
"SourcesPanelDescriptor.js",
"SourcesSearchScope.js",
"StyleSheetOutlineDialog.js",
"TabbedEditorContainer.js",
"UISourceCodeFrame.js",
"WatchExpressionsSidebarPane.js",
"WorkersSidebarPane.js",
]
},
{
"name": "console",
"dependencies": ["components"],
"sources": [
"ConsoleView.js",
"ConsolePanel.js",
]
},
{
"name": "timeline",
"dependencies": ["components"],
"sources": [
"DOMCountersGraph.js",
"MemoryStatistics.js",
"PieChart.js",
"TimelineEventOverview.js",
"TimelineFrameOverview.js",
"TimelineMemoryOverview.js",
"TimelineModel.js",
"TimelineOverviewPane.js",
"TimelinePanel.js",
"TimelinePanelDescriptor.js",
"TimelinePresentationModel.js",
"TimelineFrameController.js"
]
},
{
"name": "audits",
"dependencies": ["components"],
"sources": [
"AuditCategories.js",
"AuditController.js",
"AuditFormatters.js",
"AuditLauncherView.js",
"AuditResultView.js",
"AuditRules.js",
"AuditsPanel.js",
]
},
{
"name": "codemirror",
"dependencies": ["components"],
"sources": [
"CodeMirrorTextEditor.js",
"CodeMirrorUtils.js",
]
},
{
"name": "layers",
"dependencies": ["components"],
"sources": [
"LayerTreeModel.js",
"LayersPanel.js",
"LayersPanelDescriptor.js",
"LayerTree.js",
"Layers3DView.js",
"LayerDetailsView.js",
"PaintProfilerView.js",
]
},
{
"name": "extensions",
"dependencies": ["components"],
"sources": [
"ExtensionAPI.js",
"ExtensionAuditCategory.js",
"ExtensionPanel.js",
"ExtensionRegistryStub.js",
"ExtensionServer.js",
"ExtensionView.js",
]
},
{
"name": "settings",
"dependencies": ["components", "extensions"],
"sources": [
"SettingsScreen.js",
"EditFileSystemDialog.js",
]
},
{
"name": "tests",
"dependencies": ["components"],
"sources": [
"TestController.js",
]
},
{
"name": "profiler",
"dependencies": ["components", "workers"],
"sources": [
"AllocationProfile.js",
"BottomUpProfileDataGridTree.js",
"CPUProfileView.js",
"FlameChart.js",
"HeapSnapshot.js",
"HeapSnapshotDataGrids.js",
"HeapSnapshotGridNodes.js",
"HeapSnapshotLoader.js",
"HeapSnapshotProxy.js",
"HeapSnapshotView.js",
"HeapSnapshotWorker.js",
"HeapSnapshotWorkerDispatcher.js",
"JSHeapSnapshot.js",
"ProfileDataGridTree.js",
"ProfilesPanel.js",
"ProfilesPanelDescriptor.js",
"ProfileLauncherView.js",
"TopDownProfileDataGridTree.js",
"CanvasProfileView.js",
"CanvasReplayStateView.js",
]
},
{
"name": "host_stub",
"dependencies": ["components", "profiler", "timeline"],
"sources": [
"InspectorFrontendAPI.js",
"InspectorFrontendHostStub.js",
]
}
]
# `importScript` function must not be used in any files
# except module headers. Refer to devtools.gyp file for
# the module header list.
allowed_import_statements_files = [
"utilities.js",
"ElementsPanel.js",
"ResourcesPanel.js",
"NetworkPanel.js",
"SourcesPanel.js",
"TimelinePanel.js",
"ProfilesPanel.js",
"AuditsPanel.js",
"LayersPanel.js",
"CodeMirrorTextEditor.js",
]
type_checked_jsdoc_tags_list = ["param", "return", "type", "enum"]
type_checked_jsdoc_tags_or = "|".join(type_checked_jsdoc_tags_list)
# Basic regex for invalid JsDoc types: an object type name ([A-Z][A-Za-z0-9.]+[A-Za-z0-9]) not preceded by '!', '?', ':' (this, new), or '.' (object property).
invalid_type_regex = re.compile(r"@(?:" + type_checked_jsdoc_tags_or + r")\s*\{.*(?<![!?:.A-Za-z0-9])([A-Z][A-Za-z0-9.]+[A-Za-z0-9]).*\}")
invalid_type_designator_regex = re.compile(r"@(?:" + type_checked_jsdoc_tags_or + r")\s*.*([?!])=?\}")
def verify_importScript_usage():
for module in modules:
for file_name in module['sources']:
if file_name in allowed_import_statements_files:
continue
sourceFile = open(devtools_frontend_path + "/" + file_name, "r")
source = sourceFile.read()
sourceFile.close()
if "importScript(" in source:
print "ERROR: importScript function is allowed in module header files only (found in %s)" % file_name
def dump_all_checked_files():
file_list = []
for module in modules:
for source in module["sources"]:
file_list.append(devtools_frontend_path + "/" + source)
return " ".join(file_list)
def verify_jsdoc_extra():
os.system("%s -jar %s %s" % (java_exec, jsdoc_validator_jar, dump_all_checked_files()))
def verify_jsdoc():
for module in modules:
for file_name in module['sources']:
lineIndex = 0
full_file_name = devtools_frontend_path + "/" + file_name
with open(full_file_name, "r") as sourceFile:
for line in sourceFile:
line = line.rstrip()
lineIndex += 1
if not line:
continue
verify_jsdoc_line(full_file_name, lineIndex, line)
verify_jsdoc_extra()
def verify_jsdoc_line(fileName, lineIndex, line):
def print_error(message, errorPosition):
print "%s:%s: ERROR - %s\n%s\n%s\n" % (fileName, lineIndex, message, line, " " * errorPosition + "^")
match = re.search(invalid_type_regex, line)
if match:
print_error("Type '%s' nullability not marked explicitly with '?' (nullable) or '!' (non-nullable)" % match.group(1), match.start(1))
match = re.search(invalid_type_designator_regex, line)
    if match:
print_error("Type nullability indicator misplaced, should precede type", match.start(1))
def check_java_path():
proc = subprocess.Popen("which java", stdout=subprocess.PIPE, shell=True)
(javaPath, _) = proc.communicate()
if proc.returncode != 0:
print "Cannot find java ('which java' return code = %d, should be 0)" % proc.returncode
sys.exit(1)
print "Java executable: " + re.sub(r"\n$", "", javaPath)
check_java_path()
print "Verifying 'importScript' function usage..."
verify_importScript_usage()
print "Verifying JSDoc comments..."
verify_jsdoc()
modules_by_name = {}
for module in modules:
modules_by_name[module["name"]] = module
def dump_module(name, recursively, processed_modules):
if name in processed_modules:
return ""
processed_modules[name] = True
module = modules_by_name[name]
command = ""
if recursively:
for dependency in module["dependencies"]:
command += dump_module(dependency, recursively, processed_modules)
command += " \\\n --module " + jsmodule_name_prefix + module["name"] + ":"
command += str(len(module["sources"]))
firstDependency = True
for dependency in module["dependencies"]:
if firstDependency:
command += ":"
else:
command += ","
firstDependency = False
command += jsmodule_name_prefix + dependency
for script in module["sources"]:
command += " \\\n --js " + devtools_frontend_path + "/" + script
return command
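# For illustration only (assuming jsmodule_name_prefix == "jsmodule_", which is
# defined earlier in this script): dump_module("console", False, {}) contributes
# a fragment along the lines of
#   --module jsmodule_console:2:jsmodule_components \
#   --js <devtools_frontend_path>/ConsoleView.js \
#   --js <devtools_frontend_path>/ConsolePanel.js
# i.e. the module name, its source count, its dependencies, and one --js flag
# per source file.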
modules_dir = tempfile.mkdtemp()
compiler_command = "%s -jar %s --summary_detail_level 3 --compilation_level SIMPLE_OPTIMIZATIONS \
--warning_level VERBOSE --language_in ECMASCRIPT5 --accept_const_keyword --module_output_path_prefix %s/ \\\n" % (java_exec, closure_compiler_jar, modules_dir)
process_recursively = len(sys.argv) > 1
if process_recursively:
module_name = sys.argv[1]
if module_name != "all":
modules = []
for i in range(1, len(sys.argv)):
modules.append(modules_by_name[sys.argv[i]])
for module in modules:
command = compiler_command
command += " --externs " + devtools_frontend_path + "/externs.js" + " \\\n"
command += " --externs " + protocol_externs_path
command += dump_module(module["name"], True, {})
print "Compiling \"" + module["name"] + "\"..."
os.system(command)
else:
command = compiler_command
command += " --externs " + devtools_frontend_path + "/externs.js" + " \\\n"
command += " --externs " + protocol_externs_path
for module in modules:
command += dump_module(module["name"], False, {})
print "Compiling front_end..."
frontEndCompileProc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
def unclosure_injected_script(sourceFileName, outFileName):
sourceFile = open(sourceFileName, "r")
source = sourceFile.read()
sourceFile.close()
def replace_function(matchobj):
return re.sub(r"@param", "param", matchobj.group(1) or "") + "\n//" + matchobj.group(2)
# Comment out the closure function and its jsdocs
source = re.sub(r"(/\*\*(?:[\s\n]*\*\s*@param[^\n]+\n)+\s*\*/\s*)?\n(\(function)", replace_function, source, count=1)
# Comment out its return statement
source = re.sub(r"\n(\s*return\s+[^;]+;\s*\n\}\)\s*)$", "\n/*\\1*/", source)
outFileName = open(outFileName, "w")
outFileName.write(source)
outFileName.close()
injectedScriptSourceTmpFile = inspector_path + "/" + "InjectedScriptSourceTmp.js"
injectedScriptCanvasModuleSourceTmpFile = inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js"
unclosure_injected_script(inspector_path + "/" + "InjectedScriptSource.js", injectedScriptSourceTmpFile)
unclosure_injected_script(inspector_path + "/" + "InjectedScriptCanvasModuleSource.js", injectedScriptCanvasModuleSourceTmpFile)
print "Compiling InjectedScriptSource.js and InjectedScriptCanvasModuleSource.js..."
command = compiler_command
command += " --externs " + inspector_path + "/" + "InjectedScriptExterns.js" + " \\\n"
command += " --externs " + protocol_externs_path + " \\\n"
command += " --module " + jsmodule_name_prefix + "injected_script" + ":1" + " \\\n"
command += " --js " + injectedScriptSourceTmpFile + " \\\n"
command += " --module " + jsmodule_name_prefix + "injected_canvas_script" + ":1:" + jsmodule_name_prefix + "injected_script" + " \\\n"
command += " --js " + injectedScriptCanvasModuleSourceTmpFile + " \\\n"
command += "\n"
injectedScriptCompileProc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
print "Checking generated code in InjectedScriptCanvasModuleSource.js..."
check_injected_webgl_calls_command = "%s/check_injected_webgl_calls_info.py %s %s/InjectedScriptCanvasModuleSource.js" % (scripts_path, webgl_rendering_context_idl_path, inspector_path)
canvasModuleCompileProc = subprocess.Popen(check_injected_webgl_calls_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
print
(frontEndCompileOut, _) = frontEndCompileProc.communicate()
print "front_end compilation output:\n", frontEndCompileOut
(injectedScriptCompileOut, _) = injectedScriptCompileProc.communicate()
print "InjectedScriptSource.js and InjectedScriptCanvasModuleSource.js compilation output:\n", injectedScriptCompileOut
(canvasModuleCompileOut, _) = canvasModuleCompileProc.communicate()
print "InjectedScriptCanvasModuleSource.js generated code check output:\n", canvasModuleCompileOut
os.system("rm " + injectedScriptSourceTmpFile)
os.system("rm " + injectedScriptCanvasModuleSourceTmpFile)
shutil.rmtree(modules_dir)
os.system("rm " + protocol_externs_path)
|
|
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-processing functions for input data."""
import functools
from absl import logging
import tensorflow.compat.v2 as tf
from galaxy_mergers import losses
CROP_TYPE_NONE = 'crop_none'
CROP_TYPE_FIXED = 'crop_fixed'
CROP_TYPE_RANDOM = 'crop_random'
DATASET_FREQUENCY_MEAN = 4.0
DATASET_FREQUENCY_RANGE = 8.0
PHYSICAL_FEATURES_MIN_MAX = {
'redshift': (0.572788, 2.112304),
'mass': (9.823963, 10.951282)
}
ALL_FREQUENCIES = [105, 125, 160, 435, 606, 775, 850]
VALID_ADDITIONAL_FEATURES = ['redshift', 'sequence_average_redshift', 'mass']
def _make_padding_sizes(pad_size, random_centering):
if random_centering:
pad_size_left = tf.random.uniform(
shape=[], minval=0, maxval=pad_size+1, dtype=tf.int32)
else:
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return pad_size_left, pad_size_right
def resize_and_pad(image, target_size, random_centering):
"""Resize image to target_size (<= image.size) and pad to original size."""
original_shape = image.shape
size = tf.reshape(target_size, [1])
size = tf.concat([size, size], axis=0)
image = tf.image.resize(image, size=size)
pad_size = original_shape[1] - target_size
pad_size_left, pad_size_right = _make_padding_sizes(
pad_size, random_centering)
padding = [[pad_size_left, pad_size_right],
[pad_size_left, pad_size_right], [0, 0]]
if len(original_shape) == 4:
padding = [[0, 0]] + padding
image = tf.pad(image, padding)
image.set_shape(original_shape)
return image
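def _example_resize_and_pad():
  # Illustrative sketch only, never called by this module: a 64x64x3 image
  # shrunk to 48x48 is padded back to 64x64, with the 16 padded rows/columns
  # split evenly because random_centering is False.
  image = tf.zeros([64, 64, 3])
  out = resize_and_pad(image, target_size=48, random_centering=False)
  assert out.shape.as_list() == [64, 64, 3]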
def resize_and_extract(image, target_size, random_centering):
"""Upscale image to target_size (>image.size), extract original size crop."""
original_shape = image.shape
size = tf.reshape(target_size, [1])
size = tf.concat([size, size], axis=0)
image = tf.image.resize(image, size=size)
pad_size = target_size - original_shape[1]
pad_size_left, pad_size_right = _make_padding_sizes(
pad_size, random_centering)
if len(original_shape) == 3:
image = tf.expand_dims(image, 0)
image = tf.cond(pad_size_right > 0,
lambda: image[:, pad_size_left:-pad_size_right, :, :],
lambda: image[:, pad_size_left:, :, :])
image = tf.cond(pad_size_right > 0,
lambda: image[:, :, pad_size_left:-pad_size_right, :],
lambda: image[:, :, pad_size_left:, :])
if len(original_shape) == 3:
image = tf.squeeze(image, 0)
image.set_shape(original_shape)
return image
def resize_and_center(image, target_size, random_centering):
return tf.cond(
tf.math.less_equal(target_size, image.shape[1]),
lambda: resize_and_pad(image, target_size, random_centering),
lambda: resize_and_extract(image, target_size, random_centering))
def random_rotation_and_flip(image):
angle = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
return tf.image.random_flip_left_right(tf.image.rot90(image, angle))
def get_all_rotations_and_flips(images):
assert isinstance(images, list)
new_images = []
for image in images:
for rotation in range(4):
new_images.append(tf.image.rot90(image, rotation))
flipped_image = tf.image.flip_left_right(image)
new_images.append(tf.image.rot90(flipped_image, rotation))
return new_images
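# Illustrative note (not part of the original module): for every input image
# the loop above yields its 4 rotations plus the 4 rotations of its left-right
# flip, so len(get_all_rotations_and_flips(images)) == 8 * len(images).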
def random_rescaling(image, random_centering):
assert image.shape.as_list()[0] == image.shape.as_list()[1]
original_size = image.shape.as_list()[1]
min_size = 2 * (original_size // 4)
max_size = original_size * 2
target_size = tf.random.uniform(
shape=[], minval=min_size, maxval=max_size // 2,
dtype=tf.int32) * 2
return resize_and_center(image, target_size, random_centering)
def get_all_rescalings(images, image_width, random_centering):
"""Get a uniform sample of rescalings of all images in input."""
assert isinstance(images, list)
min_size = 2 * (image_width // 4)
max_size = image_width * 2
delta_size = (max_size + 2 - min_size) // 5
sizes = range(min_size, max_size + 2, delta_size)
new_images = []
for image in images:
for size in sizes:
new_images.append(resize_and_center(image, size, random_centering))
return new_images
def move_repeats_to_batch(image, n_repeats):
width, height, n_channels = image.shape.as_list()[1:]
image = tf.reshape(image, [-1, width, height, n_channels, n_repeats])
image = tf.transpose(image, [0, 4, 1, 2, 3]) # [B, repeats, x, y, c]
return tf.reshape(image, [-1, width, height, n_channels])
def get_classification_label(dataset_row, class_boundaries):
merge_time = dataset_row['grounded_normalized_time']
label = tf.dtypes.cast(0, tf.int64)
for category, intervals in class_boundaries.items():
for interval in intervals:
if merge_time > interval[0] and merge_time < interval[1]:
label = tf.dtypes.cast(int(category), tf.int64)
return label
def get_regression_label(dataset_row, task_type):
"""Returns time-until-merger regression target given desired modeling task."""
if task_type == losses.TASK_NORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32)
elif task_type == losses.TASK_GROUNDED_UNNORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32)
elif task_type == losses.TASK_UNNORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['unnormalized_time'], tf.float32)
elif task_type == losses.TASK_CLASSIFICATION:
return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32)
else:
raise ValueError
def get_normalized_time_target(dataset_row):
return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32)
def apply_time_filter(dataset_row, time_interval):
"""Returns True if data is within the given time intervals."""
merge_time = dataset_row['grounded_normalized_time']
lower_time, upper_time = time_interval
return merge_time > lower_time and merge_time < upper_time
def normalize_physical_feature(name, dataset_row):
min_feat, max_feat = PHYSICAL_FEATURES_MIN_MAX[name]
value = getattr(dataset_row, name)
return 2 * (value - min_feat) / (max_feat - min_feat) - 1
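# For example (illustrative): with the 'redshift' bounds in
# PHYSICAL_FEATURES_MIN_MAX, a value of 0.572788 maps to -1.0, 2.112304 maps
# to +1.0, and the midpoint of the interval maps to 0.0.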
def prepare_dataset(ds, target_size, crop_type, n_repeats, augmentations,
task_type, additional_features, class_boundaries,
time_intervals=None, frequencies_to_use='all',
additional_lambdas=None):
"""Prepare a zipped dataset of image, classification/regression labels."""
def _prepare_image(dataset_row):
"""Transpose, crop and cast an image."""
image = tf.dtypes.cast(dataset_row['image'], tf.float32)
image = tf.reshape(image, tf.cast(dataset_row['image_shape'], tf.int32))
    image = tf.transpose(image, perm=[1, 2, 0])  # Convert to HWC (channels last)
freqs = ALL_FREQUENCIES if frequencies_to_use == 'all' else frequencies_to_use
idxs_to_keep = [ALL_FREQUENCIES.index(f) for f in freqs]
image = tf.gather(params=image, indices=idxs_to_keep, axis=-1)
# Based on offline computation on the empirical frequency range:
# Converts [0, 8.] ~~> [-1, 1]
image = (image - DATASET_FREQUENCY_MEAN)/(DATASET_FREQUENCY_RANGE/2.0)
def crop(image):
if crop_type == CROP_TYPE_FIXED:
crop_loc = tf.cast(dataset_row['proposed_crop'][0], tf.int32)
crop_size = tf.cast(dataset_row['proposed_crop'][1], tf.int32)
image = image[
crop_loc[0]:crop_loc[0] + crop_size[0],
crop_loc[1]:crop_loc[1] + crop_size[1], :]
image = tf.image.resize(image, target_size[0:2])
image.set_shape([target_size[0], target_size[1], target_size[2]])
elif crop_type == CROP_TYPE_RANDOM:
image = tf.image.random_crop(image, target_size)
image.set_shape([target_size[0], target_size[1], target_size[2]])
elif crop_type != CROP_TYPE_NONE:
raise NotImplementedError
return image
repeated_images = []
for _ in range(n_repeats):
repeated_images.append(crop(image))
image = tf.concat(repeated_images, axis=-1)
if augmentations['rotation_and_flip']:
image = random_rotation_and_flip(image)
if augmentations['rescaling']:
image = random_rescaling(image, augmentations['translation'])
return image
def get_regression_label_wrapper(dataset_row):
return get_regression_label(dataset_row, task_type=task_type)
def get_classification_label_wrapper(dataset_row):
return get_classification_label(dataset_row,
class_boundaries=class_boundaries)
if time_intervals:
for time_interval in time_intervals:
filter_fn = functools.partial(apply_time_filter,
time_interval=time_interval)
ds = ds.filter(filter_fn)
datasets = [ds.map(_prepare_image)]
if additional_features:
additional_features = additional_features.split(',')
assert all([f in VALID_ADDITIONAL_FEATURES for f in additional_features])
logging.info('Running with additional features: %s.',
', '.join(additional_features))
def _prepare_additional_features(dataset_row):
features = []
for f in additional_features:
features.append(normalize_physical_feature(f, dataset_row))
features = tf.convert_to_tensor(features, dtype=tf.float32)
features.set_shape([len(additional_features)])
return features
datasets += [ds.map(_prepare_additional_features)]
datasets += [
ds.map(get_classification_label_wrapper),
ds.map(get_regression_label_wrapper),
ds.map(get_normalized_time_target)]
if additional_lambdas:
for process_fn in additional_lambdas:
datasets += [ds.map(process_fn)]
return tf.data.Dataset.zip(tuple(datasets))
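# Illustrative note (not from the original module): each element of the zipped
# dataset is a tuple of (image, [additional_features,] classification_label,
# regression_label, normalized_time_target), followed by one extra element per
# entry in additional_lambdas, in that order.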
|
|
"""
Configuration schemas can be used to group configuration values
together. These schemas can be instantiated at import time, and values can
be retrieved from them by accessing the attributes of the schema object.
Each field on the schema turns into an accessor for a configuration value.
These accessors will cache the return value of the validator that they use, so
expensive operations are not repeated.
Example
-------
.. code-block:: python
from staticconf import schema
class MyClassSchema(object):
__metaclass__ = schema.SchemaMeta
# Namespace to retrieve configuration values from
namespace = 'my_package'
# (optional) Config path to prepend to all config keys in this schema
config_path = 'my_class.foo'
# Attributes accept the same values as a getter (default, help, etc)
ratio = schema.float(default=0.2) # configured at my_class.foo.ratio
# You can optionally specify a different name from the attribute name
        max_threshold = schema.int(config_key='max') # configured at my_class.foo.max
You can also create your schema objects by subclassing Schema
.. code-block:: python
from staticconf import schema
class MyClassSchema(schema.Schema):
...
Access the values from a schema by instantiating the schema class.
.. code-block:: python
config = MyClassSchema()
print config.ratio
Arguments
---------
Schema accessors accept the following kwargs:
config_key
string configuration key
default
if no ``default`` is given, the key must be present in the configuration.
Raises :class:`staticconf.errors.ConfigurationError` on missing key.
help
a help string describing the purpose of the config value. See
:func:`staticconf.config.view_help`.
Custom schema types
-------------------
You can also create your own custom types using :func:`build_value_type`.
.. code-block:: python
from staticconf import schema
def validator(value):
try:
return do_some_casting(value)
except Exception:
raise ConfigurationError("%s can't be validated as a foo" % value)
foo_type = schema.build_value_type(validator)
class MySchema(object):
__metaclass__ = schema.SchemaMeta
something = foo_type(default=...)
"""
import functools
import six
from staticconf import validation, proxy, config, errors, getters
class ValueTypeDefinition(object):
__slots__ = ['validator', 'config_key', 'default', 'help']
def __init__(
self,
validator,
config_key=None,
default=proxy.UndefToken,
help=None):
self.validator = validator
self.config_key = config_key
self.default = default
self.help = help
class ValueToken(object):
__slots__ = [
'validator',
'config_key',
'default',
'_value',
'namespace',
'__weakref__'
]
def __init__(self, validator, namespace, key, default):
self.validator = validator
self.namespace = namespace
self.config_key = key
self.default = default
self._value = proxy.UndefToken
@classmethod
def from_definition(cls, value_def, namespace, key):
return cls(value_def.validator, namespace, key, value_def.default)
@proxy.cache_as_field('_value')
def get_value(self):
return proxy.extract_value(self)
def reset(self):
"""Clear the cached value so that configuration can be reloaded."""
self._value = proxy.UndefToken
def build_property(value_token):
"""Construct a property from a ValueToken. The callable gets passed an
instance of the schema class, which is ignored.
"""
def caller(_):
return value_token.get_value()
return property(caller)
class SchemaMeta(type):
"""Metaclass to construct config schema object."""
def __new__(mcs, name, bases, attributes):
namespace = mcs.get_namespace(attributes)
attributes = mcs.build_attributes(attributes, namespace)
return super(SchemaMeta, mcs).__new__(mcs, name, bases, attributes)
@classmethod
def get_namespace(cls, attributes):
if 'namespace' not in attributes:
raise errors.ConfigurationError("ConfigSchema requires a namespace.")
return config.get_namespace(attributes['namespace'])
@classmethod
def build_attributes(cls, attributes, namespace):
"""Return an attributes dictionary with ValueTokens replaced by a
property which returns the config value.
"""
config_path = attributes.get('config_path')
tokens = {}
def build_config_key(value_def, config_key):
key = value_def.config_key or config_key
return '%s.%s' % (config_path, key) if config_path else key
def build_token(name, value_def):
config_key = build_config_key(value_def, name)
value_token = ValueToken.from_definition(
value_def, namespace, config_key)
getters.register_value_proxy(namespace, value_token, value_def.help)
tokens[name] = value_token
return name, build_property(value_token)
def build_attr(name, attribute):
if not isinstance(attribute, ValueTypeDefinition):
return name, attribute
return build_token(name, attribute)
attributes = dict(build_attr(*item)
for item in six.iteritems(attributes))
attributes['_tokens'] = tokens
return attributes
@six.add_metaclass(SchemaMeta)
class Schema(object):
"""Base class for configuration schemas, uses :class:`SchemaMeta`."""
namespace = None
def build_value_type(validator):
"""A factory function to create a new schema type.
:param validator: a function which accepts one argument and returns that
value as the correct type.
"""
return functools.partial(ValueTypeDefinition, validator)
# Backwards compatible with staticconf 0.5.2
create_value_type = build_value_type
for name, validator in validation.get_validators():
name = name or 'any'
globals()[name] = build_value_type(validator)
list_of_validator = validation.build_list_type_validator(validator)
globals()['list_of_%s' % name] = build_value_type(list_of_validator)
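# Illustrative note (not part of the original module): the loop above is what
# creates the module-level accessors used in the docstring examples, e.g.
# schema.int and schema.float, plus a list variant for each validator such as
# schema.list_of_int built via build_list_type_validator.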
|
|
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines Subtokenizer class to encode and decode strings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import sys
import unicodedata
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
LUA = "<lua_index_compat>"
PAD = "<pad>_"
PAD_ID = 1
EOS = "<EOS>_"
EOS_ID = 2
UNK = "<bypass_unk>"
RESERVED_TOKENS = [LUA, PAD, EOS, UNK]
# Set of characters that will be used in the function _escape_token() (see func
# docstring for more details).
# This set is added to the alphabet list to ensure that all escaped tokens can
# be encoded.
_ESCAPE_CHARS = set(u"\\_u;0123456789")
# Regex for the function _unescape_token(), the inverse of _escape_token().
# This is used to find "\u", "\\", and "\###;" substrings in the token.
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_UNDEFINED_UNICODE = u"\u3013"
# Set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in xrange(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
# min_count is the minimum number of times a subtoken must appear in the data
# before it is added to the vocabulary. The value is found using binary
# search to obtain the target vocabulary size.
_MIN_MIN_COUNT = 1 # min value to use when binary searching for min_count
_MAX_MIN_COUNT = 1000 # max value to use when binary searching for min_count
class Subtokenizer(object):
"""Encodes and decodes strings to/from integer IDs."""
def __init__(self, vocab_file, reserved_tokens=None):
"""Initializes class, creating a vocab file if data_files is provided."""
print("Initializing Subtokenizer from file %s." % vocab_file)
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
    elif reserved_tokens == 'assumed_in_file':
reserved_tokens = []
self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)
self.alphabet = _generate_alphabet_dict(self.subtoken_list, reserved_tokens)
self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)
self.max_subtoken_length = 0
for subtoken in self.subtoken_list:
self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))
# Create cache to speed up subtokenization
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
@staticmethod
def init_from_files(
vocab_file, files, target_vocab_size, threshold, min_count=None,
file_byte_limit=1e6, reserved_tokens=None):
"""Create subtoken vocabulary based on files, and save vocab to file.
Args:
vocab_file: String name of vocab file to store subtoken vocabulary.
files: List of file paths that will be used to generate vocabulary.
target_vocab_size: target vocabulary size to generate.
threshold: int threshold of vocabulary size to accept.
min_count: int minimum count to use for generating the vocabulary. The min
count is the minimum number of times a subtoken should appear in the
files before it is added to the vocabulary. If set to none, this value
is found using binary search.
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that
will be drawn from the files.
reserved_tokens: List of string tokens that are guaranteed to be at the
beginning of the subtoken vocabulary list.
Returns:
Subtokenizer object
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
if os.path.exists(vocab_file):
print("Vocab file already exists (%s)" % vocab_file)
else:
print("Begin steps to create subtoken vocabulary...")
token_counts = _count_tokens(files, file_byte_limit)
alphabet = _generate_alphabet_dict(token_counts)
subtoken_list = _generate_subtokens_with_target_vocab_size(
token_counts, alphabet, target_vocab_size, threshold, min_count,
reserved_tokens)
print("Generated vocabulary with %d subtokens." %
len(subtoken_list))
_save_vocab_file(vocab_file, subtoken_list)
return Subtokenizer(vocab_file)
@staticmethod
def init_from_existing_vocab_file(
vocab_file, files, target_vocab_size, threshold, min_count=None,
file_byte_limit=1e6, reserved_tokens=None):
"""Create subtoken vocabulary based on files, and save vocab to file.
Args:
vocab_file: String name of vocab file to store subtoken vocabulary.
files: List of file paths that will be used to generate vocabulary.
target_vocab_size: target vocabulary size to generate.
threshold: int threshold of vocabulary size to accept.
min_count: int minimum count to use for generating the vocabulary. The min
count is the minimum number of times a subtoken should appear in the
files before it is added to the vocabulary. If set to none, this value
is found using binary search.
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that
will be drawn from the files.
reserved_tokens: List of string tokens that are guaranteed to be at the
beginning of the subtoken vocabulary list.
Returns:
Subtokenizer object
"""
if os.path.exists(vocab_file):
print("Vocab file exists (%s)" % vocab_file)
else:
print("Vocab file does not exist (%s)" % vocab_file)
return Subtokenizer(vocab_file, reserved_tokens='assumed_in_file')
def encode(self, raw_string, add_eos=False):
"""Encodes a string into a list of int subtoken ids."""
ret = []
tokens = _split_string_to_tokens(_native_to_unicode(raw_string))
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
if add_eos:
ret.append(EOS_ID)
return ret
def _token_to_subtoken_ids(self, token):
"""Encode a single token into a list of subtoken ids."""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = _split_token_to_subtokens(
_escape_token(token, self.alphabet), self.subtoken_to_id_dict,
self.max_subtoken_length)
    ret = [self.subtoken_to_id_dict[subtoken] for subtoken in ret]
self._cache[cache_location] = (token, ret)
return ret
def decode(self, subtokens):
"""Converts list of int subtokens ids into a string."""
if isinstance(subtokens, np.ndarray):
# Note that list(subtokens) converts subtokens to a python list, but the
# items remain as np.int32. This converts both the array and its items.
subtokens = subtokens.tolist()
if not subtokens:
return ""
assert isinstance(subtokens, list) and isinstance(subtokens[0], int), (
"Subtokens argument passed into decode() must be a list of integers.")
return _unicode_to_native(
_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))
def _subtoken_ids_to_tokens(self, subtokens):
"""Convert list of int subtoken ids to a list of string tokens."""
escaped_tokens = "".join([
self.subtoken_list[s] for s in subtokens
if s < len(self.subtoken_list)])
escaped_tokens = escaped_tokens.split("_")
# All tokens in the vocabulary list have been escaped (see _escape_token())
# so each token must be unescaped when decoding.
ret = []
for token in escaped_tokens:
if token:
ret.append(_unescape_token(token))
return ret
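# Minimal usage sketch (illustrative; 'vocab.ende.32768' is a hypothetical
# vocabulary file name, not one shipped with this module):
#   subtokenizer = Subtokenizer("vocab.ende.32768")
#   ids = subtokenizer.encode("Hello world", add_eos=True)
#   text = subtokenizer.decode(ids)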
def _save_vocab_file(vocab_file, subtoken_list):
"""Save subtokens to file."""
with open(vocab_file, mode='w', newline='\n') as f:
for subtoken in subtoken_list:
f.write("'%s'\n" % _unicode_to_native(subtoken))
def _load_vocab_file(vocab_file, reserved_tokens=None):
"""Load vocabulary while ensuring reserved tokens are at the top."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
subtoken_list = []
with open(vocab_file, mode='r', newline='\n') as f:
for line in f:
subtoken = _native_to_unicode(line.strip())
subtoken = subtoken[1:-1] # Remove surrounding single-quotes
if subtoken in reserved_tokens:
continue
subtoken_list.append(_native_to_unicode(subtoken))
return reserved_tokens + subtoken_list
def _native_to_unicode(s):
"""Convert string to unicode (required in Python 2)."""
if six.PY2:
return s if isinstance(s, unicode) else s.decode("utf-8")
else:
return s
def _unicode_to_native(s):
"""Convert string from unicode to native format (required in Python 2)."""
if six.PY2:
return s.encode("utf-8") if isinstance(s, unicode) else s
else:
return s
def _split_string_to_tokens(text):
"""Splits text to a list of string tokens."""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in xrange(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def _join_tokens_to_string(tokens):
"""Join a list of string tokens into a single string."""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _escape_token(token, alphabet):
r"""Replace characters that aren't in the alphabet and append "_" to token.
Apply three transformations to the token:
1. Replace underline character "_" with "\u", and backslash "\" with "\\".
2. Replace characters outside of the alphabet with "\###;", where ### is the
character's Unicode code point.
3. Appends "_" to mark the end of a token.
Args:
token: unicode string to be escaped
alphabet: list of all known characters
Returns:
escaped string
"""
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
def _unescape_token(token):
r"""Replaces escaped characters in the token with their unescaped versions.
Applies inverse transformations as _escape_token():
1. Replace "\u" with "_", and "\\" with "\".
2. Replace "\###;" with the unicode character the ### refers to.
Args:
token: escaped string
Returns:
unescaped string
"""
def match(m):
r"""Returns replacement string for matched object.
Matched objects contain one of the strings that matches the regex pattern:
r"\\u|\\\\|\\([0-9]+);"
The strings can be '\u', '\\', or '\###;' (### is any digit number).
m.group(0) refers to the entire matched string ('\u', '\\', or '\###;').
m.group(1) refers to the first parenthesized subgroup ('###').
m.group(0) exists for all match objects, while m.group(1) exists only for
the string '\###;'.
This function looks to see if m.group(1) exists. If it doesn't, then the
matched string must be '\u' or '\\' . In this case, the corresponding
replacement ('_' and '\') are returned. Note that in python, a single
backslash is written as '\\', and double backslash as '\\\\'.
    If m.group(1) exists, then use the integer in m.group(1) to return a
unicode character.
Args:
m: match object
Returns:
String to replace matched object with.
"""
# Check if the matched strings are '\u' or '\\'.
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
# If m.group(1) exists, try and return unicode character.
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return _UNDEFINED_UNICODE
# Use match function to replace escaped substrings in the token.
return _UNESCAPE_REGEX.sub(match, token)
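def _example_escape_unescape():
  # Illustrative sketch, never called by this module: u"abc_def" escapes to
  # u"abc\\udef_" (the underscore becomes "\u" and a trailing "_" marks the
  # token end); _unescape_token() inverts the transformation once the
  # trailing "_" is stripped.
  alphabet = set(u"abcdef") | _ESCAPE_CHARS
  escaped = _escape_token(u"abc_def", alphabet)
  assert escaped == u"abc\\udef_"
  assert _unescape_token(escaped[:-1]) == u"abc_def"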
def _count_tokens(files, file_byte_limit=1e6):
"""Return token counts of words in the files.
Samples file_byte_limit bytes from each file, and counts the words that appear
in the samples. The samples are semi-evenly distributed across the file.
Args:
files: List of filepaths
file_byte_limit: Max number of bytes that will be read from each file.
Returns:
Dictionary mapping tokens to the number of times they appear in the sampled
lines from the files.
"""
token_counts = collections.defaultdict(int)
for filepath in files:
with open(filepath, mode='r', newline='\n') as reader:
file_byte_budget = file_byte_limit
counter = 0
      lines_to_skip = int(os.path.getsize(filepath) / (file_byte_budget * 2))
for line in reader:
if counter < lines_to_skip:
counter += 1
else:
if file_byte_budget < 0:
break
line = line.strip()
file_byte_budget -= len(line)
counter = 0
# Add words to token counts
for token in _split_string_to_tokens(_native_to_unicode(line)):
token_counts[token] += 1
return token_counts
def _list_to_index_dict(lst):
"""Create dictionary mapping list items to their indices in the list."""
return {item: n for n, item in enumerate(lst)}
def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length):
"""Splits a token into subtokens defined in the subtoken dict."""
ret = []
start = 0
token_len = len(token)
while start < token_len:
# Find the longest subtoken, so iterate backwards.
for end in xrange(min(token_len, start + max_subtoken_length), start, -1):
subtoken = token[start:end]
if subtoken in subtoken_dict:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
raise ValueError("Was unable to split token \"%s\" into subtokens." %
token)
return ret
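def _example_split_token_to_subtokens():
  # Illustrative sketch, never called by this module: greedy longest-match
  # splitting of an (already escaped) token against a toy vocabulary.
  toy_dict = _list_to_index_dict([u"he", u"hell", u"l", u"o", u"o_"])
  subtokens = _split_token_to_subtokens(u"hello_", toy_dict, 4)
  assert subtokens == [u"hell", u"o_"]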
def _generate_subtokens_with_target_vocab_size(
token_counts, alphabet, target_size, threshold, min_count=None,
reserved_tokens=None):
"""Generate subtoken vocabulary close to the target size."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
if min_count is not None:
print("Using min_count=%d to generate vocab with target size %d" %
(min_count, target_size))
return _generate_subtokens(
token_counts, alphabet, min_count, reserved_tokens=reserved_tokens)
def bisect(min_val, max_val):
"""Recursive function to binary search for subtoken vocabulary."""
cur_count = (min_val + max_val) // 2
print("Binary search: trying min_count=%d (%d %d)" %
(cur_count, min_val, max_val))
subtoken_list = _generate_subtokens(
token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens)
val = len(subtoken_list)
print("Binary search: min_count=%d resulted in %d tokens" %
(cur_count, val))
within_threshold = abs(val - target_size) < threshold
if within_threshold or min_val >= max_val or cur_count < 2:
return subtoken_list
if val > target_size:
other_subtoken_list = bisect(cur_count + 1, max_val)
else:
other_subtoken_list = bisect(min_val, cur_count - 1)
# Return vocabulary dictionary with the closest number of tokens.
other_val = len(other_subtoken_list)
if abs(other_val - target_size) < abs(val - target_size):
return other_subtoken_list
return subtoken_list
print("Finding best min_count to get target size of %d" %
target_size)
return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT)
def _generate_alphabet_dict(iterable, reserved_tokens=None):
"""Create set of characters that appear in any element in the iterable."""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
  elif reserved_tokens == 'assumed_in_file':
reserved_tokens = []
alphabet = {c for token in iterable for c in token}
alphabet |= {c for token in reserved_tokens for c in token}
alphabet |= _ESCAPE_CHARS # Add escape characters to alphabet set.
return alphabet
def _count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length):
"""Count number of times subtokens appear, and generate new subtokens.
Args:
token_counts: dict mapping tokens to the number of times they appear in the
original files.
alphabet: list of allowed characters. Used to escape the tokens, which
guarantees that all tokens can be split into subtokens.
subtoken_dict: dict mapping subtokens to ids.
max_subtoken_length: maximum length of subtoken in subtoken_dict.
Returns:
A defaultdict mapping subtokens to the number of times they appear in the
tokens. The dict may contain new subtokens.
"""
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
token = _escape_token(token, alphabet)
subtokens = _split_token_to_subtokens(
token, subtoken_dict, max_subtoken_length)
# Generate new subtokens by taking substrings from token.
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(token) + 1):
new_subtoken = token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
return subtoken_counts
def _filter_and_bucket_subtokens(subtoken_counts, min_count):
"""Return a bucketed list of subtokens that are filtered by count.
Args:
subtoken_counts: defaultdict mapping subtokens to their counts
min_count: int count used to filter subtokens
Returns:
List of subtoken sets, where subtokens in set i have the same length=i.
"""
# Create list of buckets, where subtokens in bucket i have length i.
subtoken_buckets = []
for subtoken, count in six.iteritems(subtoken_counts):
if count < min_count: # Filter out subtokens that don't appear enough
continue
while len(subtoken_buckets) <= len(subtoken):
subtoken_buckets.append(set())
subtoken_buckets[len(subtoken)].add(subtoken)
return subtoken_buckets
def _gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens=None):
"""Generate candidate subtokens ordered by count, and new max subtoken length.
  Add subtokens to the candidate list in order of length (longest subtokens
first). When a subtoken is added, the counts of each of its prefixes are
decreased. Prefixes that don't appear much outside the subtoken are not added
to the candidate list.
For example:
subtoken being added to candidate list: 'translate'
subtoken_counts: {'translate':10, 't':40, 'tr':16, 'tra':12, ...}
min_count: 5
When 'translate' is added, subtoken_counts is updated to:
{'translate':0, 't':30, 'tr':6, 'tra': 2, ...}
The subtoken 'tra' will not be added to the candidate list, because it appears
twice (less than min_count) outside of 'translate'.
Args:
subtoken_counts: defaultdict mapping str subtokens to int counts
    min_count: int minimum count requirement for subtokens
alphabet: set of characters. Each character is added to the subtoken list to
guarantee that all tokens can be encoded.
reserved_tokens: list of tokens that will be added to the beginning of the
returned subtoken list.
Returns:
List of candidate subtokens in decreasing count order, and maximum subtoken
length
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
# Create a list of (count, subtoken) for each candidate subtoken.
subtoken_candidates = []
# Use bucketted list to iterate through subtokens in order of length.
# subtoken_buckets[i] = set(subtokens), where each subtoken has length i.
subtoken_buckets = _filter_and_bucket_subtokens(subtoken_counts, min_count)
max_subtoken_length = len(subtoken_buckets) - 1
# Go through the list in reverse order to consider longer subtokens first.
for subtoken_len in xrange(max_subtoken_length, 0, -1):
for subtoken in subtoken_buckets[subtoken_len]:
count = subtoken_counts[subtoken]
# Possible if this subtoken is a prefix of another token.
if count < min_count:
continue
# Ignore alphabet/reserved tokens, which will be added manually later.
if subtoken not in alphabet and subtoken not in reserved_tokens:
subtoken_candidates.append((count, subtoken))
# Decrement count of the subtoken's prefixes (if a longer subtoken is
# added, its prefixes lose priority to be added).
for end in xrange(1, subtoken_len):
subtoken_counts[subtoken[:end]] -= count
# Add alphabet subtokens (guarantees that all strings are encodable).
subtoken_candidates.extend((subtoken_counts.get(a, 0), a) for a in alphabet)
# Order subtoken candidates by decreasing count.
subtoken_list = [t for _, t in sorted(subtoken_candidates, reverse=True)]
# Add reserved tokens to beginning of the list.
subtoken_list = reserved_tokens + subtoken_list
return subtoken_list, max_subtoken_length
def _generate_subtokens(
token_counts, alphabet, min_count, num_iterations=4,
reserved_tokens=None):
"""Create a list of subtokens in decreasing order of frequency.
Args:
token_counts: dict mapping str tokens -> int count
alphabet: set of characters
min_count: int minimum number of times a subtoken must appear before it is
added to the vocabulary.
num_iterations: int number of iterations to generate new tokens.
reserved_tokens: list of tokens that will be added to the beginning to the
returned subtoken list.
Returns:
Sorted list of subtokens (most frequent first)
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
# Use alphabet set to create initial list of subtokens
subtoken_list = reserved_tokens + list(alphabet)
max_subtoken_length = 1
# On each iteration, segment all words using the subtokens defined in
# subtoken_dict, count how often the resulting subtokens appear, and update
# the dictionary with subtokens w/ high enough counts.
for i in xrange(num_iterations):
print("\tGenerating subtokens: iteration %d" % i)
# Generate new subtoken->id dictionary using the new subtoken list.
subtoken_dict = _list_to_index_dict(subtoken_list)
# Create dict mapping subtoken->count, with additional subtokens created
# from substrings taken from the tokens.
subtoken_counts = _count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
# Generate new list of subtokens sorted by subtoken count.
subtoken_list, max_subtoken_length = _gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
print("\tVocab size: %d" % len(subtoken_list))
return subtoken_list
|
|
# -*- coding: utf-8 -*-
"""
flask_jwt
~~~~~~~~~
Flask-JWT module
"""
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import wraps
import jwt
from flask import current_app, request, jsonify, _request_ctx_stack
from flask.views import MethodView
from werkzeug.local import LocalProxy
__version__ = '0.1.0'
current_user = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_user', None))
_jwt = LocalProxy(lambda: current_app.extensions['jwt'])
def _default_payload_handler(user):
return {
'user_id': user.id,
'exp': datetime.utcnow() + current_app.config['JWT_EXPIRATION_DELTA']
}
def _default_encode_handler(payload):
return jwt.encode(
payload,
current_app.config['JWT_SECRET_KEY'],
current_app.config['JWT_ALGORITHM']
).decode('utf-8')
def _default_decode_handler(token):
return jwt.decode(
token,
current_app.config['JWT_SECRET_KEY'],
current_app.config['JWT_VERIFY'],
current_app.config['JWT_VERIFY_EXPIRATION'],
current_app.config['JWT_LEEWAY']
)
CONFIG_DEFAULTS = {
'JWT_DEFAULT_REALM': 'Login Required',
'JWT_AUTH_URL_RULE': '/auth',
'JWT_AUTH_ENDPOINT': 'jwt',
'JWT_ENCODE_HANDLER': _default_encode_handler,
'JWT_DECODE_HANDLER': _default_decode_handler,
'JWT_PAYLOAD_HANDLER': _default_payload_handler,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': timedelta(seconds=300)
}
def jwt_required(realm=None):
"""View decorator that requires a valid JWT token to be present in the request
:param realm: an optional realm
"""
def wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
verify_jwt(realm)
return fn(*args, **kwargs)
return decorator
return wrapper
class JWTError(Exception):
def __init__(self, error, description, status_code=400, headers=None):
self.error = error
self.description = description
self.status_code = status_code
self.headers = headers
def verify_jwt(realm=None):
"""Does the actual work of verifying the JWT data in the current request.
This is done automatically for you by `jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
:param realm: an optional realm
"""
realm = realm or current_app.config['JWT_DEFAULT_REALM']
auth = request.headers.get('Authorization', None)
if auth is None:
raise JWTError('Authorization Required', 'Authorization header was missing', 401, {
'WWW-Authenticate': 'JWT realm="%s"' % realm
})
parts = auth.split()
if parts[0].lower() != 'bearer':
raise JWTError('Invalid JWT header', 'Unsupported authorization type')
elif len(parts) == 1:
raise JWTError('Invalid JWT header', 'Token missing')
elif len(parts) > 2:
raise JWTError('Invalid JWT header', 'Token contains spaces')
try:
handler = current_app.config['JWT_DECODE_HANDLER']
payload = handler(parts[1])
except jwt.ExpiredSignature:
raise JWTError('Invalid JWT', 'Token is expired')
except jwt.DecodeError:
raise JWTError('Invalid JWT', 'Token is undecipherable')
_request_ctx_stack.top.current_user = user = _jwt.user_callback(payload)
if user is None:
raise JWTError('Invalid JWT', 'User does not exist')
class JWTAuthView(MethodView):
def post(self):
data = request.get_json(force=True)
username = data.get('username', None)
password = data.get('password', None)
criterion = [username, password, len(data) == 2]
if not all(criterion):
raise JWTError('Bad Request', 'Missing required credentials', status_code=400)
user = _jwt.authentication_callback(username=username, password=password)
if user:
payload_handler = current_app.config['JWT_PAYLOAD_HANDLER']
payload = payload_handler(user)
encode_handler = current_app.config['JWT_ENCODE_HANDLER']
return jsonify({'token': encode_handler(payload)})
else:
raise JWTError('Bad Request', 'Invalid credentials')
class JWT(object):
def __init__(self, app=None):
if app is not None:
self.app = app
self.init_app(app)
else:
self.app = None
def init_app(self, app):
for k, v in CONFIG_DEFAULTS.items():
app.config.setdefault(k, v)
app.config.setdefault('JWT_SECRET_KEY', app.config['SECRET_KEY'])
url_rule = app.config.get('JWT_AUTH_URL_RULE', None)
endpoint = app.config.get('JWT_AUTH_ENDPOINT', None)
if url_rule and endpoint:
auth_view = JWTAuthView.as_view(app.config['JWT_AUTH_ENDPOINT'])
app.add_url_rule(url_rule, methods=['POST'], view_func=auth_view)
app.errorhandler(JWTError)(self._on_jwt_error)
if not hasattr(app, 'extensions'): # pragma: no cover
app.extensions = {}
app.extensions['jwt'] = self
def _on_jwt_error(self, e):
return getattr(self, 'error_callback', self._error_callback)(e)
def _error_callback(self, e):
return jsonify(OrderedDict([
('status_code', e.status_code),
('error', e.error),
('description', e.description),
])), e.status_code, e.headers
def authentication_handler(self, callback):
"""Specifies the authentication handler function. This function receives two
positional arguments. The first being the username the second being the password.
It should return an object representing the authenticated user. Example::
@jwt.authentication_handler
def authenticate(username, password):
if username == 'joe' and password == 'pass':
return User(id=1, username='joe')
:param callback: the authentication handler function
"""
self.authentication_callback = callback
return callback
def user_handler(self, callback):
"""Specifies the user handler function. This function receives the token payload as
its only positional argument. It should return an object representing the current
user. Example::
@jwt.user_handler
def load_user(payload):
if payload['user_id'] == 1:
return User(id=1, username='joe')
:param callback: the user handler function
"""
self.user_callback = callback
return callback
def error_handler(self, callback):
"""Specifies the error handler function. This function receives a JWTError instance as
its only positional argument. It can optionally return a response. Example::
@jwt.error_handler
def error_handler(e):
return "Something bad happened", 400
:param callback: the error handler function
"""
self.error_callback = callback
return callback
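# Minimal usage sketch (illustrative, not part of this module; User is a
# hypothetical application model, mirroring the docstring examples above):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SECRET_KEY'] = 'super-secret'
#   jwt = JWT(app)
#
#   @jwt.authentication_handler
#   def authenticate(username, password):
#       if username == 'joe' and password == 'pass':
#           return User(id=1, username='joe')
#
#   @jwt.user_handler
#   def load_user(payload):
#       if payload['user_id'] == 1:
#           return User(id=1, username='joe')
#
#   @app.route('/protected')
#   @jwt_required()
#   def protected():
#       return 'Hello %s' % current_user.username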
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestIndex(unittest2.TestCase):
PROJECT = 'project'
INDEX_ID = 'index-id'
def _getTargetClass(self):
from gcloud.search.index import Index
return Index
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _setUpConstants(self):
import datetime
from gcloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
tzinfo=UTC)
self.ZONE_ID = 12345
def _makeResource(self):
self._setUpConstants()
return {
'projectId': self.PROJECT,
'indexId': self.INDEX_ID,
'indexedField': {
'textFields': ['text-1', 'text-2'],
'htmlFields': ['html-1', 'html-2'],
'atomFields': ['atom-1', 'atom-2'],
'dateFields': ['date-1', 'date-2'],
'numberFields': ['number-1', 'number-2'],
'geoFields': ['geo-1', 'geo-2'],
}
}
def _makeDocumentResource(self, doc_id, rank=None, title=None):
resource = {'docId': doc_id}
if rank is not None:
resource['rank'] = rank
if title is not None:
resource['fields'] = {
'title': {
'values': [{
'stringValue': title,
'stringFormat': 'text',
'lang': 'en'}]
}
}
return resource
def _verifyResourceProperties(self, index, resource):
self.assertEqual(index.name, resource.get('indexId'))
field_info = resource.get('indexedField', {})
self.assertEqual(index.text_fields, field_info.get('textFields'))
self.assertEqual(index.html_fields, field_info.get('htmlFields'))
self.assertEqual(index.atom_fields, field_info.get('atomFields'))
self.assertEqual(index.date_fields, field_info.get('dateFields'))
self.assertEqual(index.number_fields, field_info.get('numberFields'))
self.assertEqual(index.geo_fields, field_info.get('geoFields'))
def _verifyDocumentResource(self, documents, resource):
from gcloud.search.document import Document
from gcloud.search.document import StringValue
self.assertEqual(len(documents), len(resource))
for found, expected in zip(documents, resource):
self.assertTrue(isinstance(found, Document))
self.assertEqual(found.name, expected['docId'])
self.assertEqual(found.rank, expected.get('rank'))
e_fields = expected.get('fields', ())
self.assertEqual(sorted(found.fields), sorted(e_fields))
for field, f_field in found.fields.items():
e_field = e_fields[field]
for f_value, e_value in zip(f_field.values, e_field['values']):
self.assertTrue(isinstance(f_value, StringValue))
self.assertEqual(f_value.string_value,
e_value['stringValue'])
self.assertEqual(f_value.string_format,
e_value['stringFormat'])
self.assertEqual(f_value.language,
e_value['lang'])
def test_ctor(self):
client = _Client(self.PROJECT)
index = self._makeOne(self.INDEX_ID, client)
self.assertEqual(index.name, self.INDEX_ID)
self.assertTrue(index._client is client)
self.assertEqual(index.project, client.project)
self.assertEqual(
index.path,
'/projects/%s/indexes/%s' % (self.PROJECT, self.INDEX_ID))
self.assertEqual(index.text_fields, None)
self.assertEqual(index.html_fields, None)
self.assertEqual(index.atom_fields, None)
self.assertEqual(index.date_fields, None)
self.assertEqual(index.number_fields, None)
self.assertEqual(index.geo_fields, None)
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _Client(self.PROJECT)
RESOURCE = {}
klass = self._getTargetClass()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _Client(self.PROJECT)
RESOURCE = {
'indexId': self.INDEX_ID,
}
klass = self._getTargetClass()
index = klass.from_api_repr(RESOURCE, client=client)
self.assertTrue(index._client is client)
self._verifyResourceProperties(index, RESOURCE)
def test_from_api_repr_w_properties(self):
self._setUpConstants()
client = _Client(self.PROJECT)
RESOURCE = self._makeResource()
klass = self._getTargetClass()
index = klass.from_api_repr(RESOURCE, client=client)
self.assertTrue(index._client is client)
self._verifyResourceProperties(index, RESOURCE)
def test_list_documents_defaults(self):
DOCID_1 = 'docid-one'
DOCID_2 = 'docid-two'
PATH = 'projects/%s/indexes/%s/documents' % (
self.PROJECT, self.INDEX_ID)
TOKEN = 'TOKEN'
DOC_1 = self._makeDocumentResource(DOCID_1)
DOC_2 = self._makeDocumentResource(DOCID_2)
RESPONSE = {
'nextPageToken': TOKEN,
'documents': [DOC_1, DOC_2],
}
client = _Client(self.PROJECT)
conn = client.connection = _Connection(RESPONSE)
index = self._makeOne(self.INDEX_ID, client)
documents, token = index.list_documents()
self._verifyDocumentResource(documents, RESPONSE['documents'])
self.assertEqual(token, TOKEN)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {})
def test_list_documents_explicit(self):
DOCID_1 = 'docid-one'
RANK_1 = 2345
TITLE_1 = 'Title One'
DOCID_2 = 'docid-two'
RANK_2 = 1234
TITLE_2 = 'Title Two'
PATH = 'projects/%s/indexes/%s/documents' % (
self.PROJECT, self.INDEX_ID)
TOKEN = 'TOKEN'
DOC_1 = self._makeDocumentResource(DOCID_1, RANK_1, TITLE_1)
DOC_2 = self._makeDocumentResource(DOCID_2, RANK_2, TITLE_2)
RESPONSE = {'documents': [DOC_1, DOC_2]}
client = _Client(self.PROJECT)
conn = client.connection = _Connection(RESPONSE)
index = self._makeOne(self.INDEX_ID, client)
documents, token = index.list_documents(
max_results=3, page_token=TOKEN, view='FULL')
self._verifyDocumentResource(documents, RESPONSE['documents'])
self.assertEqual(token, None)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'],
{'pageSize': 3,
'pageToken': TOKEN,
'view': 'FULL'})
def test_document_defaults(self):
from gcloud.search.document import Document
DOCUMENT_ID = 'document-id'
client = _Client(self.PROJECT)
index = self._makeOne(self.INDEX_ID, client)
document = index.document(DOCUMENT_ID)
self.assertTrue(isinstance(document, Document))
self.assertEqual(document.name, DOCUMENT_ID)
self.assertEqual(document.rank, None)
self.assertTrue(document.index is index)
def test_document_explicit(self):
from gcloud.search.document import Document
DOCUMENT_ID = 'document-id'
RANK = 1234
client = _Client(self.PROJECT)
index = self._makeOne(self.INDEX_ID, client)
document = index.document(DOCUMENT_ID, rank=RANK)
self.assertTrue(isinstance(document, Document))
self.assertEqual(document.name, DOCUMENT_ID)
self.assertEqual(document.rank, RANK)
self.assertTrue(document.index is index)
def test_search_defaults(self):
DOCID_1 = 'docid-one'
TITLE_1 = 'Title One'
DOCID_2 = 'docid-two'
TITLE_2 = 'Title Two'
PATH = 'projects/%s/indexes/%s/search' % (
self.PROJECT, self.INDEX_ID)
TOKEN = 'TOKEN'
DOC_1 = self._makeDocumentResource(DOCID_1, title=TITLE_1)
DOC_2 = self._makeDocumentResource(DOCID_2, title=TITLE_2)
QUERY = 'query string'
RESPONSE = {
'nextPageToken': TOKEN,
'matchedCount': 2,
'results': [DOC_1, DOC_2],
}
client = _Client(self.PROJECT)
conn = client.connection = _Connection(RESPONSE)
index = self._makeOne(self.INDEX_ID, client)
documents, token, matched_count = index.search(QUERY)
self._verifyDocumentResource(documents, RESPONSE['results'])
self.assertEqual(token, TOKEN)
self.assertEqual(matched_count, 2)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'query': QUERY})
def test_search_explicit(self):
DOCID_1 = 'docid-one'
TITLE_1 = 'Title One'
FUNKY_1 = 'this is a funky show'
RANK_1 = 2345
DOCID_2 = 'docid-two'
TITLE_2 = 'Title Two'
        FUNKY_2 = 'delightfully funky ambiance'
RANK_2 = 1234
PATH = 'projects/%s/indexes/%s/search' % (
self.PROJECT, self.INDEX_ID)
TOKEN = 'TOKEN'
def _makeFunky(text):
return {
'values': [{
'stringValue': text,
'stringFormat': 'text',
'lang': 'en',
}]
}
DOC_1 = self._makeDocumentResource(DOCID_1, RANK_1, TITLE_1)
DOC_1['fields']['funky'] = _makeFunky(FUNKY_1)
DOC_2 = self._makeDocumentResource(DOCID_2, RANK_2, TITLE_2)
DOC_2['fields']['funky'] = _makeFunky(FUNKY_2)
EXPRESSIONS = {'funky': 'snippet("funky", content)'}
QUERY = 'query string'
RESPONSE = {
'matchedCount': 2,
'results': [DOC_1, DOC_2],
}
client = _Client(self.PROJECT)
conn = client.connection = _Connection(RESPONSE)
index = self._makeOne(self.INDEX_ID, client)
documents, token, matched_count = index.search(
query=QUERY,
max_results=3,
page_token=TOKEN,
field_expressions=EXPRESSIONS,
order_by=['title'],
matched_count_accuracy=100,
scorer='generic',
scorer_size=20,
return_fields=['_rank', 'title', 'funky'],
)
self._verifyDocumentResource(documents, RESPONSE['results'])
self.assertEqual(token, None)
self.assertEqual(matched_count, 2)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
expected_params = {
'query': QUERY,
'pageSize': 3,
'pageToken': TOKEN,
'fieldExpressions': EXPRESSIONS,
'orderBy': ['title'],
'matchedCountAccuracy': 100,
'scorer': 'generic',
'scorerSize': 20,
'returnFields': ['_rank', 'title', 'funky'],
}
self.assertEqual(req['query_params'], expected_params)
class _Client(object):
def __init__(self, project='project', connection=None):
self.project = project
self.connection = connection
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from gcloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
except: # pragma: NO COVER
raise NotFound('miss')
else:
return response
|
|
""" define solvers that record cumulative times needed for every loop """
from dolfin import *
from nanopores.tools.pdesystem import newtonsolve
from nanopores import *
def QmolEff(U, geo):
phi, phidual = U
phys = geo.physics
dim = phys.dim
coeff = Constant(1.) if dim==3 else Expression("2*pi*x[0]")
molqv = phys.Moleculeqv
dnaqs = phys.DNAqs
lscale = phys.lscale
grad = phys.grad
q = phys.qq
#qmol = coeff*molqv/lscale**3/q*geo.dx("molecule")
qDNA = (1./lscale**2/q)*geo.NeumannRHS(coeff, "surfcharge")
qmol = (1./lscale**3/q)*geo.linearRHS(coeff, "volcharge")
Fbare = molqv * (-coeff*grad(phi)[dim-1]) *geo.dx("molecule")
return dict(qmol=qmol, Fbare=Fbare, qDNA=qDNA)
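# Overview (added note): adaptive_pbpnps below runs a goal-oriented adaptive
# refinement loop -- solve the linearized PB problem, estimate the error in the
# goal functional Fbare, refine the marked fraction of cells, re-solve the full
# PNPS system on the refined mesh, and record the relative force errors
# (Fel, Fdrag, F) against the supplied reference values via pb.save_estimate.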
def adaptive_pbpnps(geo, phys, cyl=False, frac=0.5, Nmax=1e4, mesh2D=None, cheapest=False,
Felref=None, Fdragref=None, Fpbref=None, ratio=0.01):
#Fdragref = Fsref + Fpref
LinearPB = LinearPBAxisymGoalOriented if cyl else LinearPBGoalOriented
PNPStokes = PNPSAxisym if cyl else PNPS
z = phys.dim - 1
bV = phys.bV
print "biased voltage:", bV
phys.bV = 0.
goal = lambda v : phys.Fbare(v, z) # phys.Fbaresurf(v, z) # +
pb = LinearPB(geo, phys, goal=goal, ref=Fpbref)
phys.bV = bV
pb.maxcells = Nmax
pb.marking_fraction = frac
if cheapest:
pb.estimate = pb.estimate_cheap
pb.add_functionals([QmolEff])
refined = True
i = 0
print "Number of cells:", pb.geo.mesh.num_cells()
while refined:
i += 1
if phys.dim == 3:
print "\nAssessing mesh quality."
mesh_quality(pb.geo.mesh, ratio=ratio, geo=pb.geo, plothist=False)
print "\nSolving PB."
# solve pb
pb.single_solve()
pb.print_functionals()
# define and solve pnps
'''
if i==1:
pnps = PNPStokes(pb.geo, phys, v0=pb.solution)
else:
pnps.geo = pb.geo
mesh = pb.geo.mesh
for name, S in pnps.solvers.items():
print "Adapting %s." % name
S.adapt(mesh)
functions = tuple(pnps.functions.values())
for S in pnps.solvers.values():
S.replace(functions,functions)
'''
print "Defining PNPS with Taylor-Hood elements."
pnps = PNPStokes(pb.geo, phys, v0=pb.solution, taylorhood=True)
print "\nSolving PNPS."
dofs = pnps.dofs()
print " Degrees of freedom: %d" % dofs
pnps.solve()
#newton_iter = pnps.newton_solve()
#print " Newton iterations:", newton_iter
print
#if phys.dim == 3:
# pnps.visualize("pore")
# fs = pnps.get_functionals()
# Fp = fs["Fp%d" %z]
# Fshear = fs["Fshear%d" %z]
# Fdrag = Fp + Fshear
# Fel = fs["Fbarevol%d" %z]
F, Fel, Fdrag = pnps.zforces()
print "Fbare [pN]:", Fel
print "Fdrag [pN]:", Fdrag #, " = %s (Fp) + %s (Fshear)" %(Fp, Fshear)
print "F [pN]:", F
if Felref is not None:
pb.save_estimate("Fdrag", abs((Fdrag-Fdragref)/Fdragref), N=dofs)
pb.save_estimate("Fel", abs((Fel-Felref)/Felref), N=dofs)
#pb.save_estimate("Fs", abs((Fshear-Fsref)/Fsref), N=dofs)
Fref = Felref + Fdragref
pb.save_estimate("F", abs((F-Fref)/Fref), N=dofs)
print "\nAdaptive refinement."
(ind, err) = pb.estimate()
pb.save_estimate("Fpb est", err, N=dofs)
refined = pb.refine(ind)
if not refined:
print "Maximal number of cells reached."
else:
print "New total number of cells:", pb.geo.mesh.num_cells()
return pb, pnps
def adaptive_pb(geo, phys, cyl=False, frac=0.5, Nmax=1e4, Fpbref=None,
ratio=.01, mesh2D=None, cheapest=False):
LinearPB = LinearPBAxisymGoalOriented if cyl else LinearPBGoalOriented
z = phys.dim - 1
bV = phys.bV
phys.bV = 0.
goal = lambda v : phys.Fbare(v, z) #- phys.CurrentPB(v)
pb = LinearPB(geo, phys, goal=goal, ref=Fpbref)
phys.bV = bV
pb.maxcells = Nmax
pb.marking_fraction = frac
if cheapest:
pb.estimate = pb.estimate_cheap
pb.add_functionals([QmolEff])
refined = True
i = 0
print "Number of cells:", pb.geo.mesh.num_cells()
while refined:
i += 1
if phys.dim == 3:
print "\nAssessing mesh quality."
mesh_quality(pb.geo.mesh, ratio=ratio, geo=pb.geo, plothist=False)
print "\nSolving PB."
# solve pb
pb.single_solve()
pb.print_functionals(name="Fbare")
#plot(pb.geo.mesh)
#plot(pb.geo.submesh("membrane"))
#plot(pb.geo.submesh("pore"))
#plot(pb.geo.submesh("dna"))
if phys.dim == 3:
dofs = pb.dofs()
#plot_on_sub(pb.solution, geo, "dna", title="N=%s" %dofs)
#geo_debug(pb.geo)
Rz = pb.geo.params["Rz"]
r0 = pb.geo.params["r0"]
plot1D({"phi, N=%s" %dofs: pb.solution}, (-Rz, Rz, 101), "z", dim=3,
origin=(r0, 0., 0.), axlabels=("z [nm]", "potential [V]"), newfig=False)
# origin=(0., 0., 0.), axlabels=("z [nm]", "potential [V]"), newfig=False)
print "\nError estimation."
(ind, err) = pb.estimate()
print "\nMesh refinement."
refined = pb.refine(ind)
if not refined:
print "Maximal number of cells reached."
else:
print "New total number of cells:", pb.geo.mesh.num_cells()
return pb
def pbpnps(geo, phys, cyl=False, frac=0.5, Nmax=1e4, cheapest=False):
LinearPB = LinearPBAxisymGoalOriented if cyl else LinearPBGoalOriented
PNPStokes = PNPSAxisym if cyl else PNPS
z = phys.dim - 1
bV = phys.bV
phys.bV = 0.
goal = (lambda v : phys.Fbare(v, z)) if geo.parameter("x0") else (lambda v : phys.CurrentPB(v))
pb = LinearPB(geo, phys, goal=goal)
phys.bV = bV
pb.maxcells = Nmax
pb.marking_fraction = frac
if cheapest:
pb.estimate = pb.estimate_cheap
refined = True
i = 0
print "Number of cells:", pb.geo.mesh.num_cells()
while refined:
i += 1
print "\nSolving PB."
pb.single_solve()
print "\nError estimation."
(ind, err) = pb.estimate()
print "\nMesh refinement."
refined = pb.refine(ind)
if not refined:
print "Maximal number of cells reached."
else:
print "New total number of cells:", pb.geo.mesh.num_cells()
pnps = PNPStokes(pb.geo, phys, v0=pb.solution, taylorhood=True)
print "\nSolving PNPS."
dofs = pnps.dofs()
print " Degrees of freedom: %d" % dofs
newton_iter = pnps.newton_solve()
print " Newton iterations:", newton_iter
return pb, pnps
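# Note on newton_solve below: it implements the module docstring -- every damped
# Newton iteration is timed with a dolfin Timer, and the relative L2 error
# between successive solutions is stored both against the iteration count
# ("err newton i") and against the cumulative time ("err newton time")
# via save_estimate.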
def newton_solve(self, tol=None, damp=None, verbose=True):
if tol is None: tol = self.tolnewton
if damp is None: damp = self.newtondamp
S = self.solvers.values()[0]
S.newtondamp = damp
tcum = 0.
Uold = self.solutions(deepcopy=True)
for i in range(self.imax):
tloop = Timer("loop")
S.solve()
if verbose:
print ' Relative L2 Newton error:',S.relerror()
if S.convergence(tol):
if verbose:
print " Break loop because tolerance %s was reached." %tol
converged = True
break
# cumulative time
tcum += tloop.stop()
# calculate the error
U = self.solutions(deepcopy=True)
err = sum(errornorm(u, uold, "L2", degree_rise=0) for u, uold in zip(U, Uold)) / sum(norm(u, "L2") for u in U)
Uold = U
self.save_estimate("err newton i", err, N=i+1)
self.save_estimate("err newton time", err, N=tcum)
continue
else:
if verbose: print " Did not reach tolerance %s." %tol
converged = False
print " Newton iterations:",i+1
#print ' Relative L2 Newton error:',S.relerror()
return i+1, converged
def hybrid_solve(self, tol=None, damp=None):
for _ in self.fixedpoint():
try:
v = self.functions["poisson"]
plot(v)
#interactive()
except: pass
def geo_debug(geo):
print "Boundaries:"
for i in geo._bou2phys:
print "%d: %s" %(i, str(geo._bou2phys[i]))
for subd in geo._physical_domain:
submesh = geo.submesh(subd)
geo_sub = geo_from_subdomains(submesh,
"nanopores.geometries.%s.subdomains" %geo.params["name"], **geo.params)
plot(geo_sub.boundaries, title=("boundaries on %s" %subd), elevate=-3e1)
#plot(submesh, title=("initial mesh on %s" %subd), wireframe=True, elevate=-3e1)
interactive()
def mesh_quality(mesh, oldmesh=None, ratio=1e-1, geo=None, plothist=True, plot_cells=True):
#vertex = VertexFunction("bool", mesh, False)
dgncells = CellFunction("size_t", mesh, 0)
ndeg = 0
for c in cells(mesh):
if c.radius_ratio() < ratio:
dgncells[c] = 1
ndeg += 1
print "%s degenerate cells of radius ratio < %s." % (ndeg, ratio)
minrr = MeshQuality.radius_ratio_min_max(mesh)[0]
print "Minimal radius ratio of mesh:", minrr
if plothist:
from matplotlib import pyplot
pyplot.figure()
exec(MeshQuality.radius_ratio_matplotlib_histogram(mesh, 200), locals())
# plot degenerate cells
if minrr < ratio and plot_cells:
submesh = SubMesh(mesh, dgncells, 1)
title = "degenerate N=%s" %mesh.num_cells()
#plot(submesh, title=title)
geo_sub = geo_from_subdomains(submesh,
"nanopores.geometries.%s.subdomains" %geo.params["name"], **geo.params)
plot(geo_sub.boundaries, title="boundaries "+title)
# find degenerate cells before snapping
if oldmesh is not None:
oldmesh = refine(oldmesh)
oldcells = CellFunction("size_t", oldmesh, 0)
oldcells.array()[:] = dgncells.array()
plot(SubMesh(oldmesh, oldcells, 1), "old degenerate cells N=%s" %mesh.num_cells())
def save_Fref(pb, pnps):
z = pnps.phys.dim - 1
fs = pnps.get_functionals()
#Fp = fs["Fp%d" %z]
#Fshear = fs["Fshear%d" %z]
#Fdrag = Fp + Fshear
Fdrag = fs["Fdragvol%d" %z]
Fel = fs["Fbarevol%d" %z]
#F = Fdrag + Fel
Fpbref = pb.get_functionals()["goal"]
data = dict(
Fdragref = Fdrag,
Felref = Fel,
Fpbref = Fpbref,
)
save_dict(data, ".", "Fref")
def load_Fref():
return load_dict(".", "Fref")
def save_estimators(name, estimators):
save_stuff(name, {key: est.__dict__ for key, est in estimators.items()})
def load_estimators(name):
def to_estimator(dic):
est = Estimator(dic["name"])
est.pairs = dic["pairs"]
return est
dic = load_stuff(name)
return {k : to_estimator(dic[k]) for k in dic}
|
|
try:
    # lxml exposes Element directly on lxml.etree; alias the module so that
    # ElementTree.Element(...) works the same way in both branches.
    from lxml import etree as ElementTree
    from lxml.etree import Element
except ImportError:
    from xml.etree import ElementTree
    from xml.etree.ElementTree import Element
from client import V1Server, V1Error
from base_asset import BaseAsset
from cache_decorator import memoized
from special_class_methods import special_classes
from none_deref import NoneDeref
from string_utils import split_attribute, to_unicode_or_bust
class V1Meta(object):
def __init__(self, *args, **kw):
self.server = V1Server(*args, **kw)
self.global_cache = {}
self.dirtylist = []
def __getattr__(self, attr):
"""
Dynamically build asset type classes when someone tries to get attrs
that we don't have.
"""
return self.asset_class(attr)
def __enter__(self):
return self
def __exit__(self, *args, **kw):
self.commit()
@memoized
def asset_class(self, asset_type_name):
xmldata = self.server.get_meta_xml(asset_type_name)
class_members = {
'_v1_v1meta': self,
'_v1_asset_type_name': asset_type_name,
}
for operation in xmldata.findall('Operation'):
opname = operation.get('name')
def operation_func(myself, opname2=opname):
myself._v1_execute_operation(opname2)
class_members[opname] = operation_func
for attribute in xmldata.findall('AttributeDefinition'):
attr = attribute.get("name")
if attribute.get('attributetype') == 'Relation':
if attribute.get('ismultivalue') == 'True':
def getter(self, attr=attr):
return self._v1_getattr(attr)
def setter(self, value, attr=attr):
return self._v1_setattr(attr, list(value))
def deleter(self, attr=attr):
raise NotImplementedError
else:
def getter(self, attr=attr):
v = self._v1_getattr(attr)
if v:
return self._v1_getattr(attr)[0]
else:
return NoneDeref()
def setter(self, value, attr=attr):
return self._v1_setattr(attr, value)
def deleter(self, attr=attr):
raise NotImplementedError
else:
def getter(self, attr=attr):
return self._v1_getattr(attr)
def setter(self, value, attr=attr):
return self._v1_setattr(attr, value)
def deleter(self, attr=attr):
raise NotImplementedError
class_members[attr] = property(getter, setter, deleter)
bases = [BaseAsset, ]
# mix in any special methods
if asset_type_name in special_classes:
mixin = special_classes[asset_type_name]
bases.append(mixin)
new_asset_class = type(asset_type_name, tuple(bases), class_members)
return new_asset_class
def add_to_dirty_list(self, asset_instance):
self.dirtylist.append(asset_instance)
def commit(self):
errors = []
for asset in self.dirtylist:
try:
asset._v1_commit()
            except V1Error as e:
errors.append(e)
self.dirtylist = []
return errors
def generate_update_doc(self, newdata):
update_doc = Element('Asset')
for attrname, newvalue in newdata.items():
if newvalue is None: # single relation was removed
node = Element('Relation')
node.set('name', attrname)
node.set('act', 'set')
elif isinstance(newvalue, BaseAsset): # single relation was changed
node = Element('Relation')
node.set('name', attrname)
node.set('act', 'set')
ra = Element('Asset')
ra.set('idref', newvalue.idref)
node.append(ra)
elif isinstance(newvalue, list): # multi relation was changed
node = Element('Relation')
node.set('name', attrname)
for item in newvalue:
child = Element('Asset')
child.set('idref', item.idref)
child.set('act', 'add')
node.append(child)
else: # Not a relation
node = Element('Attribute')
node.set('name', attrname)
node.set('act', 'set')
# force unicode value
# idea stolen from https://github.com/Matt141421356/VersionOne.SDK.Python/commit/447e042584bf3bd424c1ff7a83fb247b9d299543
node.text = to_unicode_or_bust(newvalue)
update_doc.append(node)
return update_doc
def create_asset(self, asset_type_name, newdata):
update_doc = self.generate_update_doc(newdata)
new_asset_xml = self.server.create_asset(asset_type_name, update_doc)
asset_type, asset_oid, asset_moment = new_asset_xml.get('id').split(':')
return self.asset_class(asset_type)(asset_oid)
def update_asset(self, asset_type_name, asset_oid, newdata):
update_doc = self.generate_update_doc(newdata)
return self.server.update_asset(asset_type_name, asset_oid, update_doc)
def execute_operation(self, asset_type_name, oid, opname):
return self.server.execute_operation(asset_type_name, oid, opname)
def get_attr(self, asset_type_name, oid, attrname, moment=None):
xml = self.server.get_attr(asset_type_name, oid, attrname, moment)
dummy_asset = ElementTree.Element('Asset')
dummy_asset.append(xml)
return self.unpack_asset(dummy_asset)[attrname]
def query(self, asset_type_name, wherestring, selstring):
return self.server.get_query_xml(asset_type_name, wherestring,
selstring)
def read_asset(self, asset_type_name, asset_oid, moment=None):
xml = self.server.get_asset_xml(asset_type_name, asset_oid, moment)
return self.unpack_asset(xml)
def unpack_asset(self, xml):
output = {}
self.unpack_asset_relations(output, xml)
self.unpack_asset_attributes(output, xml)
return output
def unpack_asset_attributes(self, output, xml):
for attribute in xml.findall('Attribute'):
# key = attribute.get('name').replace('.','_')
key = attribute.get('name')
values = [v.text for v in attribute.findall('Value')]
if len(values) == 0:
values = [attribute.text]
self.add_attribute_to_output(output, key, values)
def unpack_asset_relations(self, output, xml):
# we sort relations in order to insert the shortest ones first, so that
# containing relations are added before leaf ones.
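        # Illustrative example (hypothetical names): a relation named 'Owner'
        # sorts before the qualified 'Owner.Team', so the containing asset is
        # already present in `output` when the leaf relation is attached.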
for relation in sorted(xml.findall('Relation'),
key=lambda x: x.get('name')):
key = relation.get('name')
related_asset_elements = relation.findall('Asset')
rellist = []
for value_element in related_asset_elements:
relation_idref = value_element.get('idref')
value = self.asset_from_oid(relation_idref)
rellist.append(value)
self.add_relation_to_output(output, key, rellist)
def add_relation_to_output(self, output, relation, assets):
if self.is_attribute_qualified(relation):
(container, leaf) = self.split_relation_to_container_and_leaf(
relation)
asset = self.get_related_asset(output, container)
# asset may be unset because the reference is broken
if asset:
asset.with_data({leaf: assets})
else:
output[relation] = assets
def add_attribute_to_output(self, output, relation, values):
if self.is_attribute_qualified(relation):
(container, leaf) = self.split_relation_to_container_and_leaf(
relation)
for (asset, value) in zip(
self.get_related_assets(output, container), values):
# for calculated values it is not an asset so take the value
# directly
if hasattr(asset, 'with_data'):
asset.with_data({leaf: value})
else:
output[relation] = value
else:
output[relation] = values[0]
def is_attribute_qualified(self, relation):
parts = split_attribute(relation)
return len(parts) > 1
def split_relation_to_container_and_leaf(self, relation):
parts = split_attribute(relation)
return ('.'.join(parts[:-1]), parts[-1])
def get_related_assets(self, output, relation):
if self.is_attribute_qualified(relation):
parts = split_attribute(relation)
assets = output[parts[0]]
for part in parts[1:]:
try:
asset = assets[0]
except IndexError:
return []
assets = asset._v1_getattr(part)
return assets
else:
return output[relation]
def get_related_asset(self, output, relation):
assets = self.get_related_assets(output, relation)
try:
return assets[0]
except IndexError:
return None
def asset_from_oid(self, oidtoken):
oid_parts = oidtoken.split(":")
(asset_type, asset_id, moment) = oid_parts if len(oid_parts) > 2 else (
oid_parts[0], oid_parts[1], None)
AssetClass = self.asset_class(asset_type)
instance = AssetClass(asset_id, moment)
return instance
def set_attachment_blob(self, attachment, data=None):
intid = attachment.intid if isinstance(attachment,
BaseAsset) else attachment
return self.server.set_attachment_blob(intid, data)
get_attachment_blob = set_attachment_blob
# type_converters = dict(
# Boolean = bool
# Numeric = float,
# Date = iso8601.parse_date,
# Duration = str,
# Text = str,
# LongText = str,
# Relation = str,
# Rank = str,
# AssetType = str,
# Opaque = str,
# State = int,
# Password = str,
# Blob = str,
# )
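# Illustrative usage sketch (added; not part of this module). It assumes that
# V1Server accepts address/username/password keyword arguments and that
# BaseAsset exposes a create() helper -- adjust to the actual SDK surface:
#
#   with V1Meta(address='www1.v1host.com', username='admin', password='admin') as v1:
#       story = v1.Story.create(Name='Add search box')  # calls create_asset()
#       story.Name = 'Add a search box'                 # marks the asset dirty
#   # leaving the "with" block calls commit(), which flushes the dirty list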
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import time
import datetime
import six
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command, HiveCommand, PrestoCommand, HadoopCommand, \
PigCommand, ShellCommand, SparkCommand, DbTapQueryCommand, DbExportCommand, \
DbImportCommand
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand
}
POSITIONAL_ARGS = {
'hadoopcmd': ['sub_command'],
'shellcmd': ['parameters'],
'pigcmd': ['parameters']
}
def flatten_list(list_of_lists):
return [element for array in list_of_lists for element in array]
def filter_options(options):
options_to_remove = ["help", "print-logs-live", "print-logs"]
return [option for option in options if option not in options_to_remove]
def get_options_list(command_class):
options_list = [option.get_opt_string().strip("--") for option in command_class.optparser.option_list]
return filter_options(options_list)
def build_command_args():
command_args, hyphen_args = {}, set()
for cmd in COMMAND_CLASSES:
# get all available options from the class
opts_list = get_options_list(COMMAND_CLASSES[cmd])
# append positional args if any for the command
if cmd in POSITIONAL_ARGS:
opts_list += POSITIONAL_ARGS[cmd]
# get args with a hyphen and replace them with underscore
for index, opt in enumerate(opts_list):
if "-" in opt:
opts_list[index] = opt.replace("-", "_")
hyphen_args.add(opts_list[index])
command_args[cmd] = opts_list
return command_args, list(hyphen_args)
COMMAND_ARGS, HYPHEN_ARGS = build_command_args()
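# For example (hypothetical option name; the real lists come from qds_sdk): if a
# command class exposed a "--sample-size" option, build_command_args() would
# store it as "sample_size" in COMMAND_ARGS and remember it in HYPHEN_ARGS so
# that create_cmd_args() can translate the underscore back to a hyphen.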
class QuboleHook(BaseHook):
def __init__(self, *args, **kwargs):
conn = self.get_connection(kwargs['qubole_conn_id'])
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd = None
@staticmethod
def handle_failure_retry(context):
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
log = LoggingMixin().log
if cmd.status == 'done':
                    log.info('Command ID: %s has succeeded, hence marking this '
                             'TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
log.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context):
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id)
self.log.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, self.cmd.status
)
while not Command.is_done(self.cmd.status):
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id)
self.log.info("Command Id: %s and Status: %s", self.cmd.id, self.cmd.status)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
self.log.info("Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log())
if self.cmd.status != 'done':
raise AirflowException('Command Id: {0} failed with Status: {1}'.format(
self.cmd.id, self.cmd.status))
def kill(self, ti):
"""
Kill (cancel) a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(
configuration.conf.get('core', 'BASE_LOG_FOLDER')
)
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
def get_log(self, ti):
"""
Get Logs of a command from Qubole
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
        if self.cmd is None:
            cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
        else:
            cmd_id = self.cmd.id
        Command.get_log_id(self.cls, cmd_id)
def get_jobs_id(self, ti):
"""
        Get jobs associated with a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
        :return: Job information associated with the command
"""
        if self.cmd is None:
            cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
        else:
            cmd_id = self.cmd.id
        Command.get_jobs_id(self.cls, cmd_id)
def create_cmd_args(self, context):
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = set([self.dag_id, self.task_id, context['run_id']])
positional_args_list = flatten_list(POSITIONAL_ARGS.values())
for k, v in self.kwargs.items():
if k in COMMAND_ARGS[cmd_type]:
if k in HYPHEN_ARGS:
args.append("--{0}={1}".format(k.replace('_', '-'), v))
elif k in positional_args_list:
inplace_args = v
elif k == 'tags':
if isinstance(v, six.string_types):
tags.add(v)
elif isinstance(v, (list, tuple)):
for val in v:
tags.add(val)
else:
args.append("--{0}={1}".format(k, v))
if k == 'notify' and v is True:
args.append("--notify")
args.append("--tags={0}".format(','.join(filter(None, tags))))
if inplace_args is not None:
args += inplace_args.split(' ')
return args
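# Illustrative sketch (added; not part of Airflow): how a caller might drive the
# hook directly. 'my_dag', the connection id and the Hive query are placeholders;
# in practice the Qubole operator builds these kwargs and calls execute(context).
#
#   hook = QuboleHook(qubole_conn_id='qubole_default', task_id='show_tables',
#                     dag=my_dag, command_type='hivecmd', query='show tables')
#   hook.execute(context)                 # polls Qubole until the command finishes
#   results_file = hook.get_results(ti)   # writes results under BASE_LOG_FOLDER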
|
|
'''
Created on 2016/2/19
:author: hubo
'''
from vlcp.config import defaultconfig
from vlcp.server.module import Module, api, depend, call_api, ModuleNotification
from vlcp.event.runnable import RoutineContainer
from vlcp.service.connection import openflowserver
from vlcp.protocol.openflow import OpenflowConnectionStateEvent
from vlcp.event.connection import ConnectionResetException, ResolveRequestEvent,\
ResolveResponseEvent
import itertools
import socket
from vlcp.event.event import Event, withIndices, M_
from vlcp.event.core import QuitException, syscall_removequeue
from contextlib import closing
def _get_endpoint(conn):
raddr = getattr(conn, 'remoteaddr', None)
if raddr:
if isinstance(raddr, tuple):
# Ignore port
return raddr[0]
else:
# Unix socket
return raddr
else:
return ''
@withIndices()
class TableAcquireUpdate(Event):
pass
@withIndices('connection', 'datapathid', 'vhost')
class FlowInitialize(Event):
pass
@withIndices()
class TableAcquireDelayEvent(Event):
pass
@defaultconfig
@depend(openflowserver.OpenflowServer)
class OpenflowManager(Module):
'''
Manage Openflow Connections
'''
service = True
_default_vhostbind = None
def __init__(self, server):
Module.__init__(self, server)
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._manage_conns
self.routines.append(self.apiroutine)
self.managed_conns = {}
self.endpoint_conns = {}
self.table_modules = set()
self._acquiring = False
self._acquire_updated = False
self._lastacquire = None
self._synchronized = False
self.createAPI(api(self.getconnections, self.apiroutine),
api(self.getconnection, self.apiroutine),
api(self.waitconnection, self.apiroutine),
api(self.getdatapathids, self.apiroutine),
api(self.getalldatapathids, self.apiroutine),
api(self.getallconnections, self.apiroutine),
api(self.getconnectionsbyendpoint, self.apiroutine),
api(self.getconnectionsbyendpointname, self.apiroutine),
api(self.getendpoints, self.apiroutine),
api(self.getallendpoints, self.apiroutine),
api(self.acquiretable, self.apiroutine),
api(self.unacquiretable, self.apiroutine),
api(self.lastacquiredtables)
)
def _add_connection(self, conn):
vhost = conn.protocol.vhost
conns = self.managed_conns.setdefault((vhost, conn.openflow_datapathid), [])
remove = []
for i in range(0, len(conns)):
if conns[i].openflow_auxiliaryid == conn.openflow_auxiliaryid:
ci = conns[i]
remove = [ci]
ep = _get_endpoint(ci)
econns = self.endpoint_conns.get((vhost, ep))
if econns is not None:
try:
econns.remove(ci)
except ValueError:
pass
if not econns:
del self.endpoint_conns[(vhost, ep)]
del conns[i]
break
conns.append(conn)
ep = _get_endpoint(conn)
econns = self.endpoint_conns.setdefault((vhost, ep), [])
econns.append(conn)
if self._lastacquire and conn.openflow_auxiliaryid == 0:
self.apiroutine.subroutine(self._initialize_connection(conn))
return remove
async def _initialize_connection(self, conn):
ofdef = conn.openflowdef
flow_mod = ofdef.ofp_flow_mod(buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
command = ofdef.OFPFC_DELETE
)
if hasattr(ofdef, 'OFPG_ANY'):
flow_mod.out_group = ofdef.OFPG_ANY
if hasattr(ofdef, 'OFPTT_ALL'):
flow_mod.table_id = ofdef.OFPTT_ALL
if hasattr(ofdef, 'ofp_match_oxm'):
flow_mod.match = ofdef.ofp_match_oxm()
cmds = [flow_mod]
if hasattr(ofdef, 'ofp_group_mod'):
group_mod = ofdef.ofp_group_mod(command = ofdef.OFPGC_DELETE,
group_id = ofdef.OFPG_ALL
)
cmds.append(group_mod)
await conn.protocol.batch(cmds, conn, self.apiroutine)
if hasattr(ofdef, 'ofp_instruction_goto_table'):
# Create default flows
vhost = conn.protocol.vhost
if self._lastacquire and vhost in self._lastacquire:
_, pathtable = self._lastacquire[vhost]
cmds = [ofdef.ofp_flow_mod(table_id = t[i][1],
command = ofdef.OFPFC_ADD,
priority = 0,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(),
instructions = [ofdef.ofp_instruction_goto_table(table_id = t[i+1][1])]
)
for _,t in pathtable.items()
for i in range(0, len(t) - 1)]
if cmds:
await conn.protocol.batch(cmds, conn, self.apiroutine)
await self.apiroutine.wait_for_send(FlowInitialize(conn, conn.openflow_datapathid, conn.protocol.vhost))
async def _acquire_tables(self):
try:
while self._acquire_updated:
result = None
exception = None
# Delay the update so we are not updating table acquires for every module
await self.apiroutine.wait_for_send(TableAcquireDelayEvent())
await TableAcquireDelayEvent.createMatcher()
module_list = list(self.table_modules)
self._acquire_updated = False
try:
requests = await self.apiroutine.execute_all(call_api(self.apiroutine, module, 'gettablerequest', {})
for module in module_list)
except QuitException:
raise
except Exception as exc:
self._logger.exception('Acquiring table failed')
exception = exc
else:
vhosts = set(vh for _, vhs in requests if vhs is not None for vh in vhs)
vhost_result = {}
                    # Requests should be a list of (name, (ancestor, ancestor, ...), pathname)
for vh in vhosts:
graph = {}
table_path = {}
try:
for r in requests:
if r[1] is None or vh in r[1]:
for name, ancesters, pathname in r[0]:
if name in table_path:
if table_path[name] != pathname:
raise ValueError("table conflict detected: %r can not be in two path: %r and %r" % (name, table_path[name], pathname))
else:
table_path[name] = pathname
if name not in graph:
graph[name] = (set(ancesters), set())
else:
graph[name][0].update(ancesters)
for anc in ancesters:
graph.setdefault(anc, (set(), set()))[1].add(name)
except ValueError as exc:
self._logger.error(str(exc))
exception = exc
break
else:
sequences = []
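                            # dfs_sort() below performs a topological sort: a table
                            # is appended to `sequences` only once all of its
                            # ancestors have been emitted, so tables end up ordered
                            # along their dependency chains.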
def dfs_sort(current):
sequences.append(current)
for d in graph[current][1]:
anc = graph[d][0]
anc.remove(current)
if not anc:
dfs_sort(d)
                            nopre_tables = sorted([k for k,v in graph.items() if not v[0]], key = lambda x: (table_path.get(x, ''), x))
for t in nopre_tables:
dfs_sort(t)
if len(sequences) < len(graph):
rest_tables = set(graph.keys()).difference(sequences)
self._logger.error("Circle detected in table acquiring, following tables are related: %r, vhost = %r", sorted(rest_tables), vh)
self._logger.error("Circle dependencies are: %s", ", ".join(repr(tuple(graph[t][0])) + "=>" + t for t in rest_tables))
exception = ValueError("Circle detected in table acquiring, following tables are related: %r, vhost = %r" % (sorted(rest_tables),vh))
break
elif len(sequences) > 255:
self._logger.error("Table limit exceeded: %d tables (only 255 allowed), vhost = %r", len(sequences), vh)
exception = ValueError("Table limit exceeded: %d tables (only 255 allowed), vhost = %r" % (len(sequences),vh))
break
else:
full_indices = list(zip(sequences, itertools.count()))
tables = dict((k,tuple(g)) for k,g in itertools.groupby(sorted(full_indices, key = lambda x: table_path.get(x[0], '')),
lambda x: table_path.get(x[0], '')))
vhost_result[vh] = (full_indices, tables)
finally:
self._acquiring = False
if exception:
await self.apiroutine.wait_for_send(TableAcquireUpdate(exception = exception))
else:
result = vhost_result
if result != self._lastacquire:
self._lastacquire = result
self._reinitall()
await self.apiroutine.wait_for_send(TableAcquireUpdate(result = result))
async def load(self, container):
self.scheduler.queue.addSubQueue(1, TableAcquireDelayEvent.createMatcher(), 'ofpmanager_tableacquiredelay')
await container.wait_for_send(TableAcquireUpdate(result = None))
return await Module.load(self, container)
async def unload(self, container, force=False):
await Module.unload(self, container, force=force)
await container.syscall(syscall_removequeue(self.scheduler.queue, 'ofpmanager_tableacquiredelay'))
def _reinitall(self):
for cl in self.managed_conns.values():
for c in cl:
self.apiroutine.subroutine(self._initialize_connection(c))
async def _manage_existing(self):
result = await call_api(self.apiroutine, "openflowserver", "getconnections", {})
vb = self.vhostbind
for c in result:
if vb is None or c.protocol.vhost in vb:
self._add_connection(c)
self._synchronized = True
await self.apiroutine.wait_for_send(ModuleNotification(self.getServiceName(), 'synchronized'))
async def _wait_for_sync(self):
if not self._synchronized:
await ModuleNotification.createMatcher(self.getServiceName(), 'synchronized')
async def _manage_conns(self):
vb = self.vhostbind
self.apiroutine.subroutine(self._manage_existing(), False)
try:
if vb is not None:
conn_up = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_SETUP,
_ismatch = lambda x: x.createby.vhost in vb)
conn_down = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_DOWN,
_ismatch = lambda x: x.createby.vhost in vb)
else:
conn_up = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_SETUP)
conn_down = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_DOWN)
while True:
ev, m = await M_(conn_up, conn_down)
if m is conn_up:
remove = self._add_connection(ev.connection)
self.scheduler.emergesend(ModuleNotification(self.getServiceName(), 'update', add = [ev.connection], remove = remove))
else:
conns = self.managed_conns.get((ev.createby.vhost, ev.datapathid))
remove = []
if conns is not None:
try:
conns.remove(ev.connection)
except ValueError:
pass
else:
remove.append(ev.connection)
if not conns:
del self.managed_conns[(ev.createby.vhost, ev.datapathid)]
# Also delete from endpoint_conns
ep = _get_endpoint(ev.connection)
econns = self.endpoint_conns.get((ev.createby.vhost, ep))
if econns is not None:
try:
econns.remove(ev.connection)
except ValueError:
pass
if not econns:
del self.endpoint_conns[(ev.createby.vhost, ep)]
if remove:
self.scheduler.emergesend(ModuleNotification(self.getServiceName(), 'update', add = [], remove = remove))
finally:
self.scheduler.emergesend(ModuleNotification(self.getServiceName(), 'unsynchronized'))
async def getconnections(self, datapathid, vhost = ''):
"Return all connections of datapath"
await self._wait_for_sync()
return list(self.managed_conns.get((vhost, datapathid), []))
async def getconnection(self, datapathid, auxiliaryid = 0, vhost = ''):
"Get current connection of datapath"
await self._wait_for_sync()
return self._getconnection(datapathid, auxiliaryid, vhost)
def _getconnection(self, datapathid, auxiliaryid = 0, vhost = ''):
conns = self.managed_conns.get((vhost, datapathid))
if conns is None:
return None
else:
for c in conns:
if c.openflow_auxiliaryid == auxiliaryid:
return c
return None
async def waitconnection(self, datapathid, auxiliaryid = 0, timeout = 30, vhost = ''):
"Wait for a datapath connection"
await self._wait_for_sync()
c = self._getconnection(datapathid, auxiliaryid, vhost)
if c is None:
timeout_, ev, _ = await self.apiroutine.wait_with_timeout(
timeout,
OpenflowConnectionStateEvent.createMatcher(datapathid, auxiliaryid,
OpenflowConnectionStateEvent.CONNECTION_SETUP,
_ismatch = lambda x: x.createby.vhost == vhost))
if timeout_:
raise ConnectionResetException('Datapath %016x is not connected' % datapathid)
return ev.connection
else:
return c
async def getdatapathids(self, vhost = ''):
"Get All datapath IDs"
await self._wait_for_sync()
return [k[1] for k in self.managed_conns.keys() if k[0] == vhost]
async def getalldatapathids(self):
"Get all datapath IDs from any vhost. Return ``(vhost, datapathid)`` pair."
await self._wait_for_sync()
return list(self.managed_conns.keys())
async def getallconnections(self, vhost = ''):
"Get all connections from vhost. If vhost is None, return all connections from any host"
await self._wait_for_sync()
if vhost is None:
            return list(itertools.chain(*self.managed_conns.values()))
        else:
            return list(itertools.chain(*(v for k,v in self.managed_conns.items() if k[0] == vhost)))
async def getconnectionsbyendpoint(self, endpoint, vhost = ''):
"Get connection by endpoint address (IP, IPv6 or UNIX socket address)"
await self._wait_for_sync()
return self.endpoint_conns.get((vhost, endpoint))
async def getconnectionsbyendpointname(self, name, vhost = '', timeout = 30):
"Get connection by endpoint name (Domain name, IP or IPv6 address)"
# Resolve the name
if not name:
endpoint = ''
            return await self.getconnectionsbyendpoint(endpoint, vhost)
else:
request = (name, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG | socket.AI_V4MAPPED)
# Resolve hostname
await self.apiroutine.wait_for_send(ResolveRequestEvent(request))
timeout_, ev, m = await self.apiroutine.wait_with_timeout(timeout, ResolveResponseEvent.createMatcher(request))
if timeout_:
# Resolve is only allowed through asynchronous resolver
#try:
# self.addrinfo = socket.getaddrinfo(self.hostname, self.port, socket.AF_UNSPEC, socket.SOCK_DGRAM if self.udp else socket.SOCK_STREAM, socket.IPPROTO_UDP if self.udp else socket.IPPROTO_TCP, socket.AI_ADDRCONFIG|socket.AI_NUMERICHOST)
#except:
raise IOError('Resolve hostname timeout: ' + name)
else:
if hasattr(ev, 'error'):
raise IOError('Cannot resolve hostname: ' + name)
resp = ev.response
for r in resp:
raddr = r[4]
if isinstance(raddr, tuple):
# Ignore port
endpoint = raddr[0]
else:
# Unix socket? This should not happen, but in case...
endpoint = raddr
r = await self.getconnectionsbyendpoint(endpoint, vhost)
if r is not None:
return r
return None
async def getendpoints(self, vhost = ''):
"Get all endpoints for vhost"
await self._wait_for_sync()
return [k[1] for k in self.endpoint_conns if k[0] == vhost]
async def getallendpoints(self):
"Get all endpoints from any vhost. Return ``(vhost, endpoint)`` pairs."
await self._wait_for_sync()
return list(self.endpoint_conns.keys())
def lastacquiredtables(self, vhost = ""):
"Get acquired table IDs"
return self._lastacquire.get(vhost)
async def acquiretable(self, modulename):
"Start to acquire tables for a module on module loading."
if not modulename in self.table_modules:
self.table_modules.add(modulename)
self._acquire_updated = True
if not self._acquiring:
self._acquiring = True
self.apiroutine.subroutine(self._acquire_tables())
async def unacquiretable(self, modulename):
"When module is unloaded, stop acquiring tables for this module."
if modulename in self.table_modules:
self.table_modules.remove(modulename)
self._acquire_updated = True
if not self._acquiring:
self._acquiring = True
self.apiroutine.subroutine(self._acquire_tables())
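# Illustrative sketch (added; not part of vlcp): how another module's routine
# might use this manager through call_api, assuming the default service name
# (the lowercased class name). The datapath id is a placeholder.
#
#   async def _main(self):
#       conn = await call_api(self.apiroutine, 'openflowmanager', 'waitconnection',
#                             {'datapathid': 0x1})
#       dpids = await call_api(self.apiroutine, 'openflowmanager', 'getalldatapathids', {})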
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Particularities of XDS - keywords
# - Only the last keyword declaration counts
# -- except if the keyword accepts multiple declarations
#    [case of: SPOT_RANGE=
#              EXCLUDE_RESOLUTION_RANGE=
#              PROFILE_RANGE=]
# - Multiple declarations on 1 line with no separator
# - The number of values declared by 1 keyword can go from 1 to 9
# - Some keyword names are not compatible with the python name-space
# -- they are translated: cf modified_keys
#
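# Example of the name translation mentioned above: the XDS keyword
# "FRIEDEL'S_LAW=" cannot be a python attribute name, so it is exposed as
# FRIEDELS_LAW (see modified_keys / modified_keys_r below).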
import sys
import os
import re
import commands
import shutil
from time import time, sleep
__version__ = "0.7.12"
__author__ = "Pierre Legrand (pierre.legrand \at synchrotron-soleil.fr)"
__date__ = "26-03-2013"
__copyright__ = "Copyright (c) 2006-2013 Pierre Legrand"
__license__ = "New BSD http://www.opensource.org/licenses/bsd-license.php"
# The environment variable XDSHOME, if set, defines the place where the xds
# executables will be searched. The parallelized executables (xds_par,
# xscale_par) are used by default.
def get_xdshome():
if "XDSHOME" in os.environ.keys():
XDSHOME = os.getenv("XDSHOME")
if not os.path.isfile(os.path.join(XDSHOME, "xds_par")):
#if _verbose:
# print "WARNING: no 'xds_par' found in $XDSHOME path (%s)." % XDSHOME
# print " Using default $PATH location."
XDSHOME = ""
else: XDSHOME = ""
return XDSHOME
XDSHOME = get_xdshome()
xinp = "XDS.INP"
LP_names = ["COLSPOT.LP","CORRECT.LP","DEFPIX.LP","IDXREF.LP","XSCALE.LP",
"INIT.LP","INTEGRATE.LP","XYCORR.LP","XDS.INP","XPARM.XDS"]
multiple_keys = ("SPOT_RANGE",
"EXCLUDE_RESOLUTION_RANGE",
"PROFILE_RANGE",
"UNTRUSTED_RECTANGLE",
"UNTRUSTED_ELLIPSE")
modified_keys = {
"DIRECTION_OF_DETECTOR_Y-AXIS": "DIRECTION_OF_DETECTOR_Y_AXIS",
"DIRECTION_OF_DETECTOR_X-AXIS": "DIRECTION_OF_DETECTOR_X_AXIS",
"X-RAY_WAVELENGTH": "X_RAY_WAVELENGTH",
"REFLECTING_RANGE_E.S.D.": "REFLECTING_RANGE_E_S_D_",
"FRIEDEL'S_LAW": "FRIEDELS_LAW",
"REFINE(IDXREF)": "REFINE_IDXREF",
"REFINE(INTEGRATE)": "REFINE_INTEGRATE",
"REFINE(CORRECT)": "REFINE_CORRECT",
"BEAM_DIVERGENCE_E.S.D.": "BEAM_DIVERGENCE_E_S_D_",
"NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA/BETA": \
"NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA_BETA",
"SPOT_MAXIMUM-CENTROID": "SPOT_MAXIMUM_CENTROID",
"UNIT_CELL_A-AXIS": "UNIT_CELL_A_AXIS",
"UNIT_CELL_B-AXIS": "UNIT_CELL_B_AXIS",
"UNIT_CELL_C-AXIS": "UNIT_CELL_C_AXIS",
"REFLECTIONS/CORRECTION_FACTOR": "REFLECTIONS_CORRECTION_FACTOR",
"MINIMUM_I/SIGMA": "MINIMUM_I_SIGMA",
"REFINE(IDXREF)": "REFINE_IDXREF",
"REFINE(INTEGRATE)": "REFINE_INTEGRATE",
"REFINE(CORRECT)": "REFINE_CORRECT",
"X-GEO_CORR": "X_GEO_CORR",
"Y-GEO_CORR": "Y_GEO_CORR",
"SPOT_MAXIMUM-CENTROID": "SPOT_MAXIMUM_CENTROID" }
illegal_keys = ["SPOT_WIDTH_ALONG_X", "SPOT_WIDTH_ALONG_Y",
"NUMBER_OF_REFLECTIONS_USED_FOR_REFINEMENT_IN_COLPROF",
"MINIMUM_SIGNAL_TO_NOISE_FOR_LOCATING_SPOTS",
"NUMBER_OF_FRAMES_BETWEEN_REFINEMENT_IN_COLPROF","BFRAC","WEAK",
"MAXIMUM_RANDOM_DEVIATE_OF_INTENSITY","GAIN","RMAX"]
translate_keys = {"REFINE": "REFINE(INTEGRATE)",
"RESOLUTION_RANGE_FOR_ACCEPTING_REFLECTIONS": \
"INCLUDE_RESOLUTION_RANGE"}
modified_keys_r = {}
for k in modified_keys.keys(): modified_keys_r[modified_keys[k]] = k
xdsinp_base = """
JOB= ALL
DATA_RANGE= 1 46
SPOT_RANGE= 1 9
SPOT_MAXIMUM-CENTROID= 2.0
BACKGROUND_RANGE= 1 10
MINIMUM_NUMBER_OF_PIXELS_IN_A_SPOT= 5
STRONG_PIXEL= 5.0
OSCILLATION_RANGE= 1.0
STARTING_ANGLE= 74.0
STARTING_FRAME= 1
X-RAY_WAVELENGTH= 0.978954
NAME_TEMPLATE_OF_DATA_FRAMES= ../image_3_???.img
DETECTOR_DISTANCE= 150.01
DETECTOR= ADSC MINIMUM_VALID_PIXEL_VALUE= 1 OVERLOAD= 65000
DIRECTION_OF_DETECTOR_X-AXIS= 1.0 0.0 0.0
DIRECTION_OF_DETECTOR_Y-AXIS= 0.0 1.0 0.0
NX= 2048 NY= 2048 QX= 0.1024 QY= 0.1024
ORGX= 1007.8125 ORGY= 1035.15625
ROTATION_AXIS= 1.0 0.0 0.0
INCIDENT_BEAM_DIRECTION= 0.0 0.0 1.0
FRACTION_OF_POLARIZATION= 0.99
POLARIZATION_PLANE_NORMAL= 0.0 1.0 0.0
! AIR= 0.001
SPACE_GROUP_NUMBER= 0
UNIT_CELL_CONSTANTS= 0 0 0 0 0 0
VALUE_RANGE_FOR_TRUSTED_DETECTOR_PIXELS= 6100 30000
INCLUDE_RESOLUTION_RANGE= 45.0 0.0
REFINE(INTEGRATE)= BEAM ORIENTATION CELL
DELPHI= 8.0
MAXIMUM_NUMBER_OF_PROCESSORS= 32
MAXIMUM_NUMBER_OF_JOBS= 1
RESOLUTION_SHELLS=15.0 8.0 5.0 3.0
TOTAL_SPINDLE_ROTATION_RANGES=15.0 180.0 15.0
STARTING_ANGLES_OF_SPINDLE_ROTATION=-95.0 95.0 5.0
TRUSTED_REGION= 0.0 1.42
PROFILE_FITTING= TRUE
STRICT_ABSORPTION_CORRECTION= TRUE
NUMBER_OF_PROFILE_GRID_POINTS_ALONG_ALPHA/BETA= 9
NUMBER_OF_PROFILE_GRID_POINTS_ALONG_GAMMA= 9
REFINE(IDXREF)= BEAM AXIS ORIENTATION CELL
REFINE(INTEGRATE)= BEAM ORIENTATION CELL
REFINE(CORRECT)= DISTANCE BEAM AXIS ORIENTATION CELL
TEST_RESOLUTION_RANGE= 20 4.5
! REFERENCE_DATA_SET=
"""
BravaisDico = {"a":"triclinic","m":"monoclinic","o":"orthorhombic",
"t":"tetragonal","h":"hexagonal","c":"cubic","P":"Primitive",
"R":"Rhombohedral","F":"Centered","C":"Centered","I":"Centered"}
Bravais_to_Laue = {
    # Each Bravais lattice entry maps to a tuple of Laue tuples.
# Each Laue tuple contains
# (name,first_spg_num, first_spg_name, tuple_of_spg_num)
"aP": (("-1",1,"P1",(1,)),),
"mP": (("2/m",3,"P2",(3,4)),),
"mC": (("2/m",5,"C2",(5,)),),
"mI": (("2/m",5,"C2",(5,)),),
"oP": (("mmm",16,"P222",(16,17,18,19)),),
"oC": (("mmm",21,"C222",(21,20)),),
"oF": (("mmm",22,"F222",(22,)),),
"oI": (("mmm",23,"I222",(23,24)),),
"tP": (("4/m",75,"P4",(75,76,77,78)),
("4/mmm",89,"P422",(89,90,91,92,93,94,95,96))),
"tI": (("4/m",79,"I4",(79,80)),
("4/mmm",97,"I422",(97,98))),
"hP": (("-3",143,"P3",(143,144,145)),
("-31m",149,"P312",(149,151,153)),
("-3m1",150,"P321",(150,152,154)),
("6/m",168,"P6",(168,169,170,171,172,173)),
("6/mmm",177,"P622",(177,178,179,180,181,182))),
"hR": (("-3",146,"R3",(146,)),
("-3m1",155,"R32",(155,))),
"cP": (("m-3",195,"P23",(195,198)),
("m-3m",207,"P432",(207,208,212,213))),
"cF": (("m-3",196,"F23",(196,)),
("m-3m",209,"F432",(209,210))),
"cI": (("m-3",197,"I23",(197,199)),
("m-3m",211,"I432",(211,214))),
"Unknown": ((0,0,0,(0,0)),)}
SPGlib = {
# each SGnumber entry maps (SGsymbol1,SGsymbol2,SGorder)
0:("P1","P1",1),
1:("P1","P1",1),3:("P2","P2",2),4:("P21","P2(1)",2),5:("C2","C2",4),
16:("P222","P222",4),17:("P2221","P222(1)",4),
18:("P21212","P2(1)2(1)2",4),19:("P212121","P2(1)2(1)2(1)",4),
21:("C222","C222",8),20:("C2221","C222(1)",8),22:("F222","F222",16),
23:("I222","I222",8),24:("I212121","I2(1)2(1)2(1)",8),
75:("P4","P4",4),76:("P41","P4(1)",4),77:("P42","P4(2)",4),
78:("P43","P4(3)",4),89:("P422","P422",8),90:("P4212","P42(1)2",8),
91:("P4122","P4(1)22",8),92:("P41212","P4(1)2(1)2",8),
93:("P4222","P4(2)22",8),94:("P42212","P4(2)2(1)2",8),
95:("P4322","P4(3)22",8),96:("P43212","P4(3)2(1)2",8),79:("I4","I4",8),
80:("I41","I4(1)",8),97:("I422","I422",16),98:("I4122","I4(1)22",16),
143:("P3","P3",3),144:("P31","P3(1)",3),145:("P32","P3(2)",3),
149:("P312","P312",6),150:("P321","P321",6),151:("P3112","P3(1)12",6),
152:("P3121","P3(1)21",6),153:("P3212","P3(2)12",6),
154:("P3221","P3(2)21",6),168:("P6","P6",6),169:("P61","P6(1)",6),
170:("P65","P6(5)",6),171:("P62","P6(2)",6),172:("P64","P6(4)",6),
173:("P63","P6(3)",6),177:("P622","P622",12),178:("P6122","P6(1)22",12),
179:("P6522","P6(5)22",12),180:("P6222","P6(2)22",12),
181:("P6422","P6(4)22",12),182:("P6322","P6(3)22",12),
146:("R3","R3",9),155:("R32","R32",18),195:("P23","P23",12),
198:("P213","P2(1)3",12),207:("P432","P432",24),208:("P4232","P4(2)32",24),
212:("P4332","P4(3)32",24),213:("P4132","P4(1)32",24),196:("F23","F23",48),
209:("F432","F432",96),210:("F4132","F4(1)32",96),197:("I23","I23",24),
199:("I213","I2(1)3",24),211:("I432","I432",48),214:("I4132","I4(1)32",48)}
PG2SP = {"1":(1,),"2":(3,4,5),
"222":(16,17,18,19,20,21,22,23,24),
"4":(75,76,77,78,79,80),
"422":(89,90,91,92,93,94,95,96,97,98),
"3":(143,144,145,146),
"312":(149,151,153),
"321":(150,152,154,155),
"6":(168,169,170,171,172,173),
"622":(177,178,179,180,181,182),
"23":(195,196,197,198,199),
"432":(207,208,209,210,211,212,213,214)}
EXCLUDE_ICE_RING = {"EXCLUDE_RESOLUTION_RANGE": [[3.93, 3.87], [3.70, 3.64],
[3.47, 3.41], [2.70, 2.64], [2.28, 2.22], [2.102, 2.042], [1.978, 1.918],
[1.948, 1.888], [1.913, 1.853], [1.751, 1.691]]}
"""!EXCLUDE_RESOLUTION_RANGE= 3.93 3.87 !ice-ring at 3.897 Angstrom
!EXCLUDE_RESOLUTION_RANGE= 3.70 3.64 !ice-ring at 3.669 Angstrom
!EXCLUDE_RESOLUTION_RANGE= 3.47 3.41 !ice-ring at 3.441 Angstrom
!EXCLUDE_RESOLUTION_RANGE= 2.70 2.64 !ice-ring at 2.671 Angstrom
!EXCLUDE_RESOLUTION_RANGE= 2.28 2.22 !ice-ring at 2.249 Angstrom
!EXCLUDE_RESOLUTION_RANGE= 2.102 2.042 !ice-ring at 2.072 Angstrom - strong
!EXCLUDE_RESOLUTION_RANGE= 1.978 1.918 !ice-ring at 1.948 Angstrom - weak
!EXCLUDE_RESOLUTION_RANGE= 1.948 1.888 !ice-ring at 1.918 Angstrom - strong
!EXCLUDE_RESOLUTION_RANGE= 1.913 1.853 !ice-ring at 1.883 Angstrom - weak
!EXCLUDE_RESOLUTION_RANGE= 1.751 1.691 !ice-ring at 1.721 Angstrom - weak
"""
def get_BravaisToSpgs():
Bravais_to_spg = {}
for br in Bravais_to_Laue:
for lauespgs in Bravais_to_Laue[br]:
if br in Bravais_to_spg:
Bravais_to_spg[br] += lauespgs[3]
else:
Bravais_to_spg[br] = lauespgs[3]
return Bravais_to_spg
# global list to keep track of the order in which XDS.INP keywords appear
xdsKeyOrder = []
class Lattice:
def __init__(self, cell, Bravais_type="Unknown", symmetry=None,
dmin=0.5, friedels_law=1):
self.alpha = 90.
self.beta = 90.
self.gamma = 90.
# Minimal Bragg spacing = Maximal resolution
        # Non-zero value to avoid zero division errors.
self.dmin = dmin
        # Friedel's law default = true, equivalent to anomal=false
self.friedels_law = friedels_law
# Indexing default parameters
self.fit = None # indexing geometrical fit quality, Float
self.character = None # Lattice character, Int (from 1 to 44,
# see International tables)
        self.reindexing = None # tuple of length 12
# Scaling results
self.rsym = None
self.rmeas = None
self.rmea3 = None
self.compl = None
self.comp3 = None
self.isig = None
if type(cell) == type(""):
cell = map(float, cell.strip().split())
if len(cell) == 6:
self.cell = map(float,tuple(cell))
(self.a, self.b, self.c,
self.alpha, self.beta, self.gamma) = tuple(cell)
elif len(cell) == 3:
self.cell = map(float,tuple(cell))
self.cell = cell[0], cell[1], cell[2], 90., 90., 90.
(self.a, self.b, self.c) = tuple(cell)
else:
print cell, len(cell)
print "ERROR: Number of argument incorrect for Lattice class."
sys.exit()
if Bravais_type in Bravais_to_Laue.keys():
self.Bravais_type = Bravais_type
if Bravais_type == "Unknown" and symmetry:
self.symmetry_num = int(symmetry)
for brav in Bravais_to_Laue:
for Laue_spgs in Bravais_to_Laue[brav]:
if self.symmetry_num in Laue_spgs[3]:
self.Bravais_type = brav
break
else:
if symmetry:
# Verify that the symmetry number is
# compatible with the Bravais type"
symnums = []
for syms in Bravais_to_Laue[self.Bravais_type]:
symnums.extend(syms[-1])
if symmetry not in symnums:
print "ERROR: Given symmetry number",
print " imcompatible with Bravais type."
sys.exit()
else: self.symmetry_num = int(symmetry)
else:
# Set lowest symmetry number for the given Bravais type
self.symmetry_num = Bravais_to_Laue[self.Bravais_type][0][1]
#
self.symmetry_str1 = SPGlib[self.symmetry_num][0]
self.symmetry_str2 = SPGlib[self.symmetry_num][1]
self.multiplicity = SPGlib[self.symmetry_num][2]
def prt(self, fmt=6*"%7.1f"):
return fmt % tuple(self.cell)
def __str__(self):
#self.cell = map(float,tuple(self.cell))
return self.prt()
def volum(self):
from math import cos,pi
d2r = pi/180
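        # Triclinic cell volume:
        # V = a*b*c*sqrt(1 - cos^2(alpha) - cos^2(beta) - cos^2(gamma)
        #                + 2*cos(alpha)*cos(beta)*cos(gamma))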
a, b, c, al, be, ga = self.cell
cosa, cosb, cosg = cos(al*d2r), cos(be*d2r), cos(ga*d2r)
return a*b*c*(1-cosa**2-cosb**2-cosg**2+2*cosa*cosb*cosg)**0.5
def idealize(self):
_latt = self.Bravais_type[0]
a, b, c, alpha, beta, gamma = self.cell
if _latt == "m":
self.alpha = 90.
self.gamma = 90.
if _latt == "o":
self.alpha = 90.
self.beta = 90.
self.gamma = 90.
if _latt == "t":
a = (a+b)/2.
self.a = a
self.b = a
self.alpha = 90.
self.beta = 90.
self.gamma = 90.
if _latt == "h":
a = (a+b)/2.
self.a = a
self.b = a
self.alpha = 90.
self.beta = 90.
self.gamma = 120.
if _latt == "c":
a = (a+b+c)/3.
self.a = a
self.b = a
self.c = a
self.alpha = 90.
self.beta = 90.
self.gamma = 90.
self.cell = (self.a, self.b, self.c,
self.alpha, self.beta, self.gamma)
class Param:
"""A simple class to handle the parameters that fills templates"""
def __init__(self, obj=None):
"""Constructor for the Param classes from file or string."""
#if type(obj) == file:# needs python version >= 2.2
from types import FileType
if type(obj) == FileType:
exec obj.read() in self.__dict__
obj.close()
if type(obj) == type(""):
exec obj in self.__dict__
def __getitem__(self, a):
return self.__dict__[a]
def __setitem__(self, a, b):
self.__dict__[a] = b
def __delitem__(self, a):
del self.__dict__[a]
def mix(self, _par):
"""Update the current param with either
custom Param instance, dict or executable string."""
# if type(_par) == dict: # needs python version >= 2.2
if type(_par) == type({}):
self.__dict__.update(_par)
elif type(_par) == type(""):
exec _par in self.__dict__
else:
# if type(_par) == types.InstanceType:
self.__dict__.update(_par.__dict__)
def add(self, _par):
"""Complete and/or replace the current param with custom dict."""
# if type(_par) == dict: # needs python version >= 2.2
if type(_par) == type({}):
self.__dict__.update(_par)
def keys(self):
rkeys = []
for k in self.__dict__.keys():
if k not in ("__builtins__","__doc__","__module__"):
rkeys.append(k)
return rkeys
def has_key(self, key):
if key in self.keys(): return 1
else: return 0
def intersect(self, par2):
return filter(self.has_key, par2.keys())
class XParam(Param):
def add(self, par_dict):
"""Complete and/or replace the current param with custom dict."""
for k in par_dict.keys():
if k in multiple_keys:
if not k in self.keys():
par_dict[k] = [par_dict[k]]
else:
self[k].append(par_dict[k])
par_dict[k] = self[k]
self.__dict__.update(par_dict)
def copy(self):
return xdsInp2Param(inp_str="%s" % self)
def xds_parse(self):
""" Parser to transforme, if possible, string variables to:
numerical variable, or tupple of numerical or string"""
def _parse(keyStr):
fmt_str = "'%s',"
fmt_num = "%s,"
valStr = ""
subValues = keyStr.split()
for s in subValues:
try:
float(s)
fmtVal = fmt_num
except ValueError:
fmtVal = fmt_str
valStr += fmtVal % (s)
return valStr[:-1]
for k in self.keys():
_to_exec = "%s= " % k
if k in multiple_keys:
if type(self[k]) != type(""):
for subv in self[k]:
_to_exec += "["+ _parse(subv) +"],"
else: _to_exec += _parse(self[k])+","
else:
_to_exec += _parse(self[k])
exec _to_exec in self.__dict__
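    # Sketch of what xds_parse() produces (hypothetical raw values):
    #   "5 10"          -> (5, 10)           several numeric tokens give a tuple
    #   "XDS_ASCII.HKL" -> 'XDS_ASCII.HKL'   a single non-numeric token stays a string
    # and each entry of a multiple_keys keyword is parsed into its own list.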
def __repr__(self):
def _prt(self,l,_to_repr):
line = ""
for k in l:
kp = k
if k in modified_keys_r.keys(): kp = modified_keys_r[k]
if k in self.keys():
if k in multiple_keys:
for subval in self[k]:
line += "%s= %s\n " % (kp, toString(subval))
line = line[:-1]
else:
line += "%s= %s " % (kp, toString(self[k]))
_to_repr.remove(k)
return line, _to_repr
_repr = "!=== File generated by XUPY v%s ===!\n\n" % __version__
_to_repr = self.keys()
if xdsKeyOrder:
for l1 in xdsKeyOrder:
_line, _to_repr = _prt(self,l1,_to_repr)
_repr += " %s\n" % _line[:-1]
for not_print in ("__builtins__","__module__","__doc__"):
if _to_repr.count(not_print): _to_repr.remove(not_print)
_repr += "\n!=== Added Keywords ===!\n\n"
# _to_repr[:], a copy of _to_repr, is used to avoid interference
# with the _to_repr.remove(k) instruction in the _prt(l) function
for k2 in _to_repr[:]:
# To avoid printing of internal parameters
if k2[0] != "_":
_line, _to_repr = _prt(self,(k2,),_to_repr)
_repr += " %s\n" % _line[:-1]
return _repr
class DataCollectInfo:
""" A simple class to handle Data Collection Image informations."""
def __init__(self, init):
""" ImagesInfo can be contruct from either a string template or a
tupple of 4 building elements (dir, prefix, nDigits, suffix).
"""
self.image_numbers = [] # set by lookup_image_numbers()
self.image_ranges = [] # set by lookup_image_ranges()
if type(init) == type(""):
self.directory, init = os.path.split(init)
regexp = re.compile(r"(.*)_(.*)\.(.*)")
match = regexp.match(init)
self.prefix = match.group(1)
numeric = match.group(2)
self.suffix = match.group(3)
self.nDigits = len(numeric)
elif type(init) == type(()) and len(init) == 4:
self.directory = init[0]
self.prefix = init[1]
self.nDigits = int(init[2])
self.suffix = init[3]
else:
raise TypeError, "Unexpected ImagesInfo contructor argument."
if not self.directory:
self.directory = "."
self.format = self.prefix+"_%0"+str(self.nDigits)+"d."+self.suffix
expression = self.prefix + "_([0-9]*)\." + self.suffix
self.regexp = re.compile(expression)
        self.regexpCompress = re.compile(expression + r"(\.gz|\.z|\.Z|\.bz2)?")  # optional compression suffix
def getNumber(self, imageName):
d, imageName = os.path.split(imageName)
match = self.regexp.match(imageName)
        # the image number is the only capture group in self.regexp
        return int(match.group(1))
def getXDSTemplate(self):
xdst = self.directory+"/"+self.prefix+"_"+\
self.nDigits*"?"+"."+self.suffix
if len(xdst) > 50:
print ">>> Warning NAME_TEMPLATE_OF_DATA_FRAMES has more than 50",
print " characters! XDS will stop."
print ">>> Lengthy path names should be abbreviated by a symbolic",
print " link for frames directory."
return xdst
def getMosflmTemplate(self):
return self.prefix + "_" + self.nDigits*"#" + "." + self.suffix
def lookup_image_numbers(self):
"""Return a list of matching image number. Removes duplicate numbers.
"""
images_num = []
files = os.listdir(self.directory)
files.sort()
prev = 0
for f in files:
match = self.regexpCompress.match(f)
if match:
n = int(match.group(1))
if n != prev:
images_num.append(int(match.group(1)))
prev = n
self.image_numbers = images_num
return images_num
def lookup_image_ranges(self):
"""Return a range list of consecutive image number.
For example: [[1,3],[91,93]] or [[1,90]]
"""
seqf = []
if self.lookup_image_numbers():
prev, i = self.image_numbers[0]-1, self.image_numbers[0]
for n in self.image_numbers:
if n != prev+1:
seqf.append([i,prev])
i = n
prev = n
seqf.append([i,prev])
self.image_ranges = seqf
return seqf
def get_range(self, minf=None, maxf=None):
if self.image_ranges:
min_c, max_c = self.image_ranges[0][0], self.image_ranges[-1][-1]
if minf: min_c = max(minf,min_c)
if maxf: max_c = min(maxf,max_c)
return [min_c, max_c]
else:
return []
def getClosestImage(self, target):
"""Return closest existing image number to target.
"""
target = int(target)
if not self.image_numbers:
self.lookup_image_numbers()
diff = map(lambda x,y: abs(x-y), self.image_numbers,
len(self.image_numbers)*(target,))
return self.image_numbers[diff.index(min(diff))]
def toString(obj):
""" Transforme all variables to a string format"""
if type(obj) == type(()) or type(obj) == type([]):
return " ".join(map(str,obj))
else: return str(obj)
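# e.g. toString((1, 2, 3)) -> "1 2 3" and toString(90.0) -> "90.0"; used by
# XParam.__repr__ to write tuple values back in XDS keyword syntax.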
def xdsInp2Param(inp_name="XDS.INP", inp_str=None):
""" Translate an XDS input file to a Paramter object.
Return the Paramter object and a list of the key order appearence"""
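    # Typical call (hypothetical file name):
    #   xpar = xdsInp2Param("XDS.INP")
    # afterwards e.g. xpar.DATA_RANGE is a parsed tuple and str(xpar) writes
    # the keywords back in XDS.INP syntax.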
global xdsKeyOrder
if not inp_str:
inp_str = opReadCl(inp_name)
xdsKeyOrder = []
# Recognition template for XDS keywords
key_re = r"([A-Z0-9_.\-\\'\(\)\/]+=)"
newPar = XParam()
allKey = []
for line in inp_str.splitlines():
aline = re.split("(!)",line.strip())[0]
raw_keys = re.split(key_re,aline)[1:]
try: nkeys = len(raw_keys)/2
except:
print "Can't process line:\n", aline
print "Wrong keys are: ",raw_keys,"\nSTOP!"
return None
if nkeys:
_kOrd = []
for i in range(0,nkeys*2,2):
_d0 = {}
newkey = raw_keys[i][:-1]
if newkey in modified_keys.keys():
newkey = modified_keys[newkey]
if newkey not in allKey:
_kOrd.append(newkey)
allKey.append(newkey)
if newkey in multiple_keys:
_d0[newkey] = [raw_keys[i+1].strip()]
else: _d0[newkey] = raw_keys[i+1].strip()
newPar.mix(_d0)
elif newkey in multiple_keys:
values = []
previous = getattr(newPar,newkey)
if type(previous) != type(""):
for v in previous: values.append(v)
else: values.append(previous)
values.append(raw_keys[i+1].strip())
_d0[newkey] = values
newPar.mix(_d0)
if _kOrd: xdsKeyOrder.append(_kOrd)
newPar.xds_parse()
return newPar
def latest(check_names=LP_names):
rexp = re.compile(r"(.*)\.(\d\d\d)\Z")
ns = [int(rexp.match(n).group(2)) for n in os.listdir(".") if \
(rexp.match(n) and rexp.match(n).group(1) in check_names)]
if not ns: return 0
else: return max(ns)
def saveLastVersion(file_names, suffix=""):
import filecmp
last_vnum = latest(file_names)
if not suffix:
for name in file_names:
last_lnum = latest((name,))
compare = name, name+".%03d" % last_lnum
if os.path.isfile(compare[0]) and os.path.isfile(compare[1]):
if not filecmp.cmp(name,name+".%03d" % last_lnum):
shutil.copyfile(name ,name+".%03d" % (last_vnum+1))
elif os.path.isfile(compare[0]):
if os.path.getsize(compare[0]) != 0:
shutil.copyfile(name ,name+".%03d" % (last_vnum+1))
else:
for name in file_names:
if os.path.getsize(name) != 0:
shutil.copyfile(name ,name+str(suffix))
def exec_prog(prog_name, stdinp=None, stdout= None, stderr=None):
if not stdout : stdout = " "
else: stdout = " > " + stdout
if not stdinp : stdinp = " "
else: stdinp = " < " + stdinp
if not stderr : stderr = " "
else: stderr = " 2>" + stderr
os.system(prog_name+stdinp+stdout+stderr ) # use popen instead ?
def run_xds_thread(arguments):
tpar,tinp,tout,tdir,tsave = arguments
tmp_par = tpar.copy()
return run_xds(tmp_par, inp_f=tinp, out_f=tout, directory=tdir, save=tsave)
def new_range(r,n):
l = r[1]-r[0]
new_r = [r[0]-1]
for a in range(n): new_r.append(r[0]+int((a+1)*l/float(n)))
return new_r
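# Worked example: new_range([1, 100], 4) -> [0, 25, 50, 75, 100]; the caller
# turns consecutive pairs into DATA_RANGEs 1-25, 26-50, 51-75 and 76-100.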
def taskCallback(message):
global FinishedThread
FinishedThread += 1
print " <== Threads", message
def waitForAllThreadsEnd(NumberOfThreadToWait):
global FinishedThread
while FinishedThread != NumberOfThreadToWait:
sleep(0.1)
def run_multi_integrate(xpar_init,inp_f=None,main_directory=None,
nThreads=2,init=0):
from ThreadPool import ThreadPool
# Copy
xpar = xpar_init.copy()
global FinishedThread
FinishedThread = 0
pool = ThreadPool(nThreads)
if init:
xpar.JOB = "INIT","DEFPIX","INTEGRATE","CORRECT"
files_to_copy = "XPARM.XDS","X-CORRECTIONS.pck","Y-CORRECTIONS.pck"
else:
xpar.JOB = "DEFPIX","INTEGRATE","CORRECT"
files_to_copy = "XPARM.XDS","BKGINIT.pck","BLANK.pck",\
"GAIN.pck","X-CORRECTIONS.pck","Y-CORRECTIONS.pck"
if not main_directory: main_directory = os.getcwd()
if os.path.isdir(main_directory):
os.chdir(main_directory)
else:
print "STOP! Directory not found:",directory
sys.exit()
range_list = new_range(xpar.DATA_RANGE, nThreads)
_templ = xpar.NAME_TEMPLATE_OF_DATA_FRAMES
if type(_templ) == type("") and _templ[0] != "/":
xpar.NAME_TEMPLATE_OF_DATA_FRAMES = "../" + \
xpar.NAME_TEMPLATE_OF_DATA_FRAMES
if type(_templ) == type(()) and _templ[0][0] != "/":
xpar.NAME_TEMPLATE_OF_DATA_FRAMES = "../" + \
xpar.NAME_TEMPLATE_OF_DATA_FRAMES[0], \
xpar.NAME_TEMPLATE_OF_DATA_FRAMES[1]
hklfiles = []
startTime = time()
print "\n"
for NTh in range(nThreads):
#newdir = os.path.join(main_directory,"integrate_batch_%d" % (NTh+1))
newdir = "integrate_batch_%d" % (NTh+1)
if not os.path.exists(newdir): os.mkdir(newdir)
for file in files_to_copy:
if os.path.isfile(file):
shutil.copyfile(file,os.path.join(newdir,file))
else:
print "STOP: Can't find file",file
sys.exit()
os.chdir(newdir)
xpar.DATA_RANGE = range_list[NTh]+1,range_list[NTh+1]
xpar.SPOT_RANGE = (range_list[NTh]+1,range_list[NTh]+4),
args = (xpar,inp_f,"xds.out",os.getcwd(),0)
pool.queueTask(run_xds_thread,args,taskCallback)
# To avoid chdir before the xds process is started...
print " ==> Threads XDS started for integration of images %4d to %4d"\
% (xpar.DATA_RANGE[0],xpar.DATA_RANGE[1])
sleep(2.0)
os.chdir("..")
hklfiles.append(os.path.join(newdir,"XDS_ASCII.HKL"))
pool.joinAll()
print "\n"
while FinishedThread != nThreads:
sleep(0.1)
#read cell
endTime = time()
print "\n Integration time: %.1f seconds" % (endTime - startTime)
H = read_xdsascii_head(hklfiles[0])
if H["friedels_law"] == "TRUE": H["friedels_law"] = 1
elif H["friedels_law"] == "FALSE": H["friedels_law"] = 0
dmin = get_maxResolution(os.path.join(newdir,"INTEGRATE.LP"))
xlatt = Lattice(H["cell"], "Unknown", symmetry=H["sym"],
dmin=dmin, friedels_law=H["friedels_law"])
run_xscale(hklfiles, "batch_merge.hkl", xlatt, save=1, out_f="xscale.out")
def run_xds(new_par, inp_f=xinp, out_f=None, directory=None, save=1):
if directory:
if not os.path.exists(directory):
try: os.mkdir(directory)
except:
print "STOP! Can't creat xds working directory:",directory
sys.exit()
if os.path.isdir(directory): os.chdir(directory)
#print "Working on images %4d to %4d in %s" % (r[0],r[1],directory)
if inp_f:
# Verify the presence of the specified XDS.INP file
if not os.path.isfile(inp_f):
print ">>> ERROR: Can't find file "+inp_f+" !"
sys.exit()
# If an old XDS.INP file exist, try to backup it.
if inp_f and os.path.isfile(xinp):
try: shutil.copyfile(xinp, xinp + "_backup")
except:
print ">>> ERROR: Can't save old "+xinp+" file !"
sys.exit()
# Try to save the trace of the first XDS.INP template used
if not os.path.isfile(xinp+"_init"):
shutil.copyfile(inp_f, xinp+"_init")
xpar = xdsInp2Param(inp_name=inp_f)
else:
xpar = XParam()
xpar.mix(new_par)
opWriteCl(xinp, "%s" % xpar)
exec_prog(os.path.join(XDSHOME,"xds_par"), stdout=out_f, stderr="xupy.stderr")
r = xpar.DATA_RANGE
if save: saveLastVersion(LP_names)
return "XDS finished for integration of images %4d to %4d" % (r[0],r[1])
def getProfilRefPar(infile="INTEGRATE.LP"):
p_lp = opReadCl(infile)
if len(p_lp) <= 1500:
print "\nERROR! Uncompleted 'INTEGRATE' step."
sys.exit()
#if init:
# st1 = p_lp.index("BASED ON SPOT PROFILE PARAMETERS")+32
# fit_dat_1 = p_lp[st1:st1+163].replace("DEGREES","")
else:
st1 = p_lp.index("* SUGGESTED VALUES FOR INPUT PARAMETERS *")
fit_dat_1 = p_lp[st1+46:st1+165]
return xdsInp2Param(inp_str=fit_dat_1)
def gxparm2xpar(Dir):
shutil.copyfile(os.path.join(Dir,"GXPARM.XDS"),
os.path.join(Dir,"XPARM.XDS"))
def get_num(seq,num):
L = []
for i in num: L.append(seq[i])
return L
facteur_names = ["Compl","ComplL","Compl3","Unique","Total","Total3",
"Compar","Rsym","RsymL","Rsym3","Compa3","Rmeas","Rmea3","I/sig",
"I/sigL","Misfit","Absent","AbsIav"]
def resum_scaling(lpf="CORRECT.LP", ios_threshold=2.0):
""" Extract Scaling statistics from XSCALE.LP or CORRECT.LP."""
lp = opReadCl(lpf)
if len(lp) < 2000: return None
s = XParam()
file_type = lpf.split("/")[-1][:6]
if file_type == "CORREC": correct = 1
elif file_type == "XSCALE": correct = 0
try:
spa = lp.index("CORRECTION PARAMETERS FOR THE STANDARD ERROR OF")
spb = lp.index(" ***********", spa+100)
AB6 = lp[spa:spb].split()
if correct:
#print AB6[-6:]
#sp1 = lp.index(" b ISa")
#sp2 = lp.index(" INTEGRATE.HKL ", sp1)
s.K1s, s.K2s, s.IoverSigmaAsympt = map(float, AB6[-3:])
else:
#print AB6[-50:-6]
sp1 = lp.index("ISa0 INPUT DATA SET")
sp2 = lp.index("\n", sp1+30)
s.K1s, s.K2s, s.IoverSigmaAsympt = map(float, \
lp[sp1:sp2].split()[-5:-2])
#s.IoverSigmaAsympt = 1/((s.K1s*(s.K2s+0.0004))**0.5)
except:
s.IoverSigmaAsympt = -99.9
try:
st2 = lp.index(" STATISTICS OF S")
except:
st2 = lp.rindex(" COMPLETENESS AND QUALITY")
s.LowestReso = 100
slowr = lp.index("INCLUDE_RESOLUTION_RANGE=") + 26
s.LowestReso, s.HighestReso = lp[slowr:slowr+80].splitlines()[0].split()
if correct:
st3 = lp.index("NUMBER OF REJECTED MISFITS ",st2)
st6 = lp.index("NUMBER OF UNIQUE ACCEPTED REFLECTIONS " ,st2)
stat_g = lp[st3:st6+58].split()
s.misfit, s.absent = get_num(stat_g,(4,10))
else:
st3 = lp.index("REFLECTIONS REJECTED")
st6 = lp.index("REFLECTIONS ON OUTPUT FILE")
s.misfit, tmp = get_num(lp[st3-24:st6].split(),(0,-1))
s.absent = "0"
#
st9 = lp.index(" RESOLUTION\n RESOLUTION " ,st2)+12
st10 = lp.index(" Corr\n\n" ,st2)+10
st11 = lp.index("\n\n",st10)
#st12 = lp.index("NOISE >= 3.0" ,st11)
#st13 = lp.index("\n\n\n",st12)
st14 = lp.index("WILSON LINE ",st11)
#
if correct:
stat_tg = lp[st10:st11].splitlines()
s.last_table = lp[st9:st11]
#stat_tg = lp[st10:st11].splitlines()[4:-2]
#stat_tg3 = lp[st12:st13].splitlines()[4:-2]
else:
#st10x = lp.index("NOISE >= -2.0" ,st2)
st10x = lp.index("= STATISTICS " ,st2)
#st12x = lp.index("NOISE >= 4.0" ,st11)
#stat_tg = lp[st10:st10x].splitlines()[4:-2]
stat_tg = lp[st10:st10x].splitlines()[4:-3]
#stat_tg3 = lp[st12:st12x].splitlines()[4:-2]
stat_wilson = lp[st14+44:st14+75].split()
#
TG, TG3 = [], []
for l in stat_tg: TG.append(l.split())
#for l in stat_tg3: TG3.append(l.split())
s.wilson_b, s.wilson_corr = stat_wilson[0], stat_wilson[2]
s.reso, s.resoL = TG[-2][0], TG[-3][0]
s.compar, s.comparL = TG[-1][7], TG[-2][7]
s.total = TG[-1][1]
s.compl, s.complL = TG[-1][4], TG[-2][4]
s.rsym, s.rsymL = TG[-1][5], TG[-2][5]
s.rmeas, s.rmeasL = TG[-1][9], TG[-2][9]
s.isig, s.isigL = TG[-1][8], TG[-2][8]
s.anoNum, s.anoNumL = TG[-1][-1], TG[-2][-1]
s.anoSig, s.anoSigL = TG[-1][-2], TG[-2][-2]
s.anoCorr, s.anoCorrL = TG[-1][-3], TG[-2][-3]
s.cchalf, s.cchalfL = TG[-1][-4], TG[-2][-4]
s.unique = TG[-1][2]
#s.rsym3, s.rsym3L = TG3[-1][5], TG3[-2][5]
#s.rmeas3, s.rmeas3L = TG3[-1][9], TG3[-2][9]
#s.total3 = TG3[-1][1]
#s.compl3, s.compl3L = TG3[-1][4], TG3[-2][4]
#s.compar3, s.compar3L = TG3[-1][7], TG3[-2][7]
if correct:
stt = lp.index(" STANDARD ERROR OF REFLECTION INTENSITIES")
stt = lp.index("--------\n", stt)
statline = lp[stt+9:stt+86].split()
s.LowestReso, s.HighestReso, s.iosig, s.chi2, s.rsym = statline[0:5]
#
for k in s.keys():
if type(s[k]) == str:
try:
s[k] = float(s[k].replace("%","").replace("*",""))
except:
pass
#
if float(s.absent):
stnabs = lp.index("AVERAGE INTENSITY FOR", st2)
s.AbsNum = int(lp[stnabs:stnabs+60].split()[3])
st7 = lp.index("SYSTEMATICALLY ABSENT",st2)
s.AbsIav = float(lp[st7+27:st7+32].strip())
else:
s.AbsIav = 0
s.AbsNum = 0
#
reso, rsym, ios = [],[],[]
for i in TG[:-1]:
reso.append(float(i[0]))
rsym.append(float(i[9][:-1]))
ios.append(float(i[8]))
#
if correct:
stcs = lp.index("SELECTED SPACE GROUP AND UNIT")
else:
stcs = 0
stcell = lp.index("UNIT_CELL_CONSTANTS=", stcs)+20
s.cell = lp[stcell:stcell+51].rstrip()
stspg = lp.index("SPACE_GROUP_NUMBER=", stcs)+19
s.spg_num = int(lp[stspg:stspg+5])
s.spg_sym = SPGlib[s.spg_num][1]
ind = 0
for i in range(len(reso)):
if ios[i] >= ios_threshold: ind = i
if 0 < ind < len(reso)-1:
M = (ios[ind] - ios_threshold)/(ios[ind] - ios[ind+1])
s.dmin = reso[ind] + (reso[ind] - reso[ind+1]) * M
elif ind == 0: s.dmin = reso[-1]
else: s.dmin = reso[ind]
#print s.dmin
s.multiplicity = s.total/s.unique
return s
def unpack_latticefit(_str):
ss = _str.split()
latt = Lattice((map(float,ss[3:9])),ss[1])
latt.fit = float(ss[2])
latt.character = int(ss[0])
latt.reindexing = tuple(map(int,ss[9:]))
return latt
def resum_idxref(idxref="IDXREF.LP"):
list_latticesFit = []
i_lp = opReadCl(idxref)
st1 = i_lp.index("LATTICE- BRAVAIS-")
idxref_latticesFit = i_lp[st1+172:st1+5012]
return map(unpack_latticefit, idxref_latticesFit.splitlines())
def get_xparm_cell(xp_name="XPARM.XDS"):
return tuple(map(float,\
opReadCl(xp_name).splitlines()[7].split()[1:]))
def select_lattices(limit = 100, idxref="IDXREF.LP"):
selected = []
selection = Param()
for _latt in resum_idxref(idxref):
#if _latt.fit <= limit and _latt.fit != 0.0:
if _latt.fit <= limit:
selected.append(_latt)
return selected
def opReadCl(filename):
f = open(filename)
r = f.read()
f.close()
return r
def opWriteCl(filename, _str):
f = open(filename,"w")
f.write(_str)
f.close()
_add = lambda x,y: x+y
def mean(seq):
return reduce(_add, [s[0] for s in seq])/float(len(seq))
def wMean(seq):
sum = reduce(_add, [s[0]*s[1] for s in seq])
sumw = reduce(_add, [s[1] for s in seq])
return sum/float(sumw)
def standardDeviation(seq):
m = mean(seq)
sum2 = reduce(_add, [ (n[0]-m)**2 for n in seq ])
return (sum2/(len(seq)-1))**0.5
def wStandardDeviation(seq):
m = wMean(seq)
sum2 = reduce(_add, [ n[1]*(n[0]-m)**2 for n in seq ])
sumw = reduce(_add, [ n[1] for n in seq ])
return (sum2/sumw)**0.5
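# These helpers expect sequences of (value, weight) pairs (the unweighted
# ones simply ignore the weights), e.g.
#   wMean([(1.0, 1.0), (3.0, 3.0)])              -> 2.5
#   wStandardDeviation([(1.0, 1.0), (3.0, 3.0)]) -> ~0.866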
def read_xdsascii_head(file_name_in):
head = {}
head["cell"] = 0,0,0,90,90,90
head["sym"] = 0
head["inputfile_name"] = ""
head["inputfile_type"] = ""
head["merge"] = ""
head["friedels_law"] = ""
head["wavelength"] = 0
head["template_name"] = ""
head["i_set"] = []
head["include_resolution"] = 1000, 0
if not os.path.exists(file_name_in):
print "ERROR! Can't find file %s.\nSTOP.\n" % (file_name_in)
sys.exit()
raw = open(file_name_in)
line = raw.readline()
while line[0] == "!":
if line.count("MERGE=") == 1:
head["merge"] = line[line.index("MERGE=")+6:-1].split()[0].strip()
if line.count("FRIEDEL'S_LAW="):
head["friedels_law"] = \
line[line.index("FRIEDEL'S_LAW=")+14:-1].strip()
if line.count("NAME and FORMAT"):
line = raw.readline()
head["inputfile_name"] = line.split()[2]
head["inputfile_type"] = line.split()[3]
if line.count("COMPRISES THE FOLLOWING SCALED INPUT FILES:"):
line = raw.readline()
head["inputfile_name"] = line.split("INPUT_FILE=")[1].strip()
head["inputfile_type"] = "XDS_ASCII"
if line.count("UNIT_CELL_CONSTANTS="):
head["cell"] = line[line.index("=")+1:-1].strip()
elif line.count("INCLUDE_RESOLUTION_RANGE="):
head["include_resolution"] = map(float, \
line[line.index("=")+1:-1].strip().split())
elif line.count("SPACE_GROUP_NUMBER="):
head["sym"] = line[line.index("=")+1:-1].strip()
elif line.count("X-RAY_WAVELENGTH="):
iw = line.index("WAVELENGTH=")+11
head["wavelength"] = float(line[iw:iw+10].strip())
elif line.count("NAME_TEMPLATE_OF_DATA_FRAMES="):
head["template_name"] = line[line.index("=")+1:].strip().split("??")[0].split("/")[-1]
if head["template_name"][-1] == "_":
head["template_name"] == head["template_name"][:-1]
elif line.count("INPUT_FILE="):
head["i_set"] += [line[line.index("LE=")+3:].strip().split("??")[0].split("/")[-1],]
line = raw.readline()
if head["i_set"]:
print "ISETs=", head["i_set"]
head["template_name"] = os.path.commonprefix(head["i_set"])
return head
def get_resmax_limit():
lp = ""
if os.path.exists("DEFPIX.LP"):
try: lp = opReadCl("DEFPIX.LP")
except: pass
elif os.path.exists("INTEGRATE.LP"):
try: lp = opReadCl("INTEGRATE.LP")
except: pass
else: return 1.5
if len(lp) <= 1300: return 1.5
st = lp.index("RESOLUTION RANGE RECORDED BY DETECTOR")
return float(lp[st:st+80].splitlines()[0].split()[-1])
def res_bin(dmin,nbin):
return [1./(((1./dmin)**3*bin/nbin)**(1./3)) for bin in range(1,nbin+1)]
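# Worked example: res_bin(2.0, 4) -> [~3.17, ~2.52, ~2.20, 2.0], i.e. shell
# limits enclosing equal reciprocal-space volumes down to dmin = 2.0 A.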
def get_maxResolution(infile="INTEGRATE.LP"):
p_lp = opReadCl(infile)[:1115]
to_find = "RESOLUTION RANGE RECORDED BY DETECTOR (ANGSTROM)"
resol = p_lp[p_lp.index(to_find)+len(to_find):].split()[1]
return float(resol)
def write_xscale_resum(s,r,friedels_law,Dir=None):
if not Dir: Dir = os.getcwd()
resum = open(os.path.join(Dir,"xscale_resum.txt"),"w")
print "\n\n\n\t\t\tScaling Statistics\n\n"
#
def print_o3(t,v1,v2):
txt = "\t %-22s %14s %-s" % (t,v1,v2)
print txt
resum.write(txt+"\n")
#
print_o3("Resolution","%.2f - %.2f" % (max(r),s.reso),\
"(%.2f - %.2f)\n" % (s.resoL,s.reso))
print_o3("Completeness","%.1f%%" % s.compl, "(%.1f%%)" % s.complL)
print_o3("I/sigma(I)","%.1f " % s.isig, "(%.1f)" % s.isigL)
print_o3("Rmeas","%.1f%%" % s.rmeas, "(%.1f%%)" % s.rmeasL)
print_o3("Rsym","%.2f%%" % s.rsym, "(%.1f%%)" % s.rsymL)
print_o3("Compared","%d " % s.compar, "(%d)" % s.comparL)
print_o3("Measured","%d " % s.total,"")
print_o3("Unique","%d " % s.unique,"")
print_o3("Multiplicity","%.1f " % (s.total/s.unique),"")
print_o3("Rejected misfits","%d " % s.misfit,"")
if s.absent:
print_o3(" with <Iabs>/<I>","%.1f%%" % s.AbsIav,"")
if friedels_law == "FALSE":
print_o3("Anomalous contrib.","%.1f " % s.anom, "")
print_o3("Wilson scaling (B/Corr)","%.1f " % s.wilson_b,\
"%.2f" % s.wilson_corr)
print_o3("Estimated Res_max","%.2f" % s.dmin,"")
print
resum.close()
def run_xscale(files, hklout, lattice, Dir=None,
out_f=None, nbin=12, merge="TRUE", save=0):
xscale_inp = """ ! RESOLUTION_SHELLS= %s
SPACE_GROUP_NUMBER= %d
UNIT_CELL_CONSTANTS= %s
OUTPUT_FILE=%s\n FRIEDEL'S_LAW= %s MERGE= %s
STRICT_ABSORPTION_CORRECTION= FALSE
"""
if lattice.friedels_law: friedels_law = "TRUE"
else: friedels_law = "FALSE"
#spg = lattice.symmetry_num
#cell = lattice.cell
#print """cell=%s spg=%d Dir=%s
#dmin=%s nbin=%d friedels_law=%s merge=%s
#""" % (cell,spg,Dir,dmin,nbin,friedels_law,merge)
inp_line = "INPUT_FILE=%s XDS_ASCII 100 %s\n"
resbin = ""
if not Dir: Dir = os.getcwd()
for reso in res_bin(lattice.dmin,nbin-2): resbin = resbin + "%5.2f" % reso
resbin = "15. 10. " + resbin
#
script = xscale_inp % (resbin,lattice.symmetry_num, lattice,
hklout, friedels_law, merge)
for file in files:
script = script + inp_line % (file, lattice.dmin)
open(os.path.join(Dir,"XSCALE.INP"),"wb").write(script)
#
exec_prog("cd %s; %s" % (Dir,os.path.join(XDSHOME,"xscale_par")),
stdout=out_f, stderr="xupy.stderr")
if save: saveLastVersion("XSCALE.LP")
s = resum_scaling(lpf=os.path.join(Dir,"XSCALE.LP"))
if not s:
print "\nERROR while running XSCALE"
sys.exit()
r = 100, lattice.dmin
write_xscale_resum(s,r,friedels_law)
def guess_imageType(image_name):
"""Return a the image detector type and compress type.
Do not handel swaping..."""
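    # e.g. guess_imageType("frame_001.img") might return ("ADSC", "NO") for a
    # plain ADSC frame (file name is hypothetical); the second value flips to
    # "PCK" when a CCP4-packed image is detected.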
image = open(image_name,"r")
head = image.read(9000)
image.close()
imageType = []
imageCompress = "NO"
if head[:15] == "{\nHEADER_BYTES=" and head.count(";\nDETECTOR_SN=") and \
head.count(";\nPIXEL_SIZE="): imageType.append("ADSC")
if head.count("\nSCANNER") and head.count("mar research") and \
head.count("\nPROGRAM"): imageType.append("MarIP345")
if head[:3] == "II*" and \
head[1028:1031] == "MMX" : imageType.append("MarCCD")
if head.count("CCP4 packed image"): imageCompress = "PCK"
if len(imageType) == 1: return imageType[0], imageCompress
elif len(imageType) == 0: return "Unknown", imageCompress
elif len(imageType) > 1:
print "ERROR: Can't choose the detector type between:", imageType
sys.exit()
def get_number_of_processors():
platf = None
try:
if "linux" in sys.platform:
platf = int(commands.getoutput("egrep -c '^processor' /proc/cpuinfo"))
else:
#"darwin" in sys.platform:
# or [Free|Net|Open]BSD and MacOS X
platf = int(commands.getoutput("sysctl -n hw.ncpu"))
except:
platf = 4
return platf
|
|
import logging
import httplib2
import json
import os
import hashlib
# This module performs authentication based on the tokens
# issued by Globus Online's Nexus service, see this URL for
# details:
# http://globusonline.github.com/nexus-docs/api.html
#
# Import the Globus Online client libraries, originally
# sourced from:
# https://github.com/globusonline/python-nexus-client
from nexus import Client
from django.contrib.auth.models import AnonymousUser,User
from django.contrib.auth import login,authenticate
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from pprint import pformat
"""
This is the 2-legged OAuth authentication code from tastypie
heavily modified into a django authentication middleware.
We base this on RemoteUserMiddleware so that we have access to the
request object and its headers, and then we simply re-use the existing
remote user backend code
https://docs.djangoproject.com/en/1.4/howto/auth-remote-user/
You configure it the same way using the normal instructions, except
that you use this module oauth.TwoLeggedOAuthMiddleware instead of
django.contrib.auth.middleware.RemoteUserMiddleware
The django.contrib.auth.backends.RemoteUserBackend module is also
used with this module, add it into the AUTHENTICATION_BACKENDS
declaration in settings.py
To set the authentication service to be used, set AUTHSVC in your
settings.py file. Here is an example:
AUTHSVC = 'https://graph.api.go.sandbox.globuscs.info/'
Django modules can check the request.META['KBASEsessid'] for the
session ID that will be used within the KBase session management
infrastructure
To test this, bind the sample handler into urls.py like this:
...
from oauth import AuthStatus
...
urlpatterns = patterns( ...
...
url(r'^authstatus/?$', AuthStatus),
...
)
Then visit the authstatus URL to see the auth state.
If you have the perl Bio::KBase::AuthToken libraries installed,
you can test it like this:
token=`perl -MBio::KBase::AuthToken -e 'print Bio::KBase::AuthToken->new( user_id => "papa", password => "papa")->token,"\n";'`
curl -H "Authorization: Bearer $token" http://127.0.0.1:8000/authstatus/
Steve Chan
sychan@lbl.gov
9/6/2012
Previous documentation follows:
This is a simple 2-legged OAuth authentication model for tastypie.
Copied nearly verbatim from gregbayer's piston example
- https://github.com/gregbayer/django-piston-two-legged-oauth
Dependencies:
- python-oauth2: https://github.com/simplegeo/python-oauth2
Adapted from example:
- http://philipsoutham.com/post/2172924723/two-legged-oauth-in-python
"""
class OAuth2Middleware(AuthenticationMiddleware):
"""
Two Legged OAuth authenticator.
This Authentication method checks for a provided HTTP_AUTHORIZATION
and looks up to see if this is a valid OAuth Consumer
"""
# Authentication server
# Create a Python Globus client
client = Client(config_file=os.path.join(os.path.dirname(__file__), 'nexus/nexus.yml'))
try:
authsvc = "https://%s/" % client.config['server']
except:
authsvc = 'https://nexus.api.globusonline.org/'
# Set the salt used for computing a session hash from the signature hash
salt = "(African || European)?"
def __init__(self, realm='API'):
self.realm = realm
self.user = None
self.http = httplib2.Http(disable_ssl_certificate_validation=True)
# The shortcut option will bypass token validation if we already have a django session
self.shortcut = False
def process_request(self, request):
"""
Verify 2-legged oauth request. Parameters accepted as
values in "Authorization" header, or as a GET request
or in a POST body.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
if 'HTTP_AUTHORIZATION' in request.META:
auth_header = request.META.get('HTTP_AUTHORIZATION')
else:
logging.debug("No authorization header found.")
return None
# Extract the token based on whether it is an OAuth or Bearer
# token
if auth_header[:6] == 'OAuth ':
token = auth_header[6:]
elif auth_header[:7] == 'Bearer ':
token = auth_header[7:]
else:
logging.info("Authorization header did not contain OAuth or Bearer type token")
return None
# Push the token into the META for future reference
request.META['KBASEtoken'] = token
if (request.user.is_authenticated() and self.shortcut):
return
user_id = OAuth2Middleware.client.authenticate_user( token)
if not user_id:
logging.error("Authentication token failed validation")
return None
else:
logging.info("Validated as user " + user_id)
token_map = {}
for entry in token.split('|'):
key, value = entry.split('=')
token_map[key] = value
profile = self.get_profile(token)
if (profile == None):
logging.error("Token validated, but could not retrieve user profile")
return None
# For now, compute a sessionid based on hashing the
# the signature with the salt
request.META['KBASEsessid'] = hashlib.sha256(token_map['sig']+OAuth2Middleware.salt).hexdigest()
# Add in some useful details that came in from the token validation
request.META['KBASEprofile'] = profile
# See if the username is already associated with any currently logged
# in user, if so just pass over the rest
# Raises exception if it doesn't pass
user = authenticate(remote_user=profile['username'])
if user:
request.user = user
# For now, compute a sessionid based on hashing the
# the signature with the salt
request.META['KBASEsessid'] = hashlib.sha256(token_map['sig']+OAuth2Middleware.salt).hexdigest()
print pformat( request.META['KBASEsessid'])
# Add in some useful details that came in from the token validation
request.META['KBASEprofile'] = profile
login(request,user)
else:
logging.error( "Failed to return user from call to authenticate() with username " + profile['username'])
except KeyError, e:
logging.exception("KeyError in TwoLeggedOAuthMiddleware: %s" % e)
request.user = AnonymousUser()
except Exception, e:
logging.exception("Error in TwoLeggedOAuthMiddleware: %s" % e)
def get_profile(self,token):
try:
token_map = {}
for entry in token.split('|'):
key, value = entry.split('=')
token_map[key] = value
keyurl = self.__class__.authsvc + "/users/" + token_map['un'] + "?custom_fields=*&fields=groups,username,email_validated,fullname,email"
res,body = self.http.request(keyurl,"GET",
headers={ 'Authorization': 'Globus-Goauthtoken ' + token })
if (200 <= int(res.status)) and ( int(res.status) < 300):
profile = json.loads( body)
return profile
logging.error( body)
raise Exception("HTTP", res)
except Exception, e:
logging.exception("Error in get_profile.")
return None
def AuthStatus(request):
res = "request.user.is_authenticated = %s \n" % request.user.is_authenticated()
if request.user.is_authenticated():
res = res + "request.user.username = %s\n" % request.user.username
if 'KBASEsessid' in request.META:
res = res + "Your KBase SessionID is %s\n" % request.META['KBASEsessid']
if 'KBASEprofile' in request.META:
res = res + "Your profile record is:\n%s\n" % pformat( request.META['KBASEprofile'])
if 'KBASEtoken' in request.META:
res = res + "Your OAuth token is:\n%s\n" % pformat( request.META['KBASEtoken'])
return HttpResponse(res)
|
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
""" A Django web app and unit tests in a single file.
Based on Nsukami's blog post (no longer online).
To get it running, copy it into a directory named udjango:
$ pip install django
$ python udjango_test.py
Change the DJANGO_COMMAND to runserver to switch back to web server.
Tested with Django 3.0 and Python 3.8.
"""
import os
import sys
import django
from django.conf import settings
from django.conf.urls import include
from django.contrib.auth import get_user_model
from django.contrib import admin
from django.core.management import call_command
from django.core.management.utils import get_random_secret_key
from django.core.wsgi import get_wsgi_application
from django.db import models
from django.db.models.base import ModelBase
from django.http import HttpResponse
from django.test import TestCase, Client
from django.urls import reverse, re_path
WIPE_DATABASE = True
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DB_FILE = os.path.join(BASE_DIR, 'udjango.db')
DJANGO_COMMAND = 'test' # 'test' or 'runserver'
# the current folder name will also be our app
APP_LABEL = os.path.basename(BASE_DIR)
urlpatterns = []
Author = Book = None
class Tests(TestCase):
def test_post(self):
author = Author.objects.create(name='Jim')
user = get_user_model().objects.create_superuser('admin', '', 'admin')
client = Client()
client.force_login(user)
expected_title = 'My New Book'
response = client.post(reverse('admin:udjango_book_add'),
dict(title=expected_title,
author=author.id))
if response.status_code != 302:
self.assertEqual([], response.context['errors'])
new_book = Book.objects.last()
self.assertEqual(expected_title, new_book.title)
def main():
global Author, Book
setup()
from rest_framework import routers
from rest_framework import serializers
from rest_framework import viewsets
# Create your models here.
class Author(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Book(models.Model):
author = models.ForeignKey(Author,
related_name='books',
on_delete=models.CASCADE)
title = models.CharField(max_length=400)
def __str__(self):
return self.title
admin.site.register(Book)
admin.site.register(Author)
admin.autodiscover()
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Book
fields = '__all__'
class BooksViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Book.objects.all()
serializer_class = BookSerializer
router = routers.DefaultRouter()
router.register(r'books', BooksViewSet)
def index(request):
return HttpResponse(
"Hello, Django! <a href='admin'>Web</a> or <a href='api'>API</a>? "
"Login as user 'admin', password 'admin'.")
urlpatterns.extend([
re_path(r'^admin/', admin.site.urls),
re_path(r'^$', index, name='homepage'),
re_path(r'^api/', include(router.urls)),
re_path(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework'))
])
if __name__ == "__main__":
if DJANGO_COMMAND == 'test':
call_command('test', '__main__.Tests')
else:
if WIPE_DATABASE or not os.path.exists(DB_FILE):
with open(DB_FILE, 'w'):
pass
call_command('makemigrations', APP_LABEL)
call_command('migrate')
get_user_model().objects.create_superuser('admin', '', 'admin')
call_command(DJANGO_COMMAND)
else:
get_wsgi_application()
def setup():
sys.path[0] = os.path.dirname(BASE_DIR)
static_path = os.path.join(BASE_DIR, "static")
try:
os.mkdir(static_path)
except FileExistsError:
pass
settings.configure(
DEBUG=True,
ROOT_URLCONF=__name__,
MIDDLEWARE=[
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
],
INSTALLED_APPS=[
APP_LABEL,
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
],
STATIC_URL='/static/',
STATICFILES_DIRS=[
static_path,
],
STATIC_ROOT=os.path.join(BASE_DIR, "static_root"),
MEDIA_ROOT=os.path.join(BASE_DIR, "media"),
MEDIA_URL='/media/',
SECRET_KEY=get_random_secret_key(),
DEFAULT_AUTO_FIELD='django.db.models.AutoField',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB_FILE,
}
},
REST_FRAMEWORK={
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser',
],
}
)
django.setup()
app_config = django.apps.apps.app_configs[APP_LABEL]
app_config.models_module = app_config.models
original_new_func = ModelBase.__new__
@staticmethod
def patched_new(cls, name, bases, attrs):
if 'Meta' not in attrs:
class Meta:
app_label = APP_LABEL
attrs['Meta'] = Meta
return original_new_func(cls, name, bases, attrs)
ModelBase.__new__ = patched_new
main()
|
|
#!/usr/bin/env python
#
# BaseRecognizer.py
#
# Class recognizing modified base residues.
#
# http://iimcb.genesilico.pl/moderna/
#
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
from rna_tools.tools.mini_moderna3.moderna.analyze.MolecularGraph import AnnotatedMolecule
from rna_tools.tools.mini_moderna3.moderna.analyze.MolTopologies import read_nucleotide_topologies
from rna_tools.tools.mini_moderna3.moderna.util.Errors import BaseRecognitionError
from rna_tools.tools.mini_moderna3.moderna.Constants import AMINO, STANDARD_BASES, HETERO_GROUPS, PHOSPHATE_GROUP, \
NUCLEOTIDE_ATOMS
from rna_tools.tools.mini_moderna3.moderna.Constants import MODIFICATION_TOPOLOGY_FILE
import re
MODIFIED_BASE_PATTERNS = read_nucleotide_topologies(MODIFICATION_TOPOLOGY_FILE)
# do not recognize amino acids
EXCLUDE_AMINO = True
DNA_BASES = ['dA', 'dC', 'dG', 'dT']
NAME_MATCHES = {
'RMP': 'd5mpA',
'SMP': 'd5mpA',
'CMR':'d5mpC',
'A3A': 'alpha-dA',
'GAO': 'arabinoseG',
'CAR': 'arabinoseC',
'UAR': 'arabinoseU',
'A5O': 'arabinoseA',
'G25': 'GMP',
'A5L': 'dA',
'DA': 'dA',
}
class BaseRecognitionResult(object):
def __init__(self, resi):
self.resi = resi
self.abbrev = ''
self.mol = None
self.subunits = []
@property
def amino_acid(self):
"""
Returns amino acid three-letter code, if this is an amino acid,
otherwise None.
"""
short_name = self.resi.resname
if short_name in AMINO:
self.abbrev = short_name
return short_name
@property
def standard_nucleotide(self):
"""
        Recognize nucleotides in a Bio.PDB.Residue instance that
        - have a name like A, G, C, T, U
        - contain only atoms with standard names in the PDB file
        - have all atoms present, except for the phosphate group.
        The O2' atom is used to distinguish ribo- from desoxyribonucleotides;
        all hydrogens are neglected.
        Returns one of A, G, C, T, U, dA, dG, dC, dT, dU, or False.
"""
short_name = self.resi.resname.strip()
atoms = set([])
if short_name in STANDARD_BASES:
desoxynucleotide = 'd'
for atom in self.resi.child_list:
atomname = atom.id.replace('*',"'")
if atomname in PHOSPHATE_GROUP: continue
elif atomname[0] == 'H': continue
elif atomname == "O2'": desoxynucleotide = ''
else:
atoms.add(atomname)
if atoms==NUCLEOTIDE_ATOMS[short_name]:
result = desoxynucleotide + short_name
self.abbrev = result
return result
return False
def has_forbidden_elements(self):
FORBIDDEN = ['BR', 'CL', 'MN', 'MG', 'I', 'F', 'FE', 'CU', 'V', 'CR']
for at in self.resi:
name = re.sub("[\d\s']", '', at.fullname)
name = name.upper()
if name in FORBIDDEN:
return True
def check_forbidden_elements(self):
"""catch heavy atom groups."""
if self.has_forbidden_elements():
if self.abbrev in STANDARD_BASES + DNA_BASES:
self.abbrev = '?' + self.abbrev
elif self.abbrev == '':
                raise BaseRecognitionError('Strange element occurred in residue recognized as: %s' % self.abbrev)
def identify_phosphates(self, tags, resn):
"""Distinguishes ATP, ADP, AMP, GTP, ... and other phosphate extensions from standard nucleotides."""
restype = NAME_MATCHES.get(resn)
if restype in tags:
return restype
for t in tags:
if t.endswith('_mod_phos'):
return t
elif resn == t or 'd'+resn ==t:
restype = t
elif not restype and t in STANDARD_BASES + DNA_BASES:
restype = t
#if not restype and len(tags)>0 and tags[0] in STANDARD_BASES + DNA_BASES:
# restype = tags[0]
return restype
def run_topology_matching(self):
"""Running the subgraph matching algorithm."""
self.mol = AnnotatedMolecule()
self.mol.parse_resi(self.resi)
self.subunits = set([su for su, atom in self.mol.detect_patterns(MODIFIED_BASE_PATTERNS)])
#print self.subunits
def identify_molecule(self):
"""returns an abbreviated name for the molecule"""
restype = ""
resn = re.sub('\s','',self.resi.resname.strip())
sub2 = [x for x in self.subunits if x not in ('phosphate','desoxyriboside','riboside','purine','pyrimidine')]
sub2.sort()
restype = self.identify_phosphates(sub2, resn)
# decide about some difficult cases
if not restype:
if len(sub2) == 1:
if sub2[0] in STANDARD_BASES: # the easy ones
restype = sub2[0]
elif sub2[0]=='phosphate': restype = '' # phosphate only
else: restype = '['+sub2[0]+']' # put modified bases in [x]
elif sub2 == ['m5D', 'm5U']:
if re.search('D',resn): restype = '[m5D]'
else: restype = prefix+'[m5U]'
elif sub2 == ['D', 'U']:
if re.search('D',resn): restype = '[D]'
else: restype = prefix+'U'
elif sub2 == ['T','m5D','m5U'] or sub2 == ['T','m5U']:
if resn == 'T': restype = 'T'
elif resn == 'U': restype = 'U'
else: restype = '[m5U]'
elif sub2 == ['galQtRNA', 'manQtRNA']:
if re.search('g',resn,re.IGNORECASE):
restype = '[galQtRNA]'
else:
restype ='[manQtRNA]'
else:
restype = '<%s:%s>'%(resn,str(list(self.subunits)))
if restype[0]=='<':
if restype[1:-1] in HETERO_GROUPS:
restype = ''
else:
# probably some other hetero group,
raise BaseRecognitionError('Unknown Residue:%s'%restype)
restype = re.sub('[\[\]]','',restype)
self.abbrev = restype
return restype
class BaseRecognizer(object):
"""
Assigns a name to a residue - if necessary by analyzing
    the detailed topology of atoms.
"""
def identify_resi(self,resi):
"""
Recognizes what kind of residue there is and returns an abbreviation.
Takes a Bio.PDB.Residue instance.
First, assignment by atom and residue names will be done.
- Names must be A,G,C,T,U
Second, the structure will be converted to .mol format and
examined by the pattern matching procedure.
"""
result = BaseRecognitionResult(resi)
if result.amino_acid or result.standard_nucleotide:
return result.abbrev
# Now, this residue might be a modified base,
result.run_topology_matching()
restype = result.identify_molecule()
if not result.abbrev:
raise BaseRecognitionError('Unknown Residue:%s'%restype)
result.check_forbidden_elements()
return result.abbrev
|
|
"""
Import utilities
Exported classes:
ImportManager Manage the import process
Importer Base class for replacing standard import functions
BuiltinImporter Emulate the import mechanism for builtin and frozen modules
DynLoadSuffixImporter
"""
from warnings import warnpy3k
warnpy3k("the imputil module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# note: avoid importing non-builtin modules
import imp ### not available in Jython?
import sys
import __builtin__
# for the DirectoryImporter
import struct
import marshal
__all__ = ["ImportManager","Importer","BuiltinImporter"]
_StringType = type('')
_ModuleType = type(sys) ### doesn't work in Jython...
class ImportManager:
"Manage the import process."
def install(self, namespace=vars(__builtin__)):
"Install this ImportManager into the specified namespace."
if isinstance(namespace, _ModuleType):
namespace = vars(namespace)
# Note: we have no notion of "chaining"
# Record the previous import hook, then install our own.
self.previous_importer = namespace['__import__']
self.namespace = namespace
namespace['__import__'] = self._import_hook
### fix this
#namespace['reload'] = self._reload_hook
def uninstall(self):
"Restore the previous import mechanism."
self.namespace['__import__'] = self.previous_importer
def add_suffix(self, suffix, importFunc):
assert hasattr(importFunc, '__call__')
self.fs_imp.add_suffix(suffix, importFunc)
######################################################################
#
# PRIVATE METHODS
#
clsFilesystemImporter = None
def __init__(self, fs_imp=None):
# we're definitely going to be importing something in the future,
# so let's just load the OS-related facilities.
if not _os_stat:
_os_bootstrap()
# This is the Importer that we use for grabbing stuff from the
# filesystem. It defines one more method (import_from_dir) for our use.
if fs_imp is None:
cls = self.clsFilesystemImporter or _FilesystemImporter
fs_imp = cls()
self.fs_imp = fs_imp
# Initialize the set of suffixes that we recognize and import.
# The default will import dynamic-load modules first, followed by
# .py files (or a .py file's cached bytecode)
for desc in imp.get_suffixes():
if desc[2] == imp.C_EXTENSION:
self.add_suffix(desc[0],
DynLoadSuffixImporter(desc).import_file)
self.add_suffix('.py', py_suffix_importer)
def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):
"""Python calls this hook to locate and import a module."""
parts = fqname.split('.')
# determine the context of this import
parent = self._determine_import_context(globals)
# if there is a parent, then its importer should manage this import
if parent:
module = parent.__importer__._do_import(parent, parts, fromlist)
if module:
return module
# has the top module already been imported?
try:
top_module = sys.modules[parts[0]]
except KeyError:
# look for the topmost module
top_module = self._import_top_module(parts[0])
if not top_module:
# the topmost module wasn't found at all.
raise ImportError, 'No module named ' + fqname
# fast-path simple imports
if len(parts) == 1:
if not fromlist:
return top_module
if not top_module.__dict__.get('__ispkg__'):
# __ispkg__ isn't defined (the module was not imported by us),
# or it is zero.
#
# In the former case, there is no way that we could import
# sub-modules that occur in the fromlist (but we can't raise an
# error because it may just be names) because we don't know how
# to deal with packages that were imported by other systems.
#
# In the latter case (__ispkg__ == 0), there can't be any sub-
# modules present, so we can just return.
#
# In both cases, since len(parts) == 1, the top_module is also
# the "bottom" which is the defined return when a fromlist
# exists.
return top_module
importer = top_module.__dict__.get('__importer__')
if importer:
return importer._finish_import(top_module, parts[1:], fromlist)
# Grrr, some people "import os.path" or do "from os.path import ..."
if len(parts) == 2 and hasattr(top_module, parts[1]):
if fromlist:
return getattr(top_module, parts[1])
else:
return top_module
# If the importer does not exist, then we have to bail. A missing
# importer means that something else imported the module, and we have
# no knowledge of how to get sub-modules out of the thing.
raise ImportError, 'No module named ' + fqname
def _determine_import_context(self, globals):
"""Returns the context in which a module should be imported.
The context could be a loaded (package) module and the imported module
will be looked for within that package. The context could also be None,
meaning there is no context -- the module should be looked for as a
"top-level" module.
"""
if not globals or not globals.get('__importer__'):
# globals does not refer to one of our modules or packages. That
# implies there is no relative import context (as far as we are
# concerned), and it should just pick it off the standard path.
return None
# The globals refer to a module or package of ours. It will define
# the context of the new import. Get the module/package fqname.
parent_fqname = globals['__name__']
# if a package is performing the import, then return itself (imports
# refer to pkg contents)
if globals['__ispkg__']:
parent = sys.modules[parent_fqname]
assert globals is parent.__dict__
return parent
i = parent_fqname.rfind('.')
# a module outside of a package has no particular import context
if i == -1:
return None
# if a module in a package is performing the import, then return the
# package (imports refer to siblings)
parent_fqname = parent_fqname[:i]
parent = sys.modules[parent_fqname]
assert parent.__name__ == parent_fqname
return parent
def _import_top_module(self, name):
# scan sys.path looking for a location in the filesystem that contains
# the module, or an Importer object that can import the module.
for item in sys.path:
if isinstance(item, _StringType):
module = self.fs_imp.import_from_dir(item, name)
else:
module = item.import_top(name)
if module:
return module
return None
def _reload_hook(self, module):
"Python calls this hook to reload a module."
# reloading of a module may or may not be possible (depending on the
# importer), but at least we can validate that it's ours to reload
importer = module.__dict__.get('__importer__')
if not importer:
### oops. now what...
pass
# okay. it is using the imputil system, and we must delegate it, but
# we don't know what to do (yet)
### we should blast the module dict and do another get_code(). need to
### flesh this out and add proper docco...
raise SystemError, "reload not yet implemented"
class Importer:
"Base class for replacing standard import functions."
def import_top(self, name):
"Import a top-level module."
return self._import_one(None, name, name)
######################################################################
#
# PRIVATE METHODS
#
def _finish_import(self, top, parts, fromlist):
# if "a.b.c" was provided, then load the ".b.c" portion down from
# below the top-level module.
bottom = self._load_tail(top, parts)
# if the form is "import a.b.c", then return "a"
if not fromlist:
# no fromlist: return the top of the import tree
return top
# the top module was imported by self.
#
# this means that the bottom module was also imported by self (just
# now, or in the past and we fetched it from sys.modules).
#
# since we imported/handled the bottom module, this means that we can
# also handle its fromlist (and reliably use __ispkg__).
# if the bottom node is a package, then (potentially) import some
# modules.
#
# note: if it is not a package, then "fromlist" refers to names in
# the bottom module rather than modules.
# note: for a mix of names and modules in the fromlist, we will
# import all modules and insert those into the namespace of
# the package module. Python will pick up all fromlist names
# from the bottom (package) module; some will be modules that
# we imported and stored in the namespace, others are expected
# to be present already.
if bottom.__ispkg__:
self._import_fromlist(bottom, fromlist)
# if the form is "from a.b import c, d" then return "b"
return bottom
def _import_one(self, parent, modname, fqname):
"Import a single module."
# has the module already been imported?
try:
return sys.modules[fqname]
except KeyError:
pass
# load the module's code, or fetch the module itself
result = self.get_code(parent, modname, fqname)
if result is None:
return None
module = self._process_result(result, fqname)
# insert the module into its parent
if parent:
setattr(parent, modname, module)
return module
def _process_result(self, result, fqname):
ispkg, code, values = result
# did get_code() return an actual module? (rather than a code object)
is_module = isinstance(code, _ModuleType)
# use the returned module, or create a new one to exec code into
if is_module:
module = code
else:
module = imp.new_module(fqname)
### record packages a bit differently??
module.__importer__ = self
module.__ispkg__ = ispkg
# insert additional values into the module (before executing the code)
module.__dict__.update(values)
# the module is almost ready... make it visible
sys.modules[fqname] = module
# execute the code within the module's namespace
if not is_module:
try:
exec code in module.__dict__
except:
if fqname in sys.modules:
del sys.modules[fqname]
raise
# fetch from sys.modules instead of returning module directly.
# also make module's __name__ agree with fqname, in case
# the "exec code in module.__dict__" played games on us.
module = sys.modules[fqname]
module.__name__ = fqname
return module
def _load_tail(self, m, parts):
"""Import the rest of the modules, down from the top-level module.
Returns the last module in the dotted list of modules.
"""
for part in parts:
fqname = "%s.%s" % (m.__name__, part)
m = self._import_one(m, part, fqname)
if not m:
raise ImportError, "No module named " + fqname
return m
def _import_fromlist(self, package, fromlist):
'Import any sub-modules in the "from" list.'
# if '*' is present in the fromlist, then look for the '__all__'
# variable to find additional items (modules) to import.
if '*' in fromlist:
fromlist = list(fromlist) + \
list(package.__dict__.get('__all__', []))
for sub in fromlist:
# if the name is already present, then don't try to import it (it
# might not be a module!).
if sub != '*' and not hasattr(package, sub):
subname = "%s.%s" % (package.__name__, sub)
submod = self._import_one(package, sub, subname)
if not submod:
raise ImportError, "cannot import name " + subname
def _do_import(self, parent, parts, fromlist):
"""Attempt to import the module relative to parent.
This method is used when the import context specifies that <self>
imported the parent module.
"""
top_name = parts[0]
top_fqname = parent.__name__ + '.' + top_name
top_module = self._import_one(parent, top_name, top_fqname)
if not top_module:
# this importer and parent could not find the module (relatively)
return None
return self._finish_import(top_module, parts[1:], fromlist)
######################################################################
#
# METHODS TO OVERRIDE
#
def get_code(self, parent, modname, fqname):
"""Find and retrieve the code for the given module.
parent specifies a parent module to define a context for importing. It
may be None, indicating no particular context for the search.
modname specifies a single module (not dotted) within the parent.
fqname specifies the fully-qualified module name. This is a
(potentially) dotted name from the "root" of the module namespace
down to the modname.
If there is no parent, then modname==fqname.
This method should return None, or a 3-tuple.
* If the module was not found, then None should be returned.
* The first item of the 2- or 3-tuple should be the integer 0 or 1,
specifying whether the module that was found is a package or not.
* The second item is the code object for the module (it will be
executed within the new module's namespace). This item can also
be a fully-loaded module object (e.g. loaded from a shared lib).
* The third item is a dictionary of name/value pairs that will be
inserted into new module before the code object is executed. This
is provided in case the module's code expects certain values (such
as where the module was found). When the second item is a module
object, then these names/values will be inserted *after* the module
has been loaded/initialized.
"""
raise RuntimeError, "get_code not implemented"
######################################################################
#
# Some handy stuff for the Importers
#
# byte-compiled file suffix character
_suffix_char = __debug__ and 'c' or 'o'
# byte-compiled file suffix
_suffix = '.py' + _suffix_char
def _compile(pathname, timestamp):
"""Compile (and cache) a Python source file.
The file specified by <pathname> is compiled to a code object and
returned.
Presuming the appropriate privileges exist, the bytecodes will be
saved back to the filesystem for future imports. The source file's
modification timestamp must be provided as a Long value.
"""
codestring = open(pathname, 'rU').read()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
code = __builtin__.compile(codestring, pathname, 'exec')
# try to cache the compiled code
try:
f = open(pathname + _suffix_char, 'wb')
except IOError:
pass
else:
f.write('\0\0\0\0')
f.write(struct.pack('<I', timestamp))
marshal.dump(code, f)
f.flush()
f.seek(0, 0)
f.write(imp.get_magic())
f.close()
return code
_os_stat = _os_path_join = None
def _os_bootstrap():
"Set up 'os' module replacement functions for use during import bootstrap."
names = sys.builtin_module_names
join = None
if 'posix' in names:
sep = '/'
from posix import stat
elif 'nt' in names:
sep = '\\'
from nt import stat
elif 'dos' in names:
sep = '\\'
from dos import stat
elif 'os2' in names:
sep = '\\'
from os2 import stat
else:
raise ImportError, 'no os specific module found'
if join is None:
def join(a, b, sep=sep):
if a == '':
return b
lastchar = a[-1:]
if lastchar == '/' or lastchar == sep:
return a + b
return a + sep + b
global _os_stat
_os_stat = stat
global _os_path_join
_os_path_join = join
def _os_path_isdir(pathname):
"Local replacement for os.path.isdir()."
try:
s = _os_stat(pathname)
except OSError:
return None
return (s.st_mode & 0170000) == 0040000
def _timestamp(pathname):
"Return the file modification time as a Long."
try:
s = _os_stat(pathname)
except OSError:
return None
return long(s.st_mtime)
######################################################################
#
# Emulate the import mechanism for builtin and frozen modules
#
class BuiltinImporter(Importer):
def get_code(self, parent, modname, fqname):
if parent:
# these modules definitely do not occur within a package context
return None
# look for the module
if imp.is_builtin(modname):
type = imp.C_BUILTIN
elif imp.is_frozen(modname):
type = imp.PY_FROZEN
else:
# not found
return None
# got it. now load and return it.
module = imp.load_module(modname, None, modname, ('', '', type))
return 0, module, { }
######################################################################
#
# Internal importer used for importing from the filesystem
#
class _FilesystemImporter(Importer):
def __init__(self):
self.suffixes = [ ]
def add_suffix(self, suffix, importFunc):
assert hasattr(importFunc, '__call__')
self.suffixes.append((suffix, importFunc))
def import_from_dir(self, dir, fqname):
result = self._import_pathname(_os_path_join(dir, fqname), fqname)
if result:
return self._process_result(result, fqname)
return None
def get_code(self, parent, modname, fqname):
# This importer is never used with an empty parent. Its existence is
# private to the ImportManager. The ImportManager uses the
# import_from_dir() method to import top-level modules/packages.
# This method is only used when we look for a module within a package.
assert parent
for submodule_path in parent.__path__:
code = self._import_pathname(_os_path_join(submodule_path, modname), fqname)
if code is not None:
return code
return self._import_pathname(_os_path_join(parent.__pkgdir__, modname),
fqname)
def _import_pathname(self, pathname, fqname):
if _os_path_isdir(pathname):
result = self._import_pathname(_os_path_join(pathname, '__init__'),
fqname)
if result:
values = result[2]
values['__pkgdir__'] = pathname
values['__path__'] = [ pathname ]
return 1, result[1], values
return None
for suffix, importFunc in self.suffixes:
filename = pathname + suffix
try:
finfo = _os_stat(filename)
except OSError:
pass
else:
return importFunc(filename, finfo, fqname)
return None
######################################################################
#
# SUFFIX-BASED IMPORTERS
#
def py_suffix_importer(filename, finfo, fqname):
file = filename[:-3] + _suffix
t_py = long(finfo[8])
t_pyc = _timestamp(file)
code = None
if t_pyc is not None and t_pyc >= t_py:
f = open(file, 'rb')
if f.read(4) == imp.get_magic():
t = struct.unpack('<I', f.read(4))[0]
if t == t_py:
code = marshal.load(f)
f.close()
if code is None:
file = filename
code = _compile(file, t_py)
return 0, code, { '__file__' : file }
class DynLoadSuffixImporter:
def __init__(self, desc):
self.desc = desc
def import_file(self, filename, finfo, fqname):
fp = open(filename, self.desc[1])
module = imp.load_module(fqname, fp, filename, self.desc)
module.__file__ = filename
return 0, module, { }
######################################################################
def _print_importers():
items = sys.modules.items()
items.sort()
for name, module in items:
if module:
print name, module.__dict__.get('__importer__', '-- no importer')
else:
print name, '-- non-existent module'
def _test_revamp():
ImportManager().install()
sys.path.insert(0, BuiltinImporter())
######################################################################
#
# TODO
#
# from Finn Bock:
# type(sys) is not a module in Jython. what to use instead?
# imp.C_EXTENSION is not in Jython. same for get_suffixes and new_module
#
# given foo.py of:
# import sys
# sys.modules['foo'] = sys
#
# ---- standard import mechanism
# >>> import foo
# >>> foo
# <module 'sys' (built-in)>
#
# ---- revamped import mechanism
# >>> import imputil
# >>> imputil._test_revamp()
# >>> import foo
# >>> foo
# <module 'foo' from 'foo.py'>
#
#
# from MAL:
# should BuiltinImporter exist in sys.path or hard-wired in ImportManager?
# need __path__ processing
# performance
# move chaining to a subclass [gjs: it's been nuked]
# deinstall should be possible
# query mechanism needed: is a specific Importer installed?
# py/pyc/pyo piping hooks to filter/process these files
# wish list:
# distutils importer hooked to list of standard Internet repositories
# module->file location mapper to speed FS-based imports
# relative imports
# keep chaining so that it can play nice with other import hooks
#
# from Gordon:
# push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)
#
# from Guido:
# need to change sys.* references for rexec environs
# need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy
# watch out for sys.modules[...] is None
# flag to force absolute imports? (speeds _determine_import_context and
# checking for a relative module)
# insert names of archives into sys.path (see quote below)
# note: reload does NOT blast module dict
# shift import mechanisms and policies around; provide for hooks, overrides
# (see quote below)
# add get_source stuff
# get_topcode and get_subcode
# CRLF handling in _compile
# race condition in _compile
# refactoring of os.py to deal with _os_bootstrap problem
# any special handling to do for importing a module with a SyntaxError?
# (e.g. clean up the traceback)
# implement "domain" for path-type functionality using pkg namespace
# (rather than FS-names like __path__)
# don't use the word "private"... maybe "internal"
#
#
# Guido's comments on sys.path caching:
#
# We could cache this in a dictionary: the ImportManager can have a
# cache dict mapping pathnames to importer objects, and a separate
# method for coming up with an importer given a pathname that's not yet
# in the cache. The method should do a stat and/or look at the
# extension to decide which importer class to use; you can register new
# importer classes by registering a suffix or a Boolean function, plus a
# class. If you register a new importer class, the cache is zapped.
# The cache is independent from sys.path (but maintained per
# ImportManager instance) so that rearrangements of sys.path do the
# right thing. If a path is dropped from sys.path the corresponding
# cache entry is simply no longer used.
#
# My/Guido's comments on factoring ImportManager and Importer:
#
# > However, we still have a tension occurring here:
# >
# > 1) implementing policy in ImportManager assists in single-point policy
# > changes for app/rexec situations
# > 2) implementing policy in Importer assists in package-private policy
# > changes for normal, operating conditions
# >
# > I'll see if I can sort out a way to do this. Maybe the Importer class will
# > implement the methods (which can be overridden to change policy) by
# > delegating to ImportManager.
#
# Maybe also think about what kind of policies an Importer would be
# likely to want to change. I have a feeling that a lot of the code
# there is actually not so much policy but a *necessity* to get things
# working given the calling conventions for the __import__ hook: whether
# to return the head or tail of a dotted name, or when to do the "finish
# fromlist" stuff.
#
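# Illustrative sketch (not part of imputil): the pathname -> importer cache
# described in Guido's comments above. Importer classes are registered with a
# predicate (e.g. a suffix test); registering a new class zaps the cache. All
# names below are hypothetical.
class _PathImporterCache:
    def __init__(self):
        self._cache = {}      # pathname -> importer instance
        self._registry = []   # list of (predicate, importer_class) pairs

    def register(self, predicate, importer_class):
        # predicate(pathname) -> bool decides whether importer_class handles it
        self._registry.append((predicate, importer_class))
        self._cache.clear()   # new class registered: invalidate everything

    def importer_for(self, pathname):
        if pathname in self._cache:
            return self._cache[pathname]
        for predicate, importer_class in self._registry:
            if predicate(pathname):
                importer = importer_class()
                self._cache[pathname] = importer
                return importer
        return None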
|
|
"""
Various sweeps for scanning experiment parameters
"""
from atom.api import Atom, Str, Float, Int, Bool, Dict, List, Enum, \
    Coerced, Property, Typed, observe, cached_property
import enaml
from enaml.qt.qt_application import QtApplication
from instruments.MicrowaveSources import MicrowaveSource
from instruments.Instrument import Instrument
from instruments.plugins import find_plugins
from DictManager import DictManager
import numpy as np
import json
import floatbits
from JSONLibraryUtils import LibraryCoders
class Sweep(Atom):
label = Str()
axisLabel = Str()
enabled = Bool(True)
order = Int(-1)
def json_encode(self, matlabCompatible=False):
jsonDict = self.__getstate__()
if matlabCompatible:
jsonDict['type'] = self.__class__.__name__
jsonDict.pop('enabled', None)
jsonDict.pop('name', None)
else:
jsonDict['x__class__'] = self.__class__.__name__
jsonDict['x__module__'] = self.__class__.__module__
return jsonDict
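# Illustrative sketch (not part of the library): what json_encode() above
# produces in its two modes, using a Repeat sweep (defined later in this file)
# with hypothetical values.
def _demo_sweep_json_encode():
    sweep = Repeat(label='avg', numRepeats=10)
    # matlabCompatible: adds 'type': 'Repeat' and drops the 'enabled' flag
    matlab_dict = sweep.json_encode(matlabCompatible=True)
    # default: adds 'x__class__'/'x__module__' so the JSON can be round-tripped
    full_dict = sweep.json_encode()
    return matlab_dict, full_dict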
class PointsSweep(Sweep):
"""
A class for sweeps with floating points with one instrument.
'step' depends on numPoints (but not the other way around) to break the dependency cycle
"""
start = Float(0.0)
step = Property()
stop = Float(1.0)
numPoints = Int(1)
def _set_step(self, step):
# int() gives the floor of the quotient, cast to an Int
try:
self.numPoints = int((self.stop - self.start)/floatbits.prevfloat(step)) + 1
except ValueError, e:
print("ERROR: Sweep named %s issue computing Num. Points: %s" % (self.label,e))
def _get_step(self):
return (self.stop - self.start)/max(1, self.numPoints-1)
@observe('start', 'stop', 'numPoints')
def update_step(self, change):
if change['type'] == 'update':
# update the step to keep numPoints fixed
self.get_member('step').reset(self)
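# Illustrative sketch (not part of the library): how the 'step' Property above
# couples to numPoints. Reading step derives it from start/stop/numPoints;
# assigning step recomputes numPoints via _set_step. Values are hypothetical.
def _demo_points_sweep_step():
    sweep = PointsSweep(label='demo', start=0.0, stop=1.0, numPoints=11)
    print(sweep.step)       # 0.1 == (stop - start) / (numPoints - 1)
    sweep.step = 0.25       # recomputes numPoints; here numPoints becomes 5
    print(sweep.numPoints)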
class Power(PointsSweep):
label = Str(default='Power')
instr = Str()
units = Enum('dBm', 'Watts').tag(desc='Logarithmic or linear power sweep')
class Frequency(PointsSweep):
label = Str(default='Frequency')
instr = Str()
class HeterodyneFrequency(PointsSweep):
label = Str(default='HeterodyneFrequency')
instr1 = Str()
instr2 = Str()
diffFreq = Float(10.0e-3).tag(desc="IF frequency (GHz)")
class SegmentNum(PointsSweep):
label = Str(default='SegmentNum')
class SegmentNumWithCals(PointsSweep):
label = Str(default='SegmentNumWithCals')
numCals = Int(0)
def json_encode(self, matlabCompatible=False):
jsonDict = super(SegmentNumWithCals, self).json_encode(matlabCompatible)
if matlabCompatible:
jsonDict['type'] = 'SegmentNum'
jsonDict['stop'] = self.stop + self.step * self.numCals
jsonDict['numPoints'] = self.numPoints + self.numCals
return jsonDict
class Repeat(Sweep):
label = Str(default='Repeat')
numRepeats = Int(1).tag(desc='How many times to loop.')
class AWGChannel(PointsSweep):
label = Str(default='AWGChannel')
channel = Enum('1','2','3','4','1&2','3&4').tag(desc='Which channel or pair to sweep')
mode = Enum('Amp.', 'Offset').tag(desc='Sweeping amplitude or offset')
instr = Str()
class AWGSequence(Sweep):
label = Str(default='AWGSequence')
start = Int()
stop = Int()
step = Int(1)
sequenceFile = Str().tag(desc='Base string for the sequence files')
class Attenuation(PointsSweep):
label = Str(default='Attenuation (dB)')
channel = Enum(1, 2, 3).tag(desc='Which channel to sweep')
instr = Str()
class DC(PointsSweep):
label = Str(default='DC')
instr = Str()
class Threshold(PointsSweep):
label = Str(default="Threshold")
instr = Str()
stream = Enum('(1,1)','(1,2)','(2,1)','(2,2)').tag(desc='which stream to set threshold')
newSweepClasses = [Power, Frequency, HeterodyneFrequency, Attenuation, SegmentNum, SegmentNumWithCals, AWGChannel, AWGSequence, DC, Repeat, Threshold]
class SweepLibrary(Atom):
sweepDict = Coerced(dict)
sweepList = Property()
sweepOrder = List()
possibleInstrs = List()
version = Int(1)
sweepManager = Typed(DictManager)
libFile = Str()
def __init__(self, **kwargs):
super(SweepLibrary, self).__init__(**kwargs)
find_sweeps_plugins()
self.load_from_library()
self.sweepManager = DictManager(itemDict=self.sweepDict,
possibleItems=newSweepClasses)
#Overload [] to allow direct pulling of sweep info
def __getitem__(self, sweepName):
return self.sweepDict[sweepName]
def _get_sweepList(self):
return [sweep.label for sweep in self.sweepDict.values() if sweep.enabled]
def write_to_file(self,fileName=None):
libFileName = fileName if fileName is not None else self.libFile
if libFileName:
with open(libFileName, 'w') as FID:
json.dump(self, FID, cls=LibraryCoders.LibraryEncoder, indent=2, sort_keys=True)
def load_from_library(self):
if self.libFile:
try:
with open(self.libFile, 'r') as FID:
try:
tmpLib = json.load(FID, cls=LibraryCoders.LibraryDecoder)
except ValueError, e:
print ("WARNING: JSON object issue: %s in %s" % (e,self.libFile))
return
if isinstance(tmpLib, SweepLibrary):
self.sweepDict.update(tmpLib.sweepDict)
del self.possibleInstrs[:]
for instr in tmpLib.possibleInstrs:
self.possibleInstrs.append(instr)
del self.sweepOrder[:]
for sweepStr in tmpLib.sweepOrder:
self.sweepOrder.append(sweepStr)
# grab library version
self.version = tmpLib.version
except IOError:
print('No sweep library found.')
def json_encode(self, matlabCompatible=False):
if matlabCompatible:
# re-assign based on sweepOrder
for ct, sweep in enumerate(self.sweepOrder):
self.sweepDict[sweep].order = ct+1
return {label:sweep for label,sweep in self.sweepDict.items() if label in self.sweepOrder}
else:
return {
'sweepDict': {label:sweep for label,sweep in self.sweepDict.items()},
'sweepOrder': self.sweepOrder,
'version': self.version
}
# local plugin registration to enable access by Sweeps.plugin
def find_sweeps_plugins():
plugins = find_plugins(Sweep, verbose=False)
for plugin in plugins:
if plugin not in newSweepClasses:
newSweepClasses.append(plugin)
if plugin.__name__ not in globals().keys():
globals().update({plugin.__name__: plugin})
print 'Registered Plugin {0}'.format(plugin.__name__)
if __name__ == "__main__":
from instruments.MicrowaveSources import AgilentN5183A
testSource1 = AgilentN5183A(label='TestSource1')
testSource2 = AgilentN5183A(label='TestSource2')
from Sweeps import Frequency, Power, SegmentNumWithCals, SweepLibrary
sweepDict = {
'TestSweep1': Frequency(label='TestSweep1', start=5, step=0.1, stop=6, instr=testSource1.label),
'TestSweep2': Power(label='TestSweep2', start=-20, stop=0, numPoints=41, instr=testSource2.label),
'SegWithCals': SegmentNumWithCals(label='SegWithCals', start=0, stop=20, numPoints=101, numCals=4)
}
sweepLib = SweepLibrary(possibleInstrs=[testSource1.label, testSource2.label], sweepDict=sweepDict)
#sweepLib = SweepLibrary(libFile='Sweeps.json')
with enaml.imports():
from SweepsViews import SweepManagerWindow
app = QtApplication()
view = SweepManagerWindow(sweepLib=sweepLib)
view.show()
app.start()
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RADOS Block Device Driver"""
from __future__ import absolute_import
import io
import json
import math
import os
import tempfile
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import driver
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
LOG = logging.getLogger(__name__)
RBD_OPTS = [
cfg.StrOpt('rbd_cluster_name',
default='ceph',
help='The name of the ceph cluster'),
cfg.StrOpt('rbd_pool',
default='rbd',
help='The RADOS pool where rbd volumes are stored'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes '
'- only set when using cephx authentication'),
cfg.StrOpt('rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file'),
cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
default=False,
help='Flatten volumes created from snapshots to remove '
'dependency from volume to snapshot'),
cfg.StrOpt('rbd_secret_uuid',
help='The libvirt uuid of the secret for the rbd_user '
'volumes'),
cfg.StrOpt('volume_tmp_dir',
help='Directory where temporary image files are stored '
'when the volume driver does not write them directly '
'to the volume. Warning: this option is now deprecated, '
'please use image_conversion_dir instead.'),
cfg.IntOpt('rbd_max_clone_depth',
default=5,
help='Maximum number of nested volume clones that are '
'taken before a flatten occurs. Set to 0 to disable '
'cloning.'),
cfg.IntOpt('rbd_store_chunk_size', default=4,
help='Volumes will be chunked into objects of this size '
'(in megabytes).'),
cfg.IntOpt('rados_connect_timeout', default=-1,
help='Timeout value (in seconds) used when connecting to '
'ceph cluster. If value < 0, no timeout is set and '
'default librados value is used.'),
cfg.IntOpt('rados_connection_retries', default=3,
help='Number of retries if connection to ceph cluster '
'failed.'),
cfg.IntOpt('rados_connection_interval', default=5,
help='Interval value (in seconds) between connection '
'retries to ceph cluster.')
]
CONF = cfg.CONF
CONF.register_opts(RBD_OPTS)
class RBDImageMetadata(object):
"""RBD image metadata to be used with RBDImageIOWrapper."""
def __init__(self, image, pool, user, conf):
self.image = image
self.pool = utils.convert_str(pool)
self.user = utils.convert_str(user)
self.conf = utils.convert_str(conf)
class RBDImageIOWrapper(io.RawIOBase):
"""Enables LibRBD.Image objects to be treated as Python IO objects.
Calling unimplemented interfaces will raise IOError.
"""
def __init__(self, rbd_meta):
super(RBDImageIOWrapper, self).__init__()
self._rbd_meta = rbd_meta
self._offset = 0
def _inc_offset(self, length):
self._offset += length
@property
def rbd_image(self):
return self._rbd_meta.image
@property
def rbd_user(self):
return self._rbd_meta.user
@property
def rbd_pool(self):
return self._rbd_meta.pool
@property
def rbd_conf(self):
return self._rbd_meta.conf
def read(self, length=None):
offset = self._offset
total = self._rbd_meta.image.size()
# NOTE(dosaboy): posix files do not barf if you read beyond their
# length (they just return nothing), but rbd images do, so we need to
# return an empty string once we have reached the end of the image.
if (offset >= total):
return b''
if length is None:
length = total
if (offset + length) > total:
length = total - offset
self._inc_offset(length)
return self._rbd_meta.image.read(int(offset), int(length))
def write(self, data):
self._rbd_meta.image.write(data, self._offset)
self._inc_offset(len(data))
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence == 0:
new_offset = offset
elif whence == 1:
new_offset = self._offset + offset
elif whence == 2:
new_offset = self._rbd_meta.image.size()
new_offset += offset
else:
raise IOError(_("Invalid argument - whence=%s not supported") %
(whence))
if (new_offset < 0):
raise IOError(_("Invalid argument"))
self._offset = new_offset
def tell(self):
return self._offset
def flush(self):
try:
self._rbd_meta.image.flush()
except AttributeError:
LOG.warning(_LW("flush() not supported in "
"this version of librbd"))
def fileno(self):
"""RBD does not have support for fileno() so we raise IOError.
Raising IOError is the recommended way to notify the caller that the
interface is not supported - see http://docs.python.org/2/library/io.html#io.IOBase
"""
raise IOError(_("fileno() not supported by RBD()"))
# NOTE(dosaboy): if the IO object is not closed explicitly, Python auto-closes
# it, which (unless this method is overridden) calls flush() prior to close.
# That is unwanted here since the rbd image may already have been closed
# before the autoclean, currently triggering a segfault in librbd.
def close(self):
pass
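# Illustrative sketch (not part of the driver): wrapping an open librbd Image
# so it can be consumed like a regular Python file object (as the backup code
# at the bottom of this module does). The arguments are hypothetical.
def _example_rbd_io_wrapper(rbd_image, pool, user, conf):
    meta = RBDImageMetadata(rbd_image, pool, user, conf)
    wrapper = RBDImageIOWrapper(meta)
    chunk = wrapper.read(4 * units.Mi)   # read the first 4 MiB
    wrapper.seek(0)                      # rewind, as with an ordinary file
    return chunk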
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False):
client, ioctx = driver._connect_to_rados(pool)
if snapshot is not None:
snapshot = utils.convert_str(snapshot)
try:
self.volume = driver.rbd.Image(ioctx,
utils.convert_str(name),
snapshot=snapshot,
read_only=read_only)
except driver.rbd.Error:
LOG.exception(_LE("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
raise
self.driver = driver
self.client = client
self.ioctx = ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
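# Illustrative sketch (not part of the driver): RBDVolumeProxy as a context
# manager; connecting to and disconnecting from rados is automatic, and any
# librbd Image attribute is reachable via __getattr__. 'driver' and the
# volume name here are hypothetical.
def _example_volume_size_bytes(driver, volume_name):
    with RBDVolumeProxy(driver, volume_name, read_only=True) as vol:
        return vol.size()   # delegated to the underlying librbd Image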
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
@property
def features(self):
features = self.cluster.conf_get('rbd_default_features')
if ((features is None) or (int(features) == 0)):
features = self.driver.rbd.RBD_FEATURE_LAYERING
return int(features)
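# Illustrative sketch (not part of the driver): RADOSClient scopes a cluster
# connection and exposes the rbd feature bits used when creating or cloning
# images, falling back to layering if rbd_default_features is unset.
def _example_default_features(driver):
    with RADOSClient(driver) as client:
        return client.features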
@interface.volumedriver
class RBDDriver(driver.TransferVD, driver.ExtendVD,
driver.CloneableImageVD, driver.SnapshotVD,
driver.MigrateVD, driver.ManageableVD, driver.BaseVD):
"""Implements RADOS block device (RBD) volume commands."""
VERSION = '1.2.0'
def __init__(self, *args, **kwargs):
super(RBDDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(RBD_OPTS)
self._stats = {}
# allow overrides for testing
self.rados = kwargs.get('rados', rados)
self.rbd = kwargs.get('rbd', rbd)
# All string args used with librbd must be None or utf-8 otherwise
# librbd will break.
for attr in ['rbd_cluster_name', 'rbd_user',
'rbd_ceph_conf', 'rbd_pool']:
val = getattr(self.configuration, attr)
if val is not None:
setattr(self.configuration, attr, utils.convert_str(val))
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
if rados is None:
msg = _('rados and rbd python libraries not found')
raise exception.VolumeBackendAPIException(data=msg)
for attr in ['rbd_cluster_name', 'rbd_pool']:
val = getattr(self.configuration, attr)
if not val:
raise exception.InvalidConfigurationValue(option=attr,
value=val)
# NOTE: Checking connection to ceph
# RADOSClient __init__ method invokes _connect_to_rados
# so no need to check for self.rados.Error here.
with RADOSClient(self):
pass
def RBDProxy(self):
return tpool.Proxy(self.rbd.RBD())
def _ceph_args(self):
args = []
if self.configuration.rbd_user:
args.extend(['--id', self.configuration.rbd_user])
if self.configuration.rbd_ceph_conf:
args.extend(['--conf', self.configuration.rbd_ceph_conf])
if self.configuration.rbd_cluster_name:
args.extend(['--cluster', self.configuration.rbd_cluster_name])
return args
@utils.retry(exception.VolumeBackendAPIException,
CONF.rados_connection_interval,
CONF.rados_connection_retries)
def _connect_to_rados(self, pool=None):
LOG.debug("opening connection to ceph cluster (timeout=%s).",
self.configuration.rados_connect_timeout)
client = self.rados.Rados(
rados_id=self.configuration.rbd_user,
clustername=self.configuration.rbd_cluster_name,
conffile=self.configuration.rbd_ceph_conf)
if pool is not None:
pool = utils.convert_str(pool)
else:
pool = self.configuration.rbd_pool
try:
if self.configuration.rados_connect_timeout >= 0:
client.connect(timeout=
self.configuration.rados_connect_timeout)
else:
client.connect()
ioctx = client.open_ioctx(pool)
return client, ioctx
except self.rados.Error:
msg = _("Error connecting to ceph cluster.")
LOG.exception(msg)
client.shutdown()
raise exception.VolumeBackendAPIException(data=msg)
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def _get_backup_snaps(self, rbd_image):
"""Get list of any backup snapshots that exist on this volume.
There should only ever be one, but accept all of them since they need to
be deleted before the volume can be.
"""
# NOTE(dosaboy): we do the import here otherwise we get import conflict
# issues between the rbd driver and the ceph backup driver. These
# issues only seem to occur when NOT using them together and are
# triggered when the ceph backup driver imports the rbd volume driver.
from cinder.backup.drivers import ceph
return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
def _get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json']
args.extend(self._ceph_args())
out, _ = self._execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = json.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def _update_volume_stats(self):
stats = {
'vendor_name': 'Open Source',
'driver_version': self.VERSION,
'storage_protocol': 'ceph',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'multiattach': False,
}
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name or 'RBD'
try:
with RADOSClient(self) as client:
ret, outbuf, _outs = client.cluster.mon_command(
'{"prefix":"df", "format":"json"}', '')
if ret != 0:
LOG.warning(_LW('Unable to get rados pool stats.'))
else:
outbuf = json.loads(outbuf)
pool_stats = [pool for pool in outbuf['pools'] if
pool['name'] ==
self.configuration.rbd_pool][0]['stats']
stats['free_capacity_gb'] = round((float(
pool_stats['max_avail']) / units.Gi), 2)
used_capacity_gb = float(
pool_stats['bytes_used']) / units.Gi
stats['total_capacity_gb'] = round(
(stats['free_capacity_gb'] + used_capacity_gb), 2)
except self.rados.Error:
# just log and return unknown capacities
LOG.exception(_LE('error refreshing volume stats'))
self._stats = stats
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
If 'refresh' is True, run the update first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _get_clone_depth(self, client, volume_name, depth=0):
"""Returns the number of ancestral clones of the given volume."""
parent_volume = self.rbd.Image(client.ioctx, volume_name)
try:
_pool, parent, _snap = self._get_clone_info(parent_volume,
volume_name)
finally:
parent_volume.close()
if not parent:
return depth
# If the clone depth was reached, a flatten should have occurred, so if it
# has been exceeded then something has gone wrong.
if depth > self.configuration.rbd_max_clone_depth:
raise Exception(_("clone depth exceeds limit of %s") %
(self.configuration.rbd_max_clone_depth))
return self._get_clone_depth(client, parent, depth + 1)
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from another volume.
Since we are cloning from a volume and not a snapshot, we must first
create a snapshot of the source volume.
The user has the option to limit how long a volume's clone chain can be
by setting rbd_max_clone_depth. If a clone is made of another clone
and that clone has rbd_max_clone_depth clones behind it, the source
volume will be flattened.
"""
src_name = utils.convert_str(src_vref.name)
dest_name = utils.convert_str(volume.name)
flatten_parent = False
# Do full copy if requested
if self.configuration.rbd_max_clone_depth <= 0:
with RBDVolumeProxy(self, src_name, read_only=True) as vol:
vol.copy(vol.ioctx, dest_name)
return
# Otherwise do COW clone.
with RADOSClient(self) as client:
depth = self._get_clone_depth(client, src_name)
# If the source volume is a clone and rbd_max_clone_depth has been
# reached, flatten the source before cloning. (A value of 0 or less
# disables cloning and was handled above with a full copy.)
if depth == self.configuration.rbd_max_clone_depth:
LOG.debug("maximum clone depth (%d) has been reached - "
"flattening source volume",
self.configuration.rbd_max_clone_depth)
flatten_parent = True
src_volume = self.rbd.Image(client.ioctx, src_name)
try:
# First flatten source volume if required.
if flatten_parent:
_pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
LOG.debug("flattening source volume %s", src_name)
src_volume.flatten()
# Delete parent clone snap
parent_volume = self.rbd.Image(client.ioctx, parent)
try:
parent_volume.unprotect_snap(snap)
parent_volume.remove_snap(snap)
finally:
parent_volume.close()
# Create new snapshot of source volume
clone_snap = "%s.clone_snap" % dest_name
LOG.debug("creating snapshot='%s'", clone_snap)
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception:
# Only close if exception since we still need it.
src_volume.close()
raise
# Now clone source volume snapshot
try:
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'",
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
client.ioctx, dest_name,
features=client.features)
except Exception:
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
raise
finally:
src_volume.close()
if volume.size != src_vref.size:
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
"%(dst_size)d",
{'dst_vol': volume.name, 'src_size': src_vref.size,
'dst_size': volume.size})
self._resize(volume)
LOG.debug("clone created successfully")
def create_volume(self, volume):
"""Creates a logical volume."""
size = int(volume.size) * units.Gi
LOG.debug("creating volume '%s'", volume.name)
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
with RADOSClient(self) as client:
self.RBDProxy().create(client.ioctx,
utils.convert_str(volume.name),
size,
order,
old_format=False,
features=client.features)
def _flatten(self, pool, volume_name):
LOG.debug('flattening %(pool)s/%(img)s',
dict(pool=pool, img=volume_name))
with RBDVolumeProxy(self, volume_name, pool) as vol:
vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
dict(pool=src_pool, img=src_image, snap=src_snap,
dst=volume.name))
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
with RADOSClient(self, src_pool) as src_client:
with RADOSClient(self) as dest_client:
self.RBDProxy().clone(src_client.ioctx,
utils.convert_str(src_image),
utils.convert_str(src_snap),
dest_client.ioctx,
utils.convert_str(volume.name),
features=src_client.features,
order=order)
def _resize(self, volume, **kwargs):
size = kwargs.get('size', None)
if not size:
size = int(volume.size) * units.Gi
with RBDVolumeProxy(self, volume.name) as vol:
vol.resize(size)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._clone(volume, self.configuration.rbd_pool,
snapshot.volume_name, snapshot.name)
if self.configuration.rbd_flatten_volume_from_snapshot:
self._flatten(self.configuration.rbd_pool, volume.name)
if int(volume.size):
self._resize(volume)
def _delete_backup_snaps(self, rbd_image):
backup_snaps = self._get_backup_snaps(rbd_image)
if backup_snaps:
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug("volume has no backup snaps")
def _get_clone_info(self, volume, volume_name, snap=None):
"""If volume is a clone, return its parent info.
Returns a tuple of (pool, parent, snap). A snapshot may optionally be
provided for the case where a cloned volume has been flattened but its
snapshot still depends on the parent.
"""
try:
if snap:
volume.set_snap(snap)
pool, parent, parent_snap = tuple(volume.parent_info())
if snap:
volume.set_snap(None)
# Strip the tag off the end of the volume name since it will not be
# in the snap name.
if volume_name.endswith('.deleted'):
volume_name = volume_name[:-len('.deleted')]
# Now check the snap name matches.
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
LOG.debug("Volume %s is not a clone.", volume_name)
volume.set_snap(None)
return (None, None, None)
def _get_children_info(self, volume, snap):
"""List children for the given snapshot of a volume(image).
Returns a list of (pool, image).
"""
children_list = []
if snap:
volume.set_snap(snap)
children_list = volume.list_children()
volume.set_snap(None)
return children_list
def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
"""Walk back up the clone chain and delete references.
Deletes references, i.e. soft-deleted ('.deleted') parent volumes and their clone snapshots.
"""
parent_rbd = self.rbd.Image(client.ioctx, parent_name)
parent_has_snaps = False
try:
# Check for grandparent
_pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
parent_name,
parent_snap)
LOG.debug("deleting parent snapshot %s", parent_snap)
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
parent_has_snaps = bool(list(parent_rbd.list_snaps()))
finally:
parent_rbd.close()
# If parent has been deleted in Cinder, delete the silent reference and
# keep walking up the chain if it is itself a clone.
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
LOG.debug("deleting parent %s", parent_name)
self.RBDProxy().remove(client.ioctx, parent_name)
# Now move up to grandparent if there is one
if g_parent:
self._delete_clone_parent_refs(client, g_parent, g_parent_snap)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = utils.convert_str(volume.name)
with RADOSClient(self) as client:
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
LOG.info(_LI("volume %s no longer exists in backend"),
volume_name)
return
clone_snap = None
parent = None
# Ensure any backup snapshots are deleted
self._delete_backup_snaps(rbd_image)
# If the volume has non-clone snapshots this delete is expected to
# raise VolumeIsBusy so do so straight away.
try:
snaps = rbd_image.list_snaps()
for snap in snaps:
if snap['name'].endswith('.clone_snap'):
LOG.debug("volume has clone snapshot(s)")
# We grab one of these and use it when fetching parent
# info in case the volume has been flattened.
clone_snap = snap['name']
break
raise exception.VolumeIsBusy(volume_name=volume_name)
# Determine if this volume is itself a clone
_pool, parent, parent_snap = self._get_clone_info(rbd_image,
volume_name,
clone_snap)
finally:
rbd_image.close()
@utils.retry(self.rbd.ImageBusy,
self.configuration.rados_connection_interval,
self.configuration.rados_connection_retries)
def _try_remove_volume(client, volume_name):
self.RBDProxy().remove(client.ioctx, volume_name)
if clone_snap is None:
LOG.debug("deleting rbd volume %s", volume_name)
try:
_try_remove_volume(client, volume_name)
except self.rbd.ImageBusy:
msg = (_("ImageBusy error raised while deleting rbd "
"volume. This may have been caused by a "
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
LOG.warning(msg)
# Now raise this so that the volume stays available and the
# delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
except self.rbd.ImageNotFound:
LOG.info(_LI("RBD volume %s not found, allowing delete "
"operation to proceed."), volume_name)
return
# If it is a clone, walk back up the parent chain deleting
# references.
if parent:
LOG.debug("volume is a clone so cleaning references")
self._delete_clone_parent_refs(client, parent, parent_snap)
else:
# If the volume has copy-on-write clones we will not be able to
# delete it. Instead we will keep it as a silent volume which
# will be deleted when its snapshot and clones are deleted.
new_name = "%s.deleted" % (volume_name)
self.RBDProxy().rename(client.ioctx, volume_name, new_name)
def create_snapshot(self, snapshot):
"""Creates an rbd snapshot."""
with RBDVolumeProxy(self, snapshot.volume_name) as volume:
snap = utils.convert_str(snapshot.name)
volume.create_snap(snap)
volume.protect_snap(snap)
def delete_snapshot(self, snapshot):
"""Deletes an rbd snapshot."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
volume_name = utils.convert_str(snapshot.volume_name)
snap_name = utils.convert_str(snapshot.name)
with RBDVolumeProxy(self, volume_name) as volume:
try:
volume.unprotect_snap(snap_name)
except self.rbd.InvalidArgument:
LOG.info(
_LI("InvalidArgument: Unable to unprotect snapshot %s."),
snap_name)
except self.rbd.ImageNotFound:
LOG.info(
_LI("ImageNotFound: Unable to unprotect snapshot %s."),
snap_name)
except self.rbd.ImageBusy:
children_list = self._get_children_info(volume, snap_name)
if children_list:
for (pool, image) in children_list:
LOG.info(_LI('Image %(pool)s/%(image)s is dependent '
'on the snapshot %(snap)s.'),
{'pool': pool,
'image': image,
'snap': snap_name})
raise exception.SnapshotIsBusy(snapshot_name=snap_name)
try:
volume.remove_snap(snap_name)
except self.rbd.ImageNotFound:
LOG.info(_LI("Snapshot %s does not exist in backend."),
snap_name)
def retype(self, context, volume, new_type, diff, host):
"""Retypes a volume, allow Qos and extra_specs change."""
# No need to check encryption, extra_specs and Qos here as:
# encryptions have been checked as same.
# extra_specs are not used in the driver.
# Qos settings are not used in the driver.
LOG.debug('RBD retype called for volume %s. No action '
'required for RBD volumes.', volume.id)
return True
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
pass
def create_export(self, context, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
def initialize_connection(self, volume, connector):
hosts, ports = self._get_mon_addrs()
data = {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % (self.configuration.rbd_pool,
volume.name),
'hosts': hosts,
'ports': ports,
'cluster_name': self.configuration.rbd_cluster_name,
'auth_enabled': (self.configuration.rbd_user is not None),
'auth_username': self.configuration.rbd_user,
'secret_type': 'ceph',
'secret_uuid': self.configuration.rbd_secret_uuid,
'volume_id': volume.id,
}
}
LOG.debug('connection data: %s', data)
return data
def terminate_connection(self, volume, connector, **kwargs):
pass
def _parse_location(self, location):
prefix = 'rbd://'
if not location.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
pieces = [urllib.parse.unquote(loc)
for loc in location[len(prefix):].split('/')]
if any(map(lambda p: p == '', pieces)):
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
return pieces
def _get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
def _is_cloneable(self, image_location, image_meta):
try:
fsid, pool, image, snapshot = self._parse_location(image_location)
except exception.ImageUnacceptable as e:
LOG.debug('not cloneable: %s.', e)
return False
if self._get_fsid() != fsid:
LOG.debug('%s is in a different ceph cluster.', image_location)
return False
if image_meta['disk_format'] != 'raw':
LOG.debug("rbd image clone requires image format to be "
"'raw' but image %(image)s is '%(format)s'",
{"image": image_location,
"format": image_meta['disk_format']})
return False
# check that we can read the image
try:
with RBDVolumeProxy(self, image,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except self.rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s.',
dict(loc=image_location, err=e))
return False
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
if image_location:
# Note: image_location[0] is glance image direct_url.
# image_location[1] contains the list of all locations (including
# direct_url) or None if show_multiple_locations is False in
# glance configuration.
if image_location[1]:
url_locations = [location['url'] for
location in image_location[1]]
else:
url_locations = [image_location[0]]
# iterate all locations to look for a cloneable one.
for url_location in url_locations:
if url_location and self._is_cloneable(
url_location, image_meta):
_prefix, pool, image, snapshot = \
self._parse_location(url_location)
self._clone(volume, pool, image, snapshot)
self._resize(volume)
return {'provider_location': None}, True
return ({}, False)
def _image_conversion_dir(self):
tmpdir = (self.configuration.volume_tmp_dir or
CONF.image_conversion_dir or
tempfile.gettempdir())
if tmpdir == self.configuration.volume_tmp_dir:
LOG.warning(_LW('volume_tmp_dir is now deprecated, please use '
'image_conversion_dir.'))
# ensure temporary directory exists
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
return tmpdir
def copy_image_to_volume(self, context, volume, image_service, image_id):
tmp_dir = self._image_conversion_dir()
with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
image_utils.fetch_to_raw(context, image_service, image_id,
tmp.name,
self.configuration.volume_dd_blocksize,
size=volume.size)
self.delete_volume(volume)
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
args = ['rbd', 'import',
'--pool', self.configuration.rbd_pool,
'--order', order,
tmp.name, volume.name,
'--new-format']
args.extend(self._ceph_args())
self._try_execute(*args)
self._resize(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
tmp_dir = self._image_conversion_dir()
tmp_file = os.path.join(tmp_dir,
volume.name + '-' + image_meta['id'])
with fileutils.remove_path_on_error(tmp_file):
args = ['rbd', 'export',
'--pool', self.configuration.rbd_pool,
volume.name, tmp_file]
args.extend(self._ceph_args())
self._try_execute(*args)
image_utils.upload_volume(context, image_service,
image_meta, tmp_file)
os.unlink(tmp_file)
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
with RBDVolumeProxy(self, volume.name,
self.configuration.rbd_pool) as rbd_image:
rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.backup(backup, rbd_fd)
LOG.debug("volume backup complete.")
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
with RBDVolumeProxy(self, volume.name,
self.configuration.rbd_pool) as rbd_image:
rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.restore(backup, volume.id, rbd_fd)
LOG.debug("volume restore complete.")
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
old_size = volume.size
try:
size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '
'%(volname)s') % {'volname': volume.name}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})
def manage_existing(self, volume, existing_ref):
"""Manages an existing image.
Renames the image name to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
:param volume:
volume ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of rbd image>}
"""
# Raise an exception if we didn't find a suitable rbd image.
with RADOSClient(self) as client:
rbd_name = existing_ref['source-name']
self.RBDProxy().rename(client.ioctx,
utils.convert_str(rbd_name),
utils.convert_str(volume.name))
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing image for manage_existing.
:param volume:
volume ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of rbd image>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
rbd_name = utils.convert_str(existing_ref['source-name'])
with RADOSClient(self) as client:
# Raise an exception if we didn't find a suitable rbd image.
try:
rbd_image = self.rbd.Image(client.ioctx, rbd_name)
image_size = rbd_image.size()
except self.rbd.ImageNotFound:
kwargs = {'existing_ref': rbd_name,
'reason': 'Specified rbd image does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
finally:
rbd_image.close()
# RBD image size is returned in bytes. Attempt to parse
# size as a float and round up to the next integer.
try:
convert_size = int(math.ceil(float(image_size) / units.Gi))
return convert_size
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size "
"%(size)s was not a floating-point"
" number.")
% {'name': rbd_name,
'size': image_size})
raise exception.VolumeBackendAPIException(
data=exception_message)
def unmanage(self, volume):
pass
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from RBD for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
existing_name = CONF.volume_name_template % new_volume.id
wanted_name = CONF.volume_name_template % volume.id
with RADOSClient(self) as client:
try:
self.RBDProxy().rename(client.ioctx,
utils.convert_str(existing_name),
utils.convert_str(wanted_name))
except self.rbd.ImageNotFound:
LOG.error(_LE('Unable to rename the logical volume '
'for volume %s.'), volume.id)
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume._name_id or new_volume.id
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def migrate_volume(self, context, volume, host):
return (False, None)
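# Illustrative sketch (not part of the driver): the cloning policy implemented
# in RBDDriver.create_cloned_volume() above, reduced to its three cases.
# 'depth' would come from _get_clone_depth(); 'max_depth' is
# rbd_max_clone_depth.
def _example_clone_strategy(max_depth, depth):
    if max_depth <= 0:
        return 'full-copy'            # cloning disabled: copy the source image
    if depth == max_depth:
        return 'flatten-then-clone'   # chain at the limit: flatten source first
    return 'cow-clone'                # normal case: snapshot + COW clone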
|
|
# -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand
from optparse import make_option
import unicodedata
import shout
import daemon
import daemon.pidfile
from signal import SIGTSTP, SIGABRT
import sys
import os
from jukebox.jukebox_core import api
class Command(BaseCommand):
skipCurrentSong = False
option_list = BaseCommand.option_list + (
make_option(
"--start",
action="store_true",
dest="start",
help="Start shoutcast streaming"
),
make_option(
"--stop",
action="store_true",
dest="stop",
help="Stop shoutcast streaming"
),
make_option(
"--fg",
action="store_true",
dest="fg",
help="Start shoutcast streaming in foreground"
),
make_option(
"--host",
action="store",
dest="host",
help="Host of shoutcast server"
),
make_option(
"--port",
action="store",
dest="port",
help="Port of shoutcast server"
),
make_option(
"--password",
action="store",
dest="password",
help="Source password of shoutcast server"
),
)
def handle(self, *args, **options):
pidFile = os.path.dirname(
os.path.abspath(__file__)
) + "/../../daemon.pid"
if options["start"] or options["fg"]:
if (
options["host"] is None or
options["port"] is None or
options["password"] is None
):
print "Required arguments: host, port, password"
self.print_help("jukebox_shout", "help")
return
if os.path.exists(pidFile):
print "Daemon already running, pid file exists"
return
pid = daemon.pidfile.TimeoutPIDLockFile(
pidFile,
10
)
self.shout = shout.Shout()
print "Using libshout version %s" % shout.version()
self.shout.audio_info = {
shout.SHOUT_AI_BITRATE: "128",
shout.SHOUT_AI_SAMPLERATE: "44100",
shout.SHOUT_AI_CHANNELS: "2"
}
self.shout.name = "Democratic Jukebox"
self.shout.url = \
"http://" + options["host"] + ":" + \
options["port"] + "/stream"
self.shout.mount = "/stream"
self.shout.port = int(options["port"])
self.shout.user = "source"
self.shout.password = options["password"]
self.shout.genre = "Mixed"
self.shout.description = "Your democratic music player"
self.shout.host = options["host"]
self.shout.ogv = 0
self.shout.format = "mp3"
try:
self.shout.open()
self.shout.close()
except shout.ShoutException as exception:
print "Error: " + str(exception)
return
if options["start"]:
print "Starting jukebox_shout daemon..."
self.daemon = daemon.DaemonContext(
uid=os.getuid(),
gid=os.getgid(),
pidfile=pid,
working_directory=os.getcwd(),
detach_process=True,
signal_map={
SIGTSTP: self.shutdown,
SIGABRT: self.skipSong
}
)
with self.daemon:
self.shout.open()
print "Register player"
pid = int(open(pidFile).read())
players_api = api.players()
players_api.add(pid)
songs_api = api.songs()
while 1:
self.sendfile(songs_api.getNextSong())
elif options["fg"]:
self.shout.open()
print "Register player"
pid = os.getpid()
players_api = api.players()
players_api.add(pid)
songs_api = api.songs()
while 1:
song = songs_api.getNextSong()
self.sendfile(song)
elif options["stop"]:
if not os.path.exists(pidFile):
print "Daemon not running"
return
print "Stopping daemon..."
pid = int(open(pidFile).read())
os.kill(pid, SIGTSTP)
print "Unregister player " + str(pid)
players_api = api.players()
players_api.remove(pid)
else:
self.print_help("jukebox_shout", "help")
def shutdown(self, signal, action):
self.shout.close()
self.daemon.close()
sys.exit(0)
def skipSong(self, signal, action):
self.skipCurrentSong = True
def sendfile(self, song_instance):
if not os.path.exists(song_instance.Filename.encode('utf8')):
print "File not found: %s" % (
song_instance.Filename.encode('utf8'))
return
print "Streaming file %s" % song_instance.Filename.encode('utf8')
f = open(song_instance.Filename.encode('utf8'), 'rb')  # binary mode: raw mp3 data is streamed
self.shout.set_metadata({"song": self.getMetaData(song_instance)})
while 1:
if self.skipCurrentSong:
print "skipping current song"
self.shout.sync()
self.skipCurrentSong = False
break
else:
print "sending..."
buf = f.read(4096)
if not len(buf):
break
try:
self.shout.send(buf)
except shout.ShoutException, exc:
print "Error: " + str(exc)
break
self.shout.sync()
f.close()
def getMetaData(self, song_instance):
return unicodedata.normalize(
"NFKD",
song_instance.Artist.Name
).encode(
"ascii",
"ignore"
) + " - " + unicodedata.normalize(
"NFKD",
song_instance.Title
).encode(
"ascii",
"ignore"
)
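# Illustrative sketch (not part of the command): invoking this command from
# code with hypothetical connection values; on the shell this corresponds to
#   python manage.py jukebox_shout --fg --host localhost --port 8000 --password hackme
def _example_run_in_foreground():
    from django.core.management import call_command
    call_command("jukebox_shout", fg=True, host="localhost",
                 port="8000", password="hackme")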
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_edits which is used in tf upgraders.
All of the tests assume that we want to change from an API containing
def f(a, b, kw1, kw2): ...
def g(a, b, kw1, c, kw1_alias): ...
def g2(a, b, kw1, c, d, kw1_alias): ...
def h(a, kw1, kw2, kw1_alias, kw2_alias): ...
and the changes to the API consist of renaming, reordering, and/or removing
arguments. Thus, we want to be able to generate changes to produce each of the
following new APIs:
def f(a, b, kw1, kw3): ...
def f(a, b, kw2, kw1): ...
def f(a, b, kw3, kw1): ...
def g(a, b, kw1, c): ...
def g(a, b, c, kw1): ...
def g2(a, b, kw1, c, d): ...
def g2(a, b, c, d, kw1): ...
def h(a, kw1, kw2): ...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
class NoUpdateSpec(ast_edits.APIChangeSpec):
"""A specification of an API change which doesn't change anything."""
def __init__(self):
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
class RenameKeywordSpec(NoUpdateSpec):
"""A specification where kw2 gets renamed to kw3.
The new API is
def f(a, b, kw1, kw3): ...
"""
def __init__(self):
NoUpdateSpec.__init__(self)
self.update_renames()
def update_renames(self):
self.function_keyword_renames["f"] = {"kw2": "kw3"}
class ReorderKeywordSpec(NoUpdateSpec):
"""A specification where kw2 gets moved in front of kw1.
The new API is
def f(a, b, kw2, kw1): ...
"""
def __init__(self):
NoUpdateSpec.__init__(self)
self.update_reorders()
def update_reorders(self):
# Note that these should be in the old order.
self.function_reorders["f"] = ["a", "b", "kw1", "kw2"]
class ReorderAndRenameKeywordSpec(ReorderKeywordSpec, RenameKeywordSpec):
"""A specification where kw2 gets moved in front of kw1 and is changed to kw3.
The new API is
def f(a, b, kw3, kw1): ...
"""
def __init__(self):
ReorderKeywordSpec.__init__(self)
RenameKeywordSpec.__init__(self)
self.update_renames()
self.update_reorders()
class RemoveDeprecatedAliasKeyword(NoUpdateSpec):
"""A specification where kw1_alias is removed in g.
The new API is
def g(a, b, kw1, c): ...
def g2(a, b, kw1, c, d): ...
"""
def __init__(self):
NoUpdateSpec.__init__(self)
self.function_keyword_renames["g"] = {"kw1_alias": "kw1"}
self.function_keyword_renames["g2"] = {"kw1_alias": "kw1"}
class RemoveDeprecatedAliasAndReorderRest(RemoveDeprecatedAliasKeyword):
"""A specification where kw1_alias is removed in g.
The new API is
def g(a, b, c, kw1): ...
def g2(a, b, c, d, kw1): ...
"""
def __init__(self):
RemoveDeprecatedAliasKeyword.__init__(self)
# Note that these should be in the old order.
self.function_reorders["g"] = ["a", "b", "kw1", "c"]
self.function_reorders["g2"] = ["a", "b", "kw1", "c", "d"]
class RemoveMultipleKeywordArguments(NoUpdateSpec):
"""A specification where both keyword aliases are removed from h.
The new API is
def h(a, kw1, kw2): ...
"""
def __init__(self):
NoUpdateSpec.__init__(self)
self.function_keyword_renames["h"] = {
"kw1_alias": "kw1",
"kw2_alias": "kw2",
}
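# Illustrative sketch (not test code): applying one of the specs above to a
# source string directly, mirroring what TestAstEdits._upgrade below does with
# ast_edits.ASTCodeUpgrader and in-memory files.
def _example_apply_spec(spec, src):
    upgrader = ast_edits.ASTCodeUpgrader(spec)
    in_file, out_file = six.StringIO(src), six.StringIO()
    upgrader.process_opened_file("in.py", in_file, "out.py", out_file)
    return out_file.getvalue()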
class TestAstEdits(test_util.TensorFlowTestCase):
def _upgrade(self, spec, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(spec)
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return (count, report, errors), out_file.getvalue()
def testNoTransformIfNothingIsSupplied(self):
text = "f(a, b, kw1=c, kw2=d)\n"
_, new_text = self._upgrade(NoUpdateSpec(), text)
self.assertEqual(new_text, text)
text = "f(a, b, c, d)\n"
_, new_text = self._upgrade(NoUpdateSpec(), text)
self.assertEqual(new_text, text)
def testKeywordRename(self):
"""Test that we get the expected result if renaming kw2 to kw3."""
text = "f(a, b, kw1=c, kw2=d)\n"
expected = "f(a, b, kw1=c, kw3=d)\n"
_, new_text = self._upgrade(RenameKeywordSpec(), text)
self.assertEqual(new_text, expected)
# No keywords specified, no reordering, so we should get input as output
text = "f(a, b, c, d)\n"
_, new_text = self._upgrade(RenameKeywordSpec(), text)
self.assertEqual(new_text, text)
def testKeywordReorderWithParens(self):
"""Test that we get the expected result if there are parens around args."""
text = "f((a), ( ( b ) ))\n"
acceptable_outputs = [
# No change is a valid output
text,
# Also cases where all arguments are fully specified are allowed
"f(a=(a), b=( ( b ) ))\n",
# Making the parens canonical is ok
"f(a=(a), b=((b)))\n",
]
_, new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
def testKeywordReorder(self):
"""Test that we get the expected result if kw2 is now before kw1."""
text = "f(a, b, kw1=c, kw2=d)\n"
acceptable_outputs = [
# No change is a valid output
text,
# Just reordering the kw.. args is also ok
"f(a, b, kw2=d, kw1=c)\n",
# Also cases where all arguments are fully specified are allowed
"f(a=a, b=b, kw1=c, kw2=d)\n",
"f(a=a, b=b, kw2=d, kw1=c)\n",
]
_, new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "f(a, b, c, d)\n"
acceptable_outputs = [
"f(a, b, d, c)\n",
"f(a=a, b=b, kw1=c, kw2=d)\n",
"f(a=a, b=b, kw2=d, kw1=c)\n",
]
_, new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
def testKeywordReorderAndRename(self):
"""Test that we get the expected result if kw2 is renamed and moved."""
text = "f(a, b, kw1=c, kw2=d)\n"
acceptable_outputs = [
"f(a, b, kw3=d, kw1=c)\n",
"f(a=a, b=b, kw1=c, kw3=d)\n",
"f(a=a, b=b, kw3=d, kw1=c)\n",
]
_, new_text = self._upgrade(ReorderAndRenameKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "f(a, b, c, d)\n"
acceptable_outputs = [
"f(a, b, d, c)\n",
"f(a=a, b=b, kw1=c, kw3=d)\n",
"f(a=a, b=b, kw3=d, kw1=c)\n",
]
_, new_text = self._upgrade(ReorderAndRenameKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAlias(self):
"""Test that we get the expected result if a keyword alias is removed."""
text = "g(a, b, kw1=x, c=c)\n"
acceptable_outputs = [
# Not using deprecated alias, so original is ok
text,
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# No keyword used, should be no change
text = "g(a, b, x, c)\n"
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertEqual(new_text, text)
# If we used the alias, it should get renamed
text = "g(a, b, kw1_alias=x, c=c)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed even if it's last
text = "g(a, b, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAndReorder(self):
"""Test for when a keyword alias is removed and args are reordered."""
text = "g(a, b, kw1=x, c=c)\n"
acceptable_outputs = [
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "g(a, b, x, c)\n"
# Don't accept an output which doesn't reorder c and d
acceptable_outputs = [
"g(a, b, c, x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# If we used the alias, it should get renamed
text = "g(a, b, kw1_alias=x, c=c)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed and reordered even if it's last
text = "g(a, b, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAndReorder2(self):
"""Same as testRemoveDeprecatedKeywordAndReorder but on g2 (more args)."""
text = "g2(a, b, kw1=x, c=c, d=d)\n"
acceptable_outputs = [
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "g2(a, b, x, c, d)\n"
# Don't accept an output which doesn't reorder c and d
acceptable_outputs = [
"g2(a, b, c, d, x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# If we used the alias, it should get renamed
text = "g2(a, b, kw1_alias=x, c=c, d=d)\n"
acceptable_outputs = [
"g2(a, b, kw1=x, c=c, d=d)\n",
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
"g2(a=a, b=b, c=c, d=d, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed and reordered even if it's not in order
text = "g2(a, b, d=d, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g2(a, b, kw1=x, c=c, d=d)\n",
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a, b, d=d, c=c, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
"g2(a=a, b=b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, d=d, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveMultipleKeywords(self):
"""Remove multiple keywords at once."""
# Not using deprecated keywords -> no rename
text = "h(a, kw1=x, kw2=y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertEqual(new_text, text)
# Using positional arguments (in proper order) -> no change
text = "h(a, x, y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertEqual(new_text, text)
# Use only the old names, in order
text = "h(a, kw1_alias=x, kw2_alias=y)\n"
acceptable_outputs = [
"h(a, x, y)\n",
"h(a, kw1=x, kw2=y)\n",
"h(a=a, kw1=x, kw2=y)\n",
"h(a, kw2=y, kw1=x)\n",
"h(a=a, kw2=y, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
# Use only the old names, in reverse order, should give one of same outputs
text = "h(a, kw2_alias=y, kw1_alias=x)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
# Mix old and new names
text = "h(a, kw1=x, kw2_alias=y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
def testUnrestrictedFunctionWarnings(self):
class FooWarningSpec(NoUpdateSpec):
"""Usages of function attribute foo() prints out a warning."""
def __init__(self):
NoUpdateSpec.__init__(self)
self.function_warnings = {"*.foo": "not good"}
texts = ["object.foo()", "get_object().foo()",
"get_object().foo()", "object.foo().bar()"]
for text in texts:
(_, report, _), _ = self._upgrade(FooWarningSpec(), text)
self.assertIn("not good", report)
# Note that foo() won't result in a warning, because in this case foo is
# not an attribute, but a name.
false_alarms = ["foo", "foo()", "foo.bar()", "obj.run_foo()", "obj.foo"]
for text in false_alarms:
(_, report, _), _ = self._upgrade(FooWarningSpec(), text)
self.assertNotIn("not good", report)
def testFullNameNode(self):
t = ast_edits.full_name_node("a.b.c")
    self.assertEqual(
ast.dump(t),
"Attribute(value=Attribute(value=Name(id='a', ctx=Load()), attr='b', "
"ctx=Load()), attr='c', ctx=Load())"
)
if __name__ == "__main__":
test_lib.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
op.inputs[2], op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
return [nn_ops.conv3d_backprop_input(op.inputs[0],
op.inputs[1],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding")),
nn_ops.conv3d_backprop_filter(op.inputs[0],
op.inputs[1],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return nn_ops.max_pool3d_grad(op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
  We assume that probs is of shape [batch_size, dim].
  The formula for dsoftmax / dx is (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
    gradient w.r.t. the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax - array_ops.reshape(
math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])) * softmax)
return grad_x
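# Illustrative sketch only (not part of TensorFlow's API): a plain-NumPy
# reference of the same rank-one update, handy for spot-checking _SoftmaxGrad
# by hand. The helper name below is hypothetical.
def _softmax_grad_reference(grad_softmax, softmax):
  """NumPy version of (grad_softmax - rowsum(grad_softmax * softmax)) * softmax."""
  import numpy as np  # local import keeps the sketch self-contained
  inner = np.sum(grad_softmax * softmax, axis=1, keepdims=True)
  return (grad_softmax - inner) * softmax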
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
  log_softmax = input - log(sum(exp(input)))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax
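# Worked check (illustrative): with log-softmax output y and incoming gradient
# g of shape [batch, dim], the line above computes g - rowsum(g) * exp(y); for
# a single row with g = [1, 0] and softmax = exp(y) = [0.5, 0.5] this yields
# [0.5, -0.5].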
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
  The first input of op is the tensor t, and its gradient is
  just the gradient that op received.
  The second input of op is the bias vector, which has fewer
  dimensions than "received_grad". Its gradient is the received gradient
  summed over every dimension except the channel dimension selected by
  data_format.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
data_format=data_format))
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient that unused_bias_op received.
  The second input of unused_bias_op is the bias vector, which has fewer
  dimensions than "received_grad". Its gradient is the received gradient
  summed over every dimension except the last one (the bias dimension).
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
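# Worked check (illustrative): for a rank-3 received_grad of shape [2, 3, 4],
# reduction_dim_tensor is [0, 1], so the bias gradient has shape [4]; every
# dimension except the last (bias) dimension gets summed out.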
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
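# Worked check (illustrative): with vec = [1., 2.] (shape [2]) and mat of shape
# [2, 3], expand_dims turns vec into [[1.], [2.]], so broadcasting scales the
# first row of mat by 1 and the second row by 2.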
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which are stored in op.outputs[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which are stored in op.outputs[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad,
op.get_attr("strides"), op.get_attr("padding")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0], array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"), op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius,
bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
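# Worked check (illustrative): L2Loss(x) = sum(x**2) / 2, so dL2Loss/dx = x and
# the chain rule gives grad_x = x * grad; e.g. x = [3., 4.] with grad = 2.0
# yields [6., 8.].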
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
    A list of two tensors, the first being the gradient w.r.t. the input of
    TopK, and the second being the gradient w.r.t. the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.pack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
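  # Worked check (illustrative): with in_lastdim = 3 and ind_2d = [[0, 2],
  # [1, 0]], the offsets are [[0], [3]] and ind becomes [0, 2, 4, 3], i.e. the
  # positions of the top-k entries in the flattened input.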
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [array_ops.reshape(
sparse_ops.sparse_to_dense(ind,
array_ops.reshape(
math_ops.reduce_prod(in_shape), [1]),
array_ops.reshape(grad, [-1]),
validate_indices=False),
in_shape), array_ops.zeros(
[], dtype=dtypes.int32)]
|
|
#!/usr/bin/python
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import matplotlib.pylab as plt
import scipy.sparse as sps
import scipy.sparse.linalg as slinalg
import os
import scipy.io
# from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
# from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
import PETScIO as IO
import MatrixOperations as MO
import CheckPetsc4py as CP
import MaxwellPrecond as MP
def StoreMatrix(A,name):
test ="".join([name,".mat"])
scipy.io.savemat( test, {name: A},oned_as='row')
parameters['num_threads'] = 4
m = 12
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
OuterIt = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'Direct'
ShowResultPlots = 'no'
ShowErrorPlots = 'no'
EigenProblem = 'no'
SavePrecond = 'no'
CheckMu = 'no'
case = 1
parameters = CP.ParameterSetup()
MU[0]= 1e0
for xx in xrange(1,m):
print xx
nn = 2**(xx)
parameters["form_compiler"]["quadrature_degree"] = 3
if (CheckMu == 'yes'):
if (xx != 1):
MU[xx-1] = MU[xx-2]/10
else:
if (xx != 1):
MU[xx-1] = MU[xx-2]
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = xx
mesh = RectangleMesh(0,0, 1, 1, nn, nn,'left')
parameters['reorder_dofs_serial'] = False
V = FunctionSpace(mesh, "N1curl",2)
Q = FunctionSpace(mesh, "CG",2)
parameters['reorder_dofs_serial'] = False
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
if case == 1:
u0 = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)"))
p0 = Expression("x[1]*(x[1]-1)*x[0]*(x[0]-1)")
elif case == 2:
u0 = Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
p0 = Expression(("sin(2*pi*x[0])*sin(2*pi*x[1])"))
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("0")
bc = DirichletBC(W.sub(0),u0, boundary)
bc1 = DirichletBC(W.sub(1),p0, boundary)
bcs = [bc,bc1]
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
c = 0
if case == 1:
CurlCurl = Expression(("-6*x[1]+2","-6*x[0]+2"))
gradR = Expression(("(2*x[0]-1)*x[1]*(x[1]-1)","(2*x[1]-1)*x[0]*(x[0]-1)"))
f = CurlCurl + gradR
elif case == 2:
CurlCurl = 8*pow(pi,2)*u0
gradR = Expression(("2*pi*cos(x[0])*sin(2*pi*x[1])","2*pi*sin(2*pi*x[0])*cos(2*pi*x[1])"))
f = CurlCurl+gradR
elif case == 3:
f = Expression(("(4*pow(pi,2)-C)*sin(2*pi*x[1])*cos(2*pi*x[0])","-(4*pow(pi,2)-C)*sin(2*pi*x[0])*cos(2*pi*x[1])"),C = c)
a11 = inner(curl(v),curl(u))*dx
a12 = inner(v,grad(p))*dx
a21 = inner(u,grad(q))*dx
L1 = inner(v, f)*dx
a = a11+a12+a21
tic()
AA, bb = assemble_system(a, L1, bcs)
A,b = CP.Assemble(AA,bb)
print toc()
b = bb.array()
zeros = 0*b
del bb
bb = IO.arrayToVec(b)
x = IO.arrayToVec(zeros)
p11 = inner(curl(v),curl(u))*dx + inner(u,v)*dx
p22 = inner(grad(p),grad(q))*dx
pp = p11+p22
PP,Pb = assemble_system(pp,L1,bcs)
P = CP.Assemble(PP)
if (Solving == 'Direct'):
ksp = PETSc.KSP().create()
ksp.setOperators(A)
ksp.setFromOptions()
ksp.setType(ksp.Type.MINRES)
ksp.setTolerances(1e-8)
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.PYTHON)
pc.setPythonContext(MP.Approx(W,P))
# print 'Solving with:', ksp.getType()
# Solve!
tic()
ksp.solve(bb, x)
SolTime[xx-1] = toc()
print "time to solve: ",SolTime[xx-1]
OuterIt[xx-1] = ksp.its
r = bb.duplicate()
A.mult(x, r)
r.aypx(-1, bb)
rnorm = r.norm()
PETSc.Sys.Print('error norm = %g' % rnorm,comm=PETSc.COMM_WORLD)
del A,P
# if (Solving == 'Iterative' or Solving == 'Direct'):
# if case == 1:
# ue = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)"))
# pe = Expression("x[1]*(x[1]-1)*x[0]*(x[0]-1)")
# elif case == 2:
# ue = Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
# pe = Expression(("sin(2*pi*x[0])*sin(2*pi*x[1])"))
# elif case == 3:
# ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
# pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
# Ve = FunctionSpace(mesh,"N1curl",4)
# u = interpolate(ue,Ve)
# Qe = FunctionSpace(mesh,"CG",4)
# p = interpolate(pe,Qe)
# X = IO.vecToArray(x)
# x = X[0:V.dim()]
# ua = Function(V)
# ua.vector()[:] = x
# pp = X[V.dim():]
# pa = Function(Q)
# pa.vector()[:] = pp
# parameters["form_compiler"]["quadrature_degree"] = 4
# ErrorB = Function(V)
# ErrorR = Function(Q)
# ErrorB = u-ua
# ErrorR = p-pa
# errL2b[xx-1] = sqrt(assemble(inner(ErrorB, ErrorB)*dx))
# errCurlb[xx-1] = sqrt(assemble(inner(curl(ErrorB), curl(ErrorB))*dx))
# errL2r[xx-1] = sqrt(assemble(inner(ErrorR, ErrorR)*dx))
# errH1r[xx-1] = sqrt(assemble(inner(grad(ErrorR), grad(ErrorR))*dx))
# if xx == 1:
# a = 1
# else:
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
# print errL2b[xx-1]
# print errCurlb[xx-1]
# print errL2r[xx-1]
# print errH1r[xx-1]
import pandas as pd
# print "\n\n Magnetic convergence"
# MagneticTitles = ["Total DoF","B DoF","Soln Time","Iter","B-L2","B-order","B-Curl","Curl-order"]
# MagneticValues = np.concatenate((Wdim,Vdim,SolTime,OuterIt,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# print MagneticTable
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["Total DoF","R DoF","Soln Time","Iter","R-L2","R-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((Wdim,Qdim,SolTime,OuterIt,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-L2',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# print LagrangeTable
# LatexTitlesB = ["B DoF","R DoF","BB-L2","B-order","BB-Curl","Curl-order"]
# LatexValuesB = np.concatenate((Vdim,Qdim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# LatexTableB= pd.DataFrame(LatexValuesB, columns = LatexTitlesB)
# pd.set_option('precision',3)
# LatexTableB = MO.PandasFormat(LatexTableB,'BB-Curl',"%2.4e")
# LatexTableB = MO.PandasFormat(LatexTableB,'BB-L2',"%2.4e")
# print LatexTableB.to_latex()
# LatexTitlesR = ["B DoF","R DoF","R-L2","R-order","R-H1","H1-order"]
# LatexValuesR = np.concatenate((Vdim,Qdim,SolTime,l2rorder,errH1r,H1rorder),axis=1)
# LatexTableR= pd.DataFrame(LatexValuesR, columns = LatexTitlesR)
# pd.set_option('precision',3)
# LatexTableR = MO.PandasFormat(LatexTableR,'R-L2',"%2.4e")
# LatexTableR = MO.PandasFormat(LatexTableR,'R-H1',"%2.4e")
# print LatexTableR.to_latex()
LatexTitlesB = ["l","B DoF","R DoF","ITER"]
LatexValuesB = np.concatenate((NN,Vdim,Qdim,OuterIt),axis=1)
LatexTableB= pd.DataFrame(LatexValuesB, columns = LatexTitlesB)
pd.set_option('precision',3)
# LatexTableB = MO.PandasFormat(LatexTableB,'BB-Curl',"%2.4e")
# LatexTableB = MO.PandasFormat(LatexTableB,'BB-L2',"%2.4e")
print LatexTableB.to_latex()
if (SavePrecond == 'yes'):
scipy.io.savemat('eigenvalues/Wdim.mat', {'Wdim':Wdim-1},oned_as = 'row')
if (ShowResultPlots == 'yes'):
plot(ua)
plot(interpolate(ue,V))
plot(pa)
plot(interpolate(pe,Q))
interactive()
|
|
from __future__ import division
import numba as nb
import numpy as np
from .utils import get_func, isstr, aggregate_common_doc, funcs_no_separate_nan
from .utils_numpy import aliasing, input_validation, check_dtype, check_fill_value
class AggregateOp(object):
"""
Every subclass of AggregateOp handles a different aggregation operation. There are
    several private class methods that need to be overridden by the subclasses
in order to implement different functionality.
On object instantiation, all necessary static methods are compiled together into
two jitted callables, one for scalar arguments, and one for arrays. Calling the
instantiated object picks the right cached callable, does some further preprocessing
and then executes the actual aggregation operation.
"""
forced_fill_value = None
counter_fill_value = 1
counter_dtype = bool
mean_fill_value = None
mean_dtype = np.float64
outer = False
reverse = False
nans = False
def __init__(self, func=None, **kwargs):
if func is None:
func = type(self).__name__.lower()
self.func = func
self.__dict__.update(kwargs)
# Cache the compiled functions, so they don't have to be recompiled on every call
self._jit_scalar = self.callable(self.nans, self.reverse, scalar=True)
self._jit_non_scalar = self.callable(self.nans, self.reverse, scalar=False)
def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
dtype=None, axis=None, ddof=0):
iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False, func=self.func)
group_idx, a, flat_size, ndim_idx, size, unravel_shape = iv
# TODO: The typecheck should be done by the class itself, not by check_dtype
dtype = check_dtype(dtype, self.func, a, len(group_idx))
check_fill_value(fill_value, dtype, func=self.func)
input_dtype = type(a) if np.isscalar(a) else a.dtype
ret, counter, mean, outer = self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
group_idx = np.ascontiguousarray(group_idx)
if not np.isscalar(a):
a = np.ascontiguousarray(a)
jitfunc = self._jit_non_scalar
else:
jitfunc = self._jit_scalar
jitfunc(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
self._finalize(ret, counter, fill_value)
if self.outer:
ret = outer
# Deal with ndimensional indexing
if ndim_idx > 1:
if unravel_shape is not None:
# argreductions only
ret = np.unravel_index(ret, unravel_shape)[axis]
ret = ret.reshape(size, order=order)
return ret
@classmethod
def _initialize(cls, flat_size, fill_value, dtype, input_dtype, input_size):
if cls.forced_fill_value is None:
ret = np.full(flat_size, fill_value, dtype=dtype)
else:
ret = np.full(flat_size, cls.forced_fill_value, dtype=dtype)
counter = mean = outer = None
if cls.counter_fill_value is not None:
counter = np.full_like(ret, cls.counter_fill_value, dtype=cls.counter_dtype)
if cls.mean_fill_value is not None:
dtype = cls.mean_dtype if cls.mean_dtype else input_dtype
mean = np.full_like(ret, cls.mean_fill_value, dtype=dtype)
if cls.outer:
outer = np.full(input_size, fill_value, dtype=dtype)
return ret, counter, mean, outer
@classmethod
def _finalize(cls, ret, counter, fill_value):
if cls.forced_fill_value is not None and fill_value != cls.forced_fill_value:
if cls.counter_dtype == bool:
ret[counter] = fill_value
else:
ret[~counter.astype(bool)] = fill_value
@classmethod
def callable(cls, nans=False, reverse=False, scalar=False):
""" Compile a jitted function doing the hard part of the job """
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
            # fill_value and ddof need to be present so this loop stays interchangeable with the 2-pass loop
size = len(ret)
rng = range(len(group_idx) - 1, -1, -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True)
@staticmethod
def _valgetter(a, i):
return a[i]
@staticmethod
def _valgetter_scalar(a, i):
return a
@staticmethod
def _inner(ri, val, ret, counter, mean):
raise NotImplementedError("subclasses need to overwrite _inner")
@staticmethod
def _outersetter(outer, i, val):
pass
class Aggregate2pass(AggregateOp):
"""Base class for everything that needs to process the data twice like mean, var and std."""
@classmethod
def callable(cls, nans=False, reverse=False, scalar=False):
# Careful, cls needs to be passed, so that the overwritten methods remain available in
# AggregateOp.callable
loop = super(Aggregate2pass, cls).callable(nans=nans, reverse=reverse, scalar=scalar)
_2pass_inner = nb.njit(cls._2pass_inner)
def _loop2(ret, counter, mean, fill_value, ddof):
for ri in range(len(ret)):
if counter[ri]:
ret[ri] = _2pass_inner(ri, ret, counter, mean, ddof)
else:
ret[ri] = fill_value
loop2 = nb.njit(_loop2)
def _loop_2pass(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof)
loop2(ret, counter, mean, fill_value, ddof)
return nb.njit(_loop_2pass)
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
raise NotImplementedError("subclasses need to overwrite _2pass_inner")
@classmethod
def _finalize(cls, ret, counter, fill_value):
"""Copying the fill value is already done in the 2nd pass"""
pass
class AggregateNtoN(AggregateOp):
"""Base class for cumulative functions, where the output size matches the input size."""
outer = True
@staticmethod
def _outersetter(outer, i, val):
outer[i] = val
class AggregateGeneric(AggregateOp):
"""Base class for jitting arbitrary functions."""
counter_fill_value = None
def __init__(self, func, **kwargs):
self.func = func
self.__dict__.update(kwargs)
self._jitfunc = self.callable(self.nans)
def __call__(self, group_idx, a, size=None, fill_value=0, order='C',
dtype=None, axis=None, ddof=0):
iv = input_validation(group_idx, a, size=size, order=order, axis=axis, check_bounds=False)
group_idx, a, flat_size, ndim_idx, size, _ = iv
# TODO: The typecheck should be done by the class itself, not by check_dtype
dtype = check_dtype(dtype, self.func, a, len(group_idx))
check_fill_value(fill_value, dtype, func=self.func)
input_dtype = type(a) if np.isscalar(a) else a.dtype
ret, _, _, _ = self._initialize(flat_size, fill_value, dtype, input_dtype, group_idx.size)
group_idx = np.ascontiguousarray(group_idx)
sortidx = np.argsort(group_idx, kind='mergesort')
self._jitfunc(sortidx, group_idx, a, ret)
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
def callable(self, nans=False):
"""Compile a jitted function and loop it over the sorted data."""
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True)
class Sum(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] += val
class Prod(AggregateOp):
forced_fill_value = 1
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] *= val
class Len(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] += 1
class All(AggregateOp):
forced_fill_value = 1
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] &= bool(val)
class Any(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] |= bool(val)
class Last(AggregateOp):
counter_fill_value = None
@staticmethod
def _inner(ri, val, ret, counter, mean):
ret[ri] = val
class First(Last):
reverse = True
class AllNan(AggregateOp):
forced_fill_value = 1
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] &= val == val
class AnyNan(AggregateOp):
forced_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] = 0
ret[ri] |= val != val
class Max(AggregateOp):
@staticmethod
def _inner(ri, val, ret, counter, mean):
if counter[ri]:
ret[ri] = val
counter[ri] = 0
elif ret[ri] < val:
ret[ri] = val
class Min(AggregateOp):
@staticmethod
def _inner(ri, val, ret, counter, mean):
if counter[ri]:
ret[ri] = val
counter[ri] = 0
elif ret[ri] > val:
ret[ri] = val
class ArgMax(AggregateOp):
mean_fill_value = np.nan
@staticmethod
def _valgetter(a, i):
return a[i], i
@staticmethod
def _inner(ri, val, ret, counter, mean):
cmp_val, arg = val
if counter[ri]:
mean[ri] = cmp_val
ret[ri] = arg
counter[ri] = 0
elif mean[ri] < cmp_val:
mean[ri] = cmp_val
ret[ri] = arg
class ArgMin(ArgMax):
@staticmethod
def _inner(ri, val, ret, counter, mean):
cmp_val, arg = val
if counter[ri]:
mean[ri] = cmp_val
ret[ri] = arg
counter[ri] = 0
elif mean[ri] > cmp_val:
mean[ri] = cmp_val
ret[ri] = arg
class Mean(Aggregate2pass):
forced_fill_value = 0
counter_fill_value = 0
counter_dtype = int
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] += 1
ret[ri] += val
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
return ret[ri] / counter[ri]
class Std(Mean):
mean_fill_value = 0
@staticmethod
def _inner(ri, val, ret, counter, mean):
counter[ri] += 1
mean[ri] += val
ret[ri] += val * val
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
mean2 = mean[ri] * mean[ri]
return np.sqrt((ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof))
class Var(Std):
@staticmethod
def _2pass_inner(ri, ret, counter, mean, ddof):
mean2 = mean[ri] * mean[ri]
return (ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof)
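# Worked check (illustrative): for a group holding [1., 2., 3.] with ddof=0 the
# first pass leaves ret[ri] = 14 (sum of squares), mean[ri] = 6 (plain sum) and
# counter[ri] = 3, so Var yields (14 - 36/3) / 3 = 2/3 and Std yields sqrt(2/3).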
class CumSum(AggregateNtoN, Sum):
pass
class CumProd(AggregateNtoN, Prod):
pass
class CumMax(AggregateNtoN, Max):
pass
class CumMin(AggregateNtoN, Min):
pass
def get_funcs():
funcs = dict()
for op in (Sum, Prod, Len, All, Any, Last, First, AllNan, AnyNan, Min, Max,
ArgMin, ArgMax, Mean, Std, Var,
CumSum, CumProd, CumMax, CumMin):
funcname = op.__name__.lower()
funcs[funcname] = op(funcname)
if funcname not in funcs_no_separate_nan:
funcname = 'nan' + funcname
funcs[funcname] = op(funcname, nans=True)
return funcs
_impl_dict = get_funcs()
_default_cache = {}
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, cache=None, **kwargs):
func = get_func(func, aliasing, _impl_dict)
if not isstr(func):
if cache in (None, False):
aggregate_op = AggregateGeneric(func)
else:
if cache is True:
cache = _default_cache
aggregate_op = cache.setdefault(func, AggregateGeneric(func))
return aggregate_op(group_idx, a, size, fill_value, order, dtype, axis, **kwargs)
else:
func = _impl_dict[func]
return func(group_idx, a, size, fill_value, order, dtype, axis, **kwargs)
aggregate.__doc__ = """
This is the numba implementation of aggregate.
""" + aggregate_common_doc
@nb.njit(nogil=True, cache=True)
def step_count(group_idx):
"""Return the amount of index changes within group_idx."""
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
return steps
@nb.njit(nogil=True, cache=True)
def step_indices(group_idx):
"""Return the edges of areas within group_idx, which are filled with the same value."""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices
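# Worked check (illustrative): for group_idx = [1, 1, 2, 2, 2, 3], step_count
# returns 3 and step_indices returns [0, 2, 5, 6], so the half-open slices
# [0:2], [2:5] and [5:6] cover the three constant runs.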
|
|
import os
import random
import logging
from collections import OrderedDict
from databasic import mongo, get_base_dir
from databasic.forms import WordCounterPaste, WordCounterUpload, WordCounterSample, WordCounterLink
from databasic.logic import wordhandler, filehandler
from flask import Blueprint, render_template, request, redirect, g, abort, send_from_directory
from flask_babel import lazy_gettext as _
from databasic import NLTK_STOPWORDS_BY_LANGUAGE
mod = Blueprint('wordcounter', __name__,
url_prefix='/<lang_code>/wordcounter',
template_folder='../templates/wordcounter')
logger = logging.getLogger(__name__)
@mod.route('/', methods=('GET', 'POST'))
def index():
words = None
forms = OrderedDict()
forms['sample'] = WordCounterSample(g.current_lang)
forms['paste'] = WordCounterPaste('I am Sam\nSam I am\nThat Sam-I-am!\nThat Sam-I-am!\nI do not like that Sam-I-am!\nDo you like \ngreen eggs and ham?\nI do not like them, Sam-I-am.\nI do not like\ngreen eggs and ham.\nWould you like them \nhere or there?\nI would not like them\nhere or there.\nI would not like them anywhere.')
forms['upload'] = WordCounterUpload()
forms['link'] = WordCounterLink()
if request.method == 'POST':
ignore_case = True
ignore_stopwords = True
btn_value = request.form['btn']
sample_id = ''
extras_to_save = {}
if btn_value == 'paste':
words = forms['paste'].data['area']
ignore_case = forms[btn_value].data['ignore_case_paste']
ignore_stopwords = forms[btn_value].data['ignore_stopwords_paste']
logger.debug("New from paste: %d chars", len(words) )
title = _('your text')
elif btn_value == 'upload':
upload_file = forms['upload'].data['upload']
words = process_upload(upload_file)
ignore_case = forms[btn_value].data['ignore_case_upload']
ignore_stopwords = forms[btn_value].data['ignore_stopwords_upload']
title = upload_file.filename
logger.debug("New from upload: %s", title)
elif btn_value == 'sample':
sample_source = forms['sample'].data['sample']
samplename = filehandler.get_sample_title(sample_source)
title = samplename
ignore_case = forms[btn_value].data['ignore_case_sample']
ignore_stopwords = forms[btn_value].data['ignore_stopwords_sample']
sample_id = title+str(ignore_case)+str(ignore_stopwords)
existing_doc_id = mongo.results_for_sample('wordcounter', sample_id)
if existing_doc_id is not None:
logger.debug("Existing from sample: %s", sample_source)
return redirect(request.url + 'results/' + existing_doc_id)
logger.info("New from sample: %s", sample_source)
sample_path = filehandler.get_sample_path(sample_source)
logger.debug(" loading from %s", sample_path)
words = filehandler.convert_to_txt(sample_path)
extras_to_save = filehandler.get_sample(sample_source)
elif btn_value == 'link':
url = forms['link'].data['link']
# TODO: should actually accept https
if 'https://' in url:
url = url.replace('https', 'http')
            elif 'http://' not in url:
url = 'http://' + url
logger.debug("New from link: %s", url)
content = filehandler.download_webpage(url)
words = content['text']
ignore_case = forms[btn_value].data['ignore_case_link']
ignore_stopwords = forms[btn_value].data['ignore_stopwords_link']
title = _(content['title'])
if words is not None:
logger.debug(" about to process words")
counts = _process_words(words, ignore_case, ignore_stopwords, btn_value=='sample')
logger.debug(" finished counts, about to save")
doc_id = mongo.save_words('wordcounter', counts, ignore_case, ignore_stopwords, str(title), sample_id,
btn_value, extras_to_save)
logger.debug(" saved")
return redirect(request.url + 'results/' + doc_id + '?submit=true')
return render_template('wordcounter.html', forms=list(forms.items()), tool_name='wordcounter',
max_file_size_in_mb=g.max_file_size_mb)
@mod.route('/results/<doc_id>')
def results_for_doc(doc_id):
results = {}
remaining_days = None
try:
doc = mongo.find_document('wordcounter', doc_id)
if doc['sample_id'] == '':
remaining_days = mongo.get_remaining_days('wordcounter', doc_id)
except:
logger.warning("Unable to find doc '%s'", doc_id)
return render_template('no_results.html', tool_name='wordcounter')
counts = doc.get('counts')
# only render the top 40 results on the page (the csv contains all results)
results['unique_words'] = counts['unique_words'][:40]
results['bigrams'] = counts['bigrams'][:40]
results['trigrams'] = counts['trigrams'][:40]
max_index = min(20, len(results['unique_words']))
min_index = max(0, max_index-5)
random_unpopular_word = ['', '']
top_word = ''
word_in_bigrams_count = 0
word_in_trigrams_count = 0
if len(results['unique_words']) > 0:
        random_unpopular_word = results['unique_words'][random.randrange(min_index, max_index)]\
            if len(results['unique_words']) > 1 else results['unique_words'][0]
'''
Find the most popular word that is also present in bigrams and trigrams.
If none can be found, just get the most popular word.
'''
if results['unique_words'] and results['bigrams'] and results['trigrams']:
for word in results['unique_words']:
top_word = word[0]
word_in_bigrams_count = 0
word_in_trigrams_count = 0
for b in results['bigrams']:
if top_word in b[0]:
word_in_bigrams_count += 1
for t in results['trigrams']:
if top_word in t[0]:
word_in_trigrams_count += 1
if word_in_bigrams_count > 0 and word_in_trigrams_count > 0:
break
if word_in_bigrams_count == 0 and word_in_trigrams_count == 0:
top_word = results['unique_words'][0][0]
whatnext = {}
whatnext['top_word'] = top_word
whatnext['word_in_bigrams_count'] = word_in_bigrams_count
whatnext['word_in_trigrams_count'] = word_in_trigrams_count
whatnext['random_unpopular_word'] = random_unpopular_word[0]
whatnext['random_unpopular_word_count'] = random_unpopular_word[1]
biography = doc['biography'] if 'biography' in doc else None
return render_template('wordcounter/results.html',
results=results,
whatnext=whatnext,
tool_name='wordcounter',
title=doc['title'],
doc_id=doc_id,
source=doc['source'],
remaining_days=remaining_days,
total_words=counts['total_word_count'],
biography=biography)
@mod.route('/results/<doc_id>/download/<analysis_type>.csv')
def download_csv(doc_id, analysis_type):
logger.debug("Download %s", analysis_type)
if analysis_type not in ['words','bigrams','trigrams']:
logger.warning("Requested unknown csv type: %s", analysis_type)
abort(400)
try:
doc = mongo.find_document('wordcounter', doc_id)
except:
logger.warning("Unable to find doc '%s'", doc_id)
abort(400)
file_path = create_csv_file(doc.get('counts'),analysis_type)
logger.debug(' created %s csv to download at %s', analysis_type, file_path)
if file_path is None:
abort(500)
return filehandler.generate_csv(file_path)
@mod.route('/wordcounter-activity-guide.pdf')
def download_activity_guide():
filename = "WordCounter Activity Guide.pdf"
dir_path = os.path.join(get_base_dir(),'databasic','static','files','activity-guides',g.current_lang)
logger.debug("download activity guide from %s/%s", dir_path, filename)
return send_from_directory(directory=dir_path, filename=filename)
@mod.route('/run-activity')
def run_activity():
return render_template('wordcounter/run-activity.html')
def process_upload(doc):
file_path = filehandler.open_doc(doc)
file_size = os.stat(file_path).st_size # because browser might not have sent content_length
logger.debug("Upload: %d bytes", file_size)
words = filehandler.convert_to_txt(file_path)
filehandler.delete_file(file_path)
return words
def _process_words(words, ignore_case, ignore_stopwords, is_sample):
stopwords_language = NLTK_STOPWORDS_BY_LANGUAGE[g.current_lang]
counts = wordhandler.get_word_counts(
words,
ignore_case,
ignore_stopwords,
stopwords_language)
return counts
def create_csv_file(counts, analysis_type):
try:
if analysis_type == 'words':
return filehandler.write_to_csv(['word', 'frequency'], counts['unique_words'], '-word-counts.csv')
elif analysis_type == 'bigrams':
bigrams = []
for w in counts['bigrams']:
freq = w[1]
phrase = " ".join(w[0])
bigrams.append([phrase, freq])
return filehandler.write_to_csv(['bigram phrase', 'frequency'], bigrams, '-bigram-counts.csv')
elif analysis_type == 'trigrams':
trigrams = []
for w in counts['trigrams']:
freq = w[1]
phrase = " ".join(w[0])
trigrams.append([phrase, freq])
return filehandler.write_to_csv(['trigram phrase', 'frequency'], trigrams, '-trigram-counts.csv')
logger.error("Requested unknown csv type: %s", analysis_type)
except Exception as e:
logger.exception(e)
return render_template('no_results.html', tool_name='wordcounter')
def _clamp(n, minn, maxn):
return max(min(maxn, n), minn)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for CM Deployment Planner
"""
# Import Local Modules
from marvin.cloudstackAPI import (deployVirtualMachine, destroyVirtualMachine)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (Account,
ServiceOffering,
Host, Pod, Cluster)
from marvin.lib.common import (get_domain,
get_zone,
get_template)
from marvin.lib.utils import cleanup_resources
from nose.plugins.attrib import attr
class TestVMDeploymentPlanner(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestVMDeploymentPlanner, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.hypervisor = testClient.getHypervisorInfo()
cls.services['mode'] = cls.zone.networktype
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
# Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls.cleanup = [
cls.account,
cls.service_offering
]
@classmethod
def tearDownClass(cls):
try:
cls.apiclient = super(
TestVMDeploymentPlanner,
cls
).getClsTestClient().getApiClient()
            # Clean up, terminate the created resources
cleanup_resources(cls.apiclient, cls.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def deploy_vm(self, destination_id):
cmd = deployVirtualMachine.deployVirtualMachineCmd()
template = get_template(
self.apiclient,
self.zone.id,
hypervisor=self.hypervisor
)
cmd.zoneid = self.zone.id
cmd.templateid = template.id
cmd.serviceofferingid = self.service_offering.id
cmd.hostid = destination_id
return self.apiclient.deployVirtualMachine(cmd)
def destroy_vm(self, vm_id):
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
cmd.expunge = True
cmd.id = vm_id
return self.apiclient.destroyVirtualMachine(cmd)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
def test_01_deploy_vm_on_specific_host(self):
hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
type='Routing'
)
target_id = hosts[0].id
vm = self.deploy_vm(target_id)
self.assertEqual(
target_id,
vm.hostid,
"VM instance was not deployed on target host ID")
self.destroy_vm(vm.id)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
def test_02_deploy_vm_on_specific_cluster(self):
# Select deployment cluster
clusters = Cluster.list(
self.apiclient,
)
target_cluster = clusters[0]
target_id = target_cluster.id
cluster_hypervisor = target_cluster.hypervisortype
template = get_template(
self.apiclient,
hypervisor=cluster_hypervisor
)
# deploy vm on cluster
cmd = deployVirtualMachine.deployVirtualMachineCmd()
cmd.zoneid = target_cluster.zoneid
cmd.serviceofferingid = self.service_offering.id
cmd.templateid = template.id
cmd.clusterid = target_id
vm = self.apiclient.deployVirtualMachine(cmd)
vm_host = Host.list(self.apiclient,
id=vm.hostid
)
self.assertEqual(
target_id,
vm_host[0].clusterid,
"VM was not deployed on the provided cluster"
)
self.destroy_vm(vm.id)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
def test_03_deploy_vm_on_specific_pod(self):
pods = Pod.list(
self.apiclient,
)
target_pod = pods[0]
# Get host by Pod ID
host = Host.list(
self.apiclient,
podid=target_pod.id)
# deploy vm on pod
cmd = deployVirtualMachine.deployVirtualMachineCmd()
cmd.zoneid = target_pod.zoneid
cmd.serviceofferingid = self.service_offering.id
template = get_template(
self.apiclient,
hypervisor=host[0].hypervisortype
)
cmd.templateid = template.id
cmd.podid = target_pod.id
vm = self.apiclient.deployVirtualMachine(cmd)
vm_host = Host.list(self.apiclient,
id=vm.hostid
)
self.assertEqual(
target_pod.id,
vm_host[0].podid,
"VM was not deployed on the target pod"
)
self.destroy_vm(vm.id)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
def test_04_deploy_vm_on_host_override_pod_and_cluster(self):
# Optional parameters pod, cluster and host
pod = Pod.list(self.apiclient, zoneid=self.zone.id)[0]
clusters = Cluster.list(self.apiclient, zoneid=self.zone.id, podid=pod.id)
self.assertEqual(
isinstance(clusters, list),
True,
"Check list response returns a valid list"
)
host = Host.list(self.apiclient, zoneid=self.zone.id, clusterid=clusters[0].id, type='Routing')[0]
cmd = deployVirtualMachine.deployVirtualMachineCmd()
# Required parameters
cmd.zoneid = self.zone.id
cmd.serviceofferingid = self.service_offering.id
template = get_template(self.apiclient, zone_id=self.zone.id, hypervisor=host.hypervisor)
cmd.templateid = template.id
# Add optional deployment params
cmd.podid = pod.id
cmd.clusterid = clusters[1].id if len(clusters) > 1 else clusters[0].id
cmd.hostid = host.id
vm = self.apiclient.deployVirtualMachine(cmd)
self.assertEqual(
vm.hostid,
host.id,
"VM was not deployed on the target host ID"
)
self.destroy_vm(vm.id)
@attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
def test_05_deploy_vm_on_cluster_override_pod(self):
# Optional parameters pod, cluster and host
pod = Pod.list(self.apiclient, zoneid=self.zone.id)[0]
clusters = Cluster.list(self.apiclient, zoneid=self.zone.id, podid=pod.id)
self.assertEqual(
isinstance(clusters, list),
True,
"Check list response returns a valid list"
)
cmd = deployVirtualMachine.deployVirtualMachineCmd()
# Required parameters
cmd.zoneid = self.zone.id
cmd.serviceofferingid = self.service_offering.id
template = get_template(self.apiclient, zone_id=self.zone.id, hypervisor=clusters[0].hypervisortype)
cmd.templateid = template.id
# Add optional deployment params
cmd.podid = pod.id
cmd.clusterid = clusters[0].id
vm = self.apiclient.deployVirtualMachine(cmd)
vm_host = Host.list(self.apiclient,
id=vm.hostid
)
self.assertEqual(
vm_host[0].clusterid,
clusters[0].id,
"VM was not deployed on the target cluster"
)
self.destroy_vm(vm.id)
|
|
#!/usr/bin/env python
from JumpScale import j
import sys
import time
try:
import ujson as json
except:
import json
import psutil
import JumpScale.baselib.taskletengine
from JumpScale.baselib import cmdutils
# Preload libraries
j.system.platform.psutil=psutil
# import JumpScale.baselib.graphite
import JumpScale.lib.diskmanager
import JumpScale.baselib.stataggregator
import JumpScale.grid.agentcontroller
import JumpScale.grid.osis
import JumpScale.baselib.redis
from JumpScale.baselib.redisworker.RedisWorker import RedisWorkerFactory
import JumpScale.grid.jumpscripts
import os
RUNTIME = 24 * 3600
def restart_program():
"""Restarts the current program.
Note: this function does not return. Any cleanup action (like
saving data) must be done before calling this function."""
python = sys.executable
os.execl(python, python, * sys.argv)
class Worker(object):
def __init__(self,queuename, logpath):
self.actions={}
self.clients = dict()
self.acclient = None
self.redisw = RedisWorkerFactory()
self.queuename=queuename
self.init()
self.starttime = time.time()
self.logpath = logpath
self.logFile = None
if self.logpath != None:
self.logFile = open(self.logpath,'w',0)
def getClient(self, job):
ipaddr = getattr(job, 'achost', None)
client = self.clients.get(ipaddr)
if not client:
if ipaddr:
client = j.clients.agentcontroller.get(ipaddr, login='node')
self.clients[ipaddr] = client
else:
if self.acclient==None:
self.acclient = j.clients.agentcontroller.getByInstance()
return self.acclient
return client
def init(self):
j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.tmpDir,"jumpscripts"))
self.redisw.redis.delete("workers:action:%s"%self.queuename)
def processAction(self, action):
self.redisw.redis.delete("workers:action:%s"%self.queuename)
if action == "RESTART":
print "RESTART ASKED"
restart_program()
j.application.stop()
if action=="RELOAD":
print "RELOAD ASKED"
self.actions={}
def run(self):
self.log("STARTED")
while True:
self.redisw.redis.hset("workers:heartbeat",self.queuename,int(time.time()))
if self.starttime + RUNTIME < time.time():
print "Running for %s seconds restarting" % RUNTIME
restart_program()
try:
self.log("check if work")
jtype, job = self.redisw._getWork(self.queuename,timeout=10)
except Exception,e:
if str(e).find("Could not find queue to execute job")<>-1:
#create queue
self.log("could not find queue")
else:
j.events.opserror("Could not get work from redis, is redis running?","workers.getwork",e)
time.sleep(10)
continue
if jtype == "action":
self.processAction(job)
continue
if job:
j.application.jid=job.guid
try:
if self.actions.has_key(job.jscriptid):
jscript=self.actions[job.jscriptid]
else:
self.log("JSCRIPT CACHEMISS")
try:
jscript=self.redisw.getJumpscriptFromId(job.jscriptid)
if jscript==None:
msg="cannot find jumpscript with id:%s"%job.jscriptid
self.log("ERROR:%s"%msg)
j.events.bug_warning(msg,category="worker.jscript.notfound")
job.result=msg
job.state="ERROR"
self.notifyWorkCompleted(job)
continue
if jscript.organization<>"" and jscript.name<>"" and jscript.id<1000000:
                                #make sure we pick up the newest version of the script if it changed since this job was queued
jscript=self.redisw.getJumpscriptFromName(jscript.organization,jscript.name)
job.jscriptid=jscript.id
#result is method action
jscript.write()
jscript.load()
self.actions[job.jscriptid]=jscript
except Exception,e:
agentid=j.application.getAgentId()
if jscript<>None:
msg="could not compile jscript:%s %s_%s on agent:%s.\nError:%s"%(jscript.id,jscript.organization,jscript.name,agentid,e)
else:
msg="could not compile jscriptid:%s on agent:%s.\nError:%s"%(job.jscriptid,agentid,e)
eco=j.errorconditionhandler.parsePythonErrorObject(e)
eco.errormessage = msg
eco.code=jscript.source
eco.jid = job.guid
eco.category = 'workers.compilescript'
eco.process()
job.state="ERROR"
eco.tb = None
job.result=eco.__dict__
# j.events.bug_warning(msg,category="worker.jscript.notcompile")
# self.loghandler.logECO(eco)
self.notifyWorkCompleted(job)
continue
self.actions[job.jscriptid]=jscript
self.log("Job started:%s script:%s %s/%s"%(job.id, jscript.id,jscript.organization,jscript.name))
j.logger.enabled = job.log
status, result=jscript.executeInWorker(**job.args)
self.redisw.redis.hdel("workers:inqueuetest",jscript.getKey())
j.logger.enabled = True
if status:
job.result=result
job.state="OK"
job.resultcode=0
else:
if isinstance(result, basestring):
job.state = result
else:
eco = result
agentid=j.application.getAgentId()
msg="Could not execute jscript:%s %s_%s on agent:%s\nError: %s"%(jscript.id,jscript.organization,jscript.name,agentid, eco.errormessage)
eco.errormessage = msg
eco.jid = job.guid
eco.code=jscript.source
eco.category = "workers.executejob"
out=""
tocheck=["\"worker.py\"","jscript.executeInWorker","return self.module.action","JumpscriptFactory.py"]
for line in eco.backtrace.split("\n"):
found=False
for check in tocheck:
if line.find(check)<>-1:
found=True
break
if found==False:
out+="%s\n"%line
eco.backtrace=out
if job.id<1000000 and job.errorreport==True:
eco.process()
else:
self.log(eco)
# j.events.bug_warning(msg,category="worker.jscript.notexecute")
# self.loghandler.logECO(eco)
job.state="ERROR"
eco.tb = None
job.result=eco.__dict__
job.resultcode=1
                    #ok or not ok, we need to remove the entry from the in-queue test
                    #this in-queue test exists so the script is not executed multiple times
self.notifyWorkCompleted(job)
finally:
j.application.jid = 0
def notifyWorkCompleted(self,job):
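        #Internal jobs (jscriptid>1000000) are reported back over redis only;
        #jobs coming from the agentcontroller are reported back to the AC
        #client, errored results are kept in redis and successful ones are
        #dropped once forwarded.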
job.timeStop=int(time.time())
# if job.state[0:2]<>"OK":
# self.log("result:%s"%job.result)
if job.jscriptid>1000000:
            #means it is an internal job
# q=j.clients.redis.getGeventRedisQueue("127.0.0.1",9999,"workers:return:%s"%jobid)
self.redisw.redis.hset("workers:jobs",job.id, json.dumps(job.__dict__))
self.redisw.redis.rpush("workers:return:%s"%job.id,time.time())
else:
try:
acclient = self.getClient(job)
except Exception,e:
j.events.opserror("could not report job in error to agentcontroller", category='workers.errorreporting', e=e)
return
#jumpscripts coming from AC
if job.state<>"OK":
try:
acclient.notifyWorkCompleted(job.__dict__)
except Exception,e:
j.events.opserror("could not report job in error to agentcontroller", category='workers.errorreporting', e=e)
return
#lets keep the errors
# self.redis.hdel("workers:jobs",job.id)
else:
if job.log or job.wait:
try:
acclient.notifyWorkCompleted(job.__dict__)
except Exception,e:
j.events.opserror("could not report job result to agentcontroller", category='workers.jobreporting', e=e)
return
# job.state=="OKR" #means ok reported
#we don't have to keep status of local job result, has been forwarded to AC
self.redisw.redis.hdel("workers:jobs",job.id)
def log(self, message, category='',level=5, time=None):
if time == None:
time = j.base.time.getLocalTimeHR()
msg = "%s:worker:%s:%s" % (time, self.queuename, message)
print msg
if self.logFile != None:
msg = msg+"\n"
self.logFile.write(msg)
if __name__ == '__main__':
parser = cmdutils.ArgumentParser()
parser.add_argument("-qn", '--queuename', help='Queue name', required=True)
parser.add_argument("-i", '--instance', help='JSAgent instance', required=True)
parser.add_argument("-lp", '--logpath', help='Logging file path', required=False, default=None)
opts = parser.parse_args()
jp = j.packages.findNewest('jumpscale', 'jsagent')
jp.load(opts.instance)
j.application.instanceconfig = jp.hrd_instance
j.core.osis.client = j.core.osis.getClientByInstance(die=False)
j.application.start("jumpscale:worker:%s" % opts.queuename)
if j.application.config.exists("grid.id"):
j.application.initGrid()
j.logger.consoleloglevel = 2
j.logger.maxlevel=7
worker=Worker(opts.queuename, opts.logpath)
worker.run()
|
|
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""This module provides fixtures that are shared among all repository
flavours (Open/EE)"""
import json
import os
import pyroute2
import pytest
from jwt.utils import base64url_decode, base64url_encode
import generic_test_code.common
from mocker.dns import DcosDnsServer
from mocker.jwt import generate_hs256_jwt, generate_rs256_jwt
from runner.common import LogCatcher, SyslogMock
from util import add_lo_ipaddr, ar_listen_link_setup, del_lo_ipaddr
@pytest.fixture()
def tmp_file(tmpdir):
"""Provide a temporary file in pytest-defined tmp dir
Returns:
A path to the tmp file
"""
return tmpdir.join('tmp_data.json').strpath
@pytest.fixture(scope='session')
def repo_is_ee():
return generic_test_code.common.repo_is_ee()
# We explicitly need dns_server_mock_s fixture here as Mock HTTP servers
# require DNS to resolve their server_names.
@pytest.fixture(scope='session')
def mocker_s(repo_is_ee, syslog_mock, extra_lo_ips, dns_server_mock_s):
"""Provide a gc-ed mocker instance suitable for the repository flavour"""
if repo_is_ee:
from mocker.ee import Mocker
else:
from mocker.open import Mocker
m = Mocker()
m.start()
yield m
m.stop()
@pytest.fixture(scope='function')
def mocker(mocker_s):
"""An extension to `mocker_s` fixture that adds resetting the mock to
initial state after each test.
    The division stems from the fact that the mocker instance should be created
    only once per session, while it must be reset after every test to its
    initial state
"""
yield mocker_s
mocker_s.reset()
@pytest.fixture(scope='session')
def log_catcher():
"""Provide a session-scoped LogCatcher instance for use by other objects"""
lc = LogCatcher()
yield lc
lc.stop()
@pytest.fixture(scope='session')
def syslog_mock(log_catcher):
"""Provide a session-scoped SyslogMock instance for use by other objects"""
m = SyslogMock(log_catcher)
yield m
m.stop()
@pytest.fixture(scope='session')
def dns_server_mock_s(dcos_net_ips, resolvconf_fixup):
"""Set-up DNS mocks, both for agent AR (port 53) and master AR (port 61053)"""
dns_sockets = [
("198.51.100.1", 53),
("198.51.100.2", 53),
("198.51.100.3", 53),
("127.0.0.1", 53),
("127.0.0.1", 61053),
]
s = DcosDnsServer(dns_sockets)
s.start()
yield s
s.stop()
@pytest.fixture(scope='function')
def dns_server_mock(dns_server_mock_s):
"""An extension to `dns_server_mock_s` fixture that adds resetting the mock
to initial state after each test.
    The division stems from the fact that the server instance should be created
    only once per session, while it must be reset after every test to its
    initial state
"""
yield dns_server_mock_s
dns_server_mock_s.reset()
@pytest.fixture(scope='session')
def dcos_net_ips():
"""Setup IPs that help dns_mock mimic dcos-net"""
ips = ['198.51.100.1', '198.51.100.2', '198.51.100.3']
nflink = pyroute2.IPRoute()
for ip in ips:
add_lo_ipaddr(nflink, ip, 32)
yield
for ip in ips:
del_lo_ipaddr(nflink, ip, 32)
nflink.close()
@pytest.fixture(scope='session')
def extra_lo_ips():
"""Setup IPs that are used for simulating e.g. agent, mesos leader, etc.. """
ips = ['127.0.0.2', '127.0.0.3']
nflink = pyroute2.IPRoute()
for ip in ips:
add_lo_ipaddr(nflink, ip, 32)
yield
for ip in ips:
del_lo_ipaddr(nflink, ip, 32)
nflink.close()
@pytest.fixture(scope='session')
def resolvconf_fixup():
"""Redirect all DNS request to local DNS mock
Docker's (1.12 ATM) functionality is quite limited when it comes to
/etc/resolv.conf manipulation: https://github.com/docker/docker/issues/1297
    So the idea is to temporarily change the resolv.conf contents during the
pytest run.
"""
with open("/etc/resolv.conf", 'rb') as fh:
old = fh.read()
with open("/etc/resolv.conf", 'w') as fh:
fh.write("nameserver 127.0.0.1\n")
yield
with open("/etc/resolv.conf", 'wb') as fh:
fh.write(old)
@pytest.fixture(scope='session')
def nginx_class(repo_is_ee, dns_server_mock_s, log_catcher, syslog_mock, mocker_s):
"""Provide a Nginx class suitable for the repository flavour
This fixture also binds together all the mocks (dns, syslog, mocker(endpoints),
    log_catcher), so that a test developer can spawn their own AR instance if
the default ones (master_ar_process/agent_ar_process) are insufficient.
"""
if repo_is_ee:
from runner.ee import Nginx
else:
from runner.open import Nginx
def f(*args, role="master", **kwargs):
# We cannot define it as a fixture due to the fact that nginx_class is
        # used both in other fixtures and in tests directly. A listen link setup
        # fixture would have to be pulled in every time nginx_class is used
# on its own.
ar_listen_link_setup(role, repo_is_ee)
return Nginx(*args, role=role, log_catcher=log_catcher, **kwargs)
return f
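# A minimal usage sketch (hypothetical test, not part of this module): tests
# that need an AR instance beyond the shared fixtures below can build their
# own through the factory returned above, e.g.:
#
#   def test_with_custom_ar(nginx_class):
#       ar = nginx_class(role="agent")
#       ar.start()
#       try:
#           pass  # exercise the AR instance here
#       finally:
#           ar.stop()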
@pytest.fixture(scope='module')
def master_ar_process(nginx_class):
"""A go-to AR process instance fixture that should be used in most of the
tests.
We cannot have 'session' scoped AR processes, as some of the tests will
need to start AR with different env vars or AR type (master/agent). So the
idea is to give it 'module' scope and thus have the same AR instance for
    all the tests in a given test file, unless greater flexibility is required
    and the nginx_class fixture or the master_ar_process_pertest fixture is used.
"""
nginx = nginx_class(role="master")
nginx.start()
yield nginx
nginx.stop()
@pytest.fixture()
def master_ar_process_pertest(nginx_class):
"""An AR process instance fixture for situations where need to trade off
tests speed for having a per-test AR instance
"""
nginx = nginx_class(role="master")
nginx.start()
yield nginx
nginx.stop()
@pytest.fixture(scope='class')
def master_ar_process_perclass(nginx_class):
"""An AR process instance fixture for situations where need to trade off
tests speed for having a per-class AR instance
"""
nginx = nginx_class(role="master")
nginx.start()
yield nginx
nginx.stop()
@pytest.fixture(scope='module')
def agent_ar_process(nginx_class):
"""
Same as `master_ar_process` fixture except for the fact that it starts 'agent'
nginx instead of `master`.
"""
nginx = nginx_class(role="agent")
nginx.start()
yield nginx
nginx.stop()
@pytest.fixture()
def agent_ar_process_pertest(nginx_class):
"""
Same as `master_ar_process_pertest` fixture except for the fact that it
starts 'agent' nginx instead of `master`.
"""
nginx = nginx_class(role="agent")
nginx.start()
yield nginx
nginx.stop()
@pytest.fixture(scope='class')
def agent_ar_process_perclass(nginx_class):
"""
Same as `master_ar_process_perclass` fixture except for the fact that it
starts 'agent' nginx instead of `master`.
"""
nginx = nginx_class(role="agent")
nginx.start()
yield nginx
nginx.stop()
@pytest.fixture(scope='session')
def jwt_generator(repo_is_ee):
"""Generate valid JWT for given repository flavour and parameters
    ATM Open uses HS256, while EE uses RS256. This fixture abstracts that away
    by providing a transparent interface for generating valid JWTs for the
    given repository flavour.
    This fixture also exposes an interface through which the resulting JWT
    field values can be manipulated.
"""
if repo_is_ee:
key_path = os.getenv('IAM_PRIVKEY_FILE_PATH')
else:
key_path = os.getenv('IAM_SHARED_SECRET_FILE_PATH')
assert key_path is not None
if repo_is_ee:
def f(uid, *args, **kwargs):
return generate_rs256_jwt(key_path, uid=uid, *args, **kwargs)
else:
def f(uid, *args, **kwargs):
return generate_hs256_jwt(key_path, uid=uid, *args, **kwargs)
return f
@pytest.fixture(scope='session')
def mismatch_alg_jwt_generator(repo_is_ee):
"""Generate invalid JWT for given repository flavour and parameters
Tokens generated by this generator aren't recognized by Admin Router"""
return jwt_generator(not repo_is_ee)
@pytest.fixture(scope='session')
def valid_user_header(jwt_generator):
"""This fixture further simplifies JWT handling by providing a ready-to-use
    header with a valid JSON Web Token for the `requests` module to use"""
token = jwt_generator(uid='bozydar')
header = {'Authorization': 'token={}'.format(token)}
return header
@pytest.fixture(scope='session')
def forged_user_header(jwt_generator):
"""Return JWT token with a forged UID claim"""
token = jwt_generator(uid='bozydar')
# Decode token:
header_bytes, payload_bytes, signature_bytes = [
base64url_decode(_.encode('ascii')) for _ in token.split(".")]
payload_dict = json.loads(payload_bytes.decode('ascii'))
# Rewrite uid and invert token decode procedure.
payload_dict['uid'] = 'fafok'
payload_bytes = json.dumps(payload_dict).encode('utf-8')
forged_token = '.'.join(
base64url_encode(_).decode('ascii') for _ in (
header_bytes, payload_bytes, signature_bytes)
)
header = {'Authorization': 'token={}'.format(forged_token)}
return header
|
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to googlemock@googlegroups.com. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
"""Returns the root directory of Google Test."""
return os.path.join(gmock_root, '../googletest')
def ValidateGMockRootDir(gmock_root):
"""Makes sure gmock_root points to a valid gmock root directory.
The function aborts the program on failure.
"""
gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gmock headers we've processed.
def ProcessFile(gmock_header_path):
"""Processes the given gmock header file."""
# We don't process the same header twice.
if gmock_header_path in processed_files:
return
processed_files.add(gmock_header_path)
# Reads each line in the given gmock header.
for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'. We translate it to
# "gtest/gtest.h", regardless of what foo is, since all
# gtest headers are fused into gtest/gtest.h.
# There is no need to #include gtest.h twice.
if not gtest.GTEST_H_SEED in processed_files:
processed_files.add(gtest.GTEST_H_SEED)
output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_H_SEED)
output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
"""Scans folder gmock_root to fuse gmock-all.cc into output_file."""
processed_files = sets.Set()
def ProcessFile(gmock_source_file):
"""Processes the given gmock source file."""
# We don't process the same #included file twice.
if gmock_source_file in processed_files:
return
processed_files.add(gmock_source_file)
# Reads each line in the given gmock source file.
for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/foo.h"'. We treat it as '#include
# "gmock/gmock.h"', as all other gmock headers are being fused
# into gmock.h and cannot be #included directly.
# There is no need to #include "gmock/gmock.h" more than once.
if not GMOCK_H_SEED in processed_files:
processed_files.add(GMOCK_H_SEED)
output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."'.
# There is no need to #include gtest.h as it has been
# #included by gtest-all.cc.
pass
else:
m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "mlhist/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
# First, fuse gtest-all.cc into gmock-gtest-all.cc.
gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
# Next, append fused gmock-all.cc to gmock-gtest-all.cc.
FuseGMockAllCcToFile(gmock_root, output_file)
output_file.close()
def FuseGMock(gmock_root, output_dir):
"""Fuses gtest.h, gmock.h, and gmock-gtest-all.h."""
ValidateGMockRootDir(gmock_root)
ValidateOutputDir(output_dir)
gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
FuseGMockH(gmock_root, output_dir)
FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gmock_files.py OUTPUT_DIR
FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
FuseGMock(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
|
|
#! /usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
import logging
import os
import re
import sys
import yaml
LOGGER = logging.getLogger('upgrade_dbt_schema')
LOGFILE = 'upgrade_dbt_schema_tests_v1_to_v2.txt'
COLUMN_NAME_PAT = re.compile(r'\A[a-zA-Z0-9_]+\Z')
# compatibility nonsense
try:
basestring = basestring
except NameError:
basestring = str
def is_column_name(value):
if not isinstance(value, basestring):
return False
return COLUMN_NAME_PAT.match(value) is not None
class OperationalError(Exception):
def __init__(self, message):
self.message = message
        super(OperationalError, self).__init__(message)
def setup_logging(filename):
LOGGER.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(asctime)s: %(message)s')
file_handler = logging.FileHandler(filename=filename)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.addHandler(stderr_handler)
def parse_args(args):
parser = ArgumentParser(description='dbt schema converter')
parser.add_argument(
'--logfile-path',
dest='logfile_path',
help='The path to write the logfile to',
default=LOGFILE
)
parser.add_argument(
'--no-backup',
action='store_false',
dest='backup',
help='if set, do not generate ".backup" files.'
)
parser.add_argument(
'--apply',
action='store_true',
help=('if set, apply changes instead of just logging about found '
'schema.yml files')
)
parser.add_argument(
'--complex-test',
dest='extra_complex_tests',
action='append',
help='extra "complex" tests, as key:value pairs, where key is the '
'test name and value is the test key that contains the column '
'name.'
)
parser.add_argument(
'--complex-test-file',
dest='extra_complex_tests_file',
default=None,
help='The path to an optional yaml file of key/value pairs that does '
'the same as --complex-test.'
)
parser.add_argument('search_directory')
parsed = parser.parse_args(args)
return parsed
def backup_file(src, dst):
if not os.path.exists(src):
LOGGER.debug('no file at {} - nothing to back up'.format(src))
return
LOGGER.debug('backing up file at {} to {}'.format(src, dst))
with open(src, 'rb') as ifp, open(dst, 'wb') as ofp:
ofp.write(ifp.read())
LOGGER.debug('backup successful')
def validate_and_mutate_args(parsed):
"""Validate arguments, raising OperationalError on bad args. Also convert
the complex tests from 'key:value' -> {'key': 'value'}.
"""
if not os.path.exists(parsed.search_directory):
raise OperationalError(
'input directory at {} does not exist!'.format(parsed.search_directory)
)
complex_tests = {}
if parsed.extra_complex_tests_file:
if not os.path.exists(parsed.extra_complex_tests_file):
raise OperationalError(
'complex tests definition file at {} does not exist'
.format(parsed.extra_complex_tests_file)
)
with open(parsed.extra_complex_tests_file) as fp:
extra_tests = yaml.safe_load(fp)
if not isinstance(extra_tests, dict):
raise OperationalError(
'complex tests definition file at {} is not a yaml mapping'
.format(parsed.extra_complex_tests_file)
)
complex_tests.update(extra_tests)
if parsed.extra_complex_tests:
for tst in parsed.extra_complex_tests:
pair = tst.split(':', 1)
if len(pair) != 2:
raise OperationalError('Invalid complex test "{}"'.format(tst))
complex_tests[pair[0]] = pair[1]
parsed.extra_complex_tests = complex_tests
def handle(parsed):
"""Try to handle the schema conversion. On failure, raise OperationalError
and let the caller handle it.
"""
validate_and_mutate_args(parsed)
with open(os.path.join(parsed.search_directory, 'dbt_project.yml')) as fp:
project = yaml.safe_load(fp)
model_dirs = project.get('model-paths', ['models'])
if parsed.apply:
print('converting the following files to the v2 spec:')
else:
print('would convert the following files to the v2 spec:')
for model_dir in model_dirs:
search_path = os.path.join(parsed.search_directory, model_dir)
convert_project(search_path, parsed.backup, parsed.apply,
parsed.extra_complex_tests)
if not parsed.apply:
print('Run with --apply to write these changes. Files with an error '
'will not be converted.')
def find_all_yaml(path):
for root, _, files in os.walk(path):
for filename in files:
if filename.endswith('.yml'):
yield os.path.join(root, filename)
def convert_project(path, backup, write, extra_complex_tests):
for filepath in find_all_yaml(path):
try:
convert_file(filepath, backup, write, extra_complex_tests)
except OperationalError as exc:
print('{} - could not convert: {}'.format(filepath, exc.message))
LOGGER.error(exc.message)
def convert_file(path, backup, write, extra_complex_tests):
LOGGER.info('loading input file at {}'.format(path))
with open(path) as fp:
initial = yaml.safe_load(fp)
version = initial.get('version', 1)
# the isinstance check is to handle the case of models named 'version'
if version == 2:
msg = '{} - already v2, no need to update'.format(path)
print(msg)
LOGGER.info(msg)
return
elif version != 1 and isinstance(version, int):
raise OperationalError(
'input file is not a v1 yaml file (reports as {})'.format(version)
)
new_file = convert_schema(initial, extra_complex_tests)
if write:
LOGGER.debug(
'writing converted schema to output file at {}'.format(path)
)
if backup:
backup_file(path, path+'.backup')
with open(path, 'w') as fp:
yaml.dump(new_file, fp, default_flow_style=False, indent=2)
print('{} - UPDATED'.format(path))
LOGGER.info('successfully wrote v2 schema.yml file to {}'.format(path))
else:
print('{} - Not updated (dry run)'.format(path))
LOGGER.info('would have written v2 schema.yml file to {}'.format(path))
def main(args=None):
if args is None:
args = sys.argv[1:]
parsed = parse_args(args)
setup_logging(parsed.logfile_path)
try:
handle(parsed)
except OperationalError as exc:
LOGGER.error(exc.message)
except:
LOGGER.exception('Fatal error during conversion attempt')
else:
LOGGER.info('successfully converted files in {}'.format(
parsed.search_directory
))
def sort_keyfunc(item):
if isinstance(item, basestring):
return item
else:
return list(item)[0]
def sorted_column_list(column_dict):
columns = []
for column in sorted(column_dict.values(), key=lambda c: c['name']):
# make the unit tests a lot nicer.
column['tests'].sort(key=sort_keyfunc)
columns.append(CustomSortedColumnsSchema(**column))
return columns
class ModelTestBuilder:
SIMPLE_COLUMN_TESTS = {'unique', 'not_null'}
# map test name -> the key that indicates column name
COMPLEX_COLUMN_TESTS = {
'relationships': 'from',
'accepted_values': 'field',
}
def __init__(self, model_name, extra_complex_tests=None):
self.model_name = model_name
self.columns = {}
self.model_tests = []
self._simple_column_tests = self.SIMPLE_COLUMN_TESTS.copy()
# overwrite with ours last so we always win.
self._complex_column_tests = {}
if extra_complex_tests:
self._complex_column_tests.update(extra_complex_tests)
self._complex_column_tests.update(self.COMPLEX_COLUMN_TESTS)
def get_column(self, column_name):
if column_name in self.columns:
return self.columns[column_name]
column = {'name': column_name, 'tests': []}
self.columns[column_name] = column
return column
def add_column_test(self, column_name, test_name):
column = self.get_column(column_name)
column['tests'].append(test_name)
def add_table_test(self, test_name, test_value):
if not isinstance(test_value, dict):
test_value = {'arg': test_value}
self.model_tests.append({test_name: test_value})
def handle_simple_column_test(self, test_name, test_values):
for column_name in test_values:
LOGGER.info(
'found a {} test for model {}, column {}'.format(
test_name, self.model_name, column_name
)
)
self.add_column_test(column_name, test_name)
def handle_complex_column_test(self, test_name, test_values):
"""'complex' columns are lists of dicts, where each dict has a single
key (the test name) and the value of that key is a dict of test values.
"""
column_key = self._complex_column_tests[test_name]
for dct in test_values:
if column_key not in dct:
raise OperationalError(
'got an invalid {} test in model {}, no "{}" value in {}'
.format(test_name, self.model_name, column_key, dct)
)
column_name = dct[column_key]
# for syntax nice-ness reasons, we define these tests as single-key
# dicts where the key is the test name.
test_value = {k: v for k, v in dct.items() if k != column_key}
value = {test_name: test_value}
LOGGER.info(
'found a test for model {}, column {} - arguments: {}'.format(
self.model_name, column_name, test_value
)
)
self.add_column_test(column_name, value)
def handle_unknown_test(self, test_name, test_values):
if all(map(is_column_name, test_values)):
LOGGER.debug(
'Found custom test named {}, inferred that it only takes '
'columns as arguments'.format(test_name)
)
self.handle_simple_column_test(test_name, test_values)
else:
LOGGER.warning(
'Found a custom test named {} that appears to take extra '
'arguments. Converting it to a model-level test'.format(
test_name
)
)
for test_value in test_values:
self.add_table_test(test_name, test_value)
def populate_test(self, test_name, test_values):
if not isinstance(test_values, list):
raise OperationalError(
'Expected type "list" for test values in constraints '
'under test {} inside model {}, got "{}"'.format(
test_name, self.model_name, type(test_values)
)
)
if test_name in self._simple_column_tests:
self.handle_simple_column_test(test_name, test_values)
elif test_name in self._complex_column_tests:
self.handle_complex_column_test(test_name, test_values)
else:
self.handle_unknown_test(test_name, test_values)
def populate_from_constraints(self, constraints):
for test_name, test_values in constraints.items():
self.populate_test(test_name, test_values)
def generate_model_dict(self):
model = {'name': self.model_name}
if self.model_tests:
model['tests'] = self.model_tests
if self.columns:
model['columns'] = sorted_column_list(self.columns)
return CustomSortedModelsSchema(**model)
def convert_schema(initial, extra_complex_tests):
models = []
for model_name, model_data in initial.items():
if 'constraints' not in model_data:
# don't care about this model
continue
builder = ModelTestBuilder(model_name, extra_complex_tests)
builder.populate_from_constraints(model_data['constraints'])
model = builder.generate_model_dict()
models.append(model)
return CustomSortedRootSchema(version=2, models=models)
class CustomSortedSchema(dict):
ITEMS_ORDER = NotImplemented
@classmethod
def _items_keyfunc(cls, items):
key = items[0]
if key not in cls.ITEMS_ORDER:
return len(cls.ITEMS_ORDER)
else:
return cls.ITEMS_ORDER.index(key)
@staticmethod
def representer(self, data):
"""Note that 'self' here is NOT an instance of CustomSortedSchema, but
of some yaml thing.
"""
parent_iter = data.items()
good_iter = sorted(parent_iter, key=data._items_keyfunc)
return self.represent_mapping('tag:yaml.org,2002:map', good_iter)
class CustomSortedRootSchema(CustomSortedSchema):
ITEMS_ORDER = ['version', 'models']
class CustomSortedModelsSchema(CustomSortedSchema):
ITEMS_ORDER = ['name', 'columns', 'tests']
class CustomSortedColumnsSchema(CustomSortedSchema):
ITEMS_ORDER = ['name', 'tests']
for cls in (CustomSortedRootSchema, CustomSortedModelsSchema, CustomSortedColumnsSchema):
yaml.add_representer(cls, cls.representer)
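# With the representers registered above, yaml.dump emits these mappings in
# their fixed ITEMS_ORDER (e.g. 'name' before 'columns' before 'tests' for a
# model) instead of the default alphabetical key order.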
if __name__ == '__main__':
main()
else:
# a cute trick so we only import/run these things under nose.
import mock # noqa
import unittest # noqa
SAMPLE_SCHEMA = '''
foo:
constraints:
not_null:
- id
- email
- favorite_color
unique:
- id
- email
accepted_values:
- { field: favorite_color, values: ['blue', 'green'] }
- { field: likes_puppies, values: ['yes'] }
simple_custom:
- id
- favorite_color
known_complex_custom:
- { field: likes_puppies, arg1: test }
# becomes a table-level test
complex_custom:
- { field: favorite_color, arg1: test, arg2: ref('bar') }
bar:
constraints:
not_null:
- id
'''
EXPECTED_OBJECT_OUTPUT = [
{
'name': 'bar',
'columns': [
{
'name': 'id',
'tests': [
'not_null'
]
}
]
},
{
'name': 'foo',
'columns': [
{
'name': 'email',
'tests': [
'not_null',
'unique',
],
},
{
'name': 'favorite_color',
'tests': [
{'accepted_values': {
'values': ['blue', 'green']}
},
'not_null',
'simple_custom',
],
},
{
'name': 'id',
'tests': [
'not_null',
'simple_custom',
'unique',
],
},
{
'name': 'likes_puppies',
'tests': [
{'accepted_values': {'values': ['yes']}},
{'known_complex_custom': {'arg1': 'test'}},
]
},
],
'tests': [
{'complex_custom': {
'field': 'favorite_color',
'arg1': 'test',
'arg2': "ref('bar')"
}},
],
},
]
class TestConvert(unittest.TestCase):
maxDiff = None
def test_convert(self):
input_schema = yaml.safe_load(SAMPLE_SCHEMA)
output_schema = convert_schema(input_schema,
{'known_complex_custom': 'field'})
self.assertEqual(output_schema['version'], 2)
sorted_models = sorted(output_schema['models'],
key=lambda x: x['name'])
self.assertEqual(sorted_models, EXPECTED_OBJECT_OUTPUT)
def test_parse_validate_and_mutate_args_simple(self):
args = ['my-input']
parsed = parse_args(args)
self.assertEqual(parsed.search_directory, 'my-input')
with self.assertRaises(OperationalError):
validate_and_mutate_args(parsed)
with mock.patch('os.path.exists') as exists:
exists.return_value = True
validate_and_mutate_args(parsed)
# validate will mutate this to be a dict
self.assertEqual(parsed.extra_complex_tests, {})
def test_parse_validate_and_mutate_args_extra_tests(self):
args = [
'--complex-test', 'known_complex_custom:field',
'--complex-test', 'other_complex_custom:column',
'my-input'
]
parsed = parse_args(args)
with mock.patch('os.path.exists') as exists:
exists.return_value = True
validate_and_mutate_args(parsed)
self.assertEqual(
parsed.extra_complex_tests,
{
'known_complex_custom': 'field',
'other_complex_custom': 'column'
}
)
|
|
#!/usr/bin/python
# write out the data in a form useful to pass to the sba (demo) program
# it appears camera poses are basically given as [ R | t ] where R is
# the same R we use throughout and t is the 'tvec'
# todo, run sba and automatically parse output ...
import sys
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import argparse
import cPickle as pickle
import math
import numpy as np
sys.path.append('../lib')
import Matcher
import ProjectMgr
import SBA1
import transformations
d2r = math.pi / 180.0 # a helpful constant
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()
# return a 3d affine transformation between current camera locations
# and original camera locations.
def get_recenter_affine(src_list, dst_list):
src = [[], [], [], []] # current camera locations
dst = [[], [], [], []] # original camera locations
for i in range(len(src_list)):
src_ned = src_list[i]
src[0].append(src_ned[0])
src[1].append(src_ned[1])
src[2].append(src_ned[2])
src[3].append(1.0)
dst_ned = dst_list[i]
dst[0].append(dst_ned[0])
dst[1].append(dst_ned[1])
dst[2].append(dst_ned[2])
dst[3].append(1.0)
print "%s <-- %s" % (dst_ned, src_ned)
A = transformations.superimposition_matrix(src, dst, scale=True)
print "A:\n", A
return A
# transform a point list given an affine transform matrix
def transform_points( A, pts_list ):
src = [[], [], [], []]
for p in pts_list:
src[0].append(p[0])
src[1].append(p[1])
src[2].append(p[2])
src[3].append(1.0)
dst = A.dot( np.array(src) )
result = []
for i in range(len(pts_list)):
result.append( [ float(dst[0][i]),
float(dst[1][i]),
float(dst[2][i]) ] )
return result
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
#m = Matcher.Matcher()
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
print "unique features:", len(matches_direct)
image_width = proj.image_list[0].width
camw, camh = proj.cam.get_image_params()
scale = float(image_width) / float(camw)
print 'scale:', scale
sba = SBA1.SBA1(args.project)
sba.prepair_data( proj.image_list, matches_direct, proj.cam.get_K(scale) )
cameras, features = sba.run_live()
for i, image in enumerate(proj.image_list):
orig = image.camera_pose
new = cameras[i]
newq = np.array( [ new[0], new[1], new[2], new[3] ] )
tvec = np.array( [ new[4], new[5], new[6] ] )
Rned2cam = transformations.quaternion_matrix(newq)[:3,:3]
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
Rbody2ned = np.matrix(Rned2body).T
(yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
#print "orig ypr =", image.camera_pose['ypr']
#print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
pos = -np.matrix(Rned2cam).T * np.matrix(tvec).T
newned = pos.T[0].tolist()[0]
#print "orig ned =", image.camera_pose['ned']
#print "new ned =", newned
image.set_camera_pose_sba( ned=newned, ypr=[yaw/d2r, pitch/d2r, roll/d2r] )
# now count how many features show up in each image
for i in proj.image_list:
i.feature_count = 0
for i, match in enumerate(matches_direct):
for j, p in enumerate(match[1:]):
image = proj.image_list[ p[0] ]
image.feature_count += 1
# compare original camera locations with sba camera locations and
# derive a transform matrix to 'best fit' the new camera locations
# over the original ... trusting the original group gps solution as
# our best absolute truth for positioning the system in world
# coordinates.
src_list = []
dst_list = []
for image in proj.image_list:
if image.feature_count >= 25:
# only consider images that are in the fitted set
ned, ypr, quat = image.get_camera_pose_sba()
src_list.append(ned)
ned, ypr, quat = image.get_camera_pose()
dst_list.append(ned)
A = get_recenter_affine(src_list, dst_list)
# extract the rotation matrix (R) from the affine transform
scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
R = transformations.euler_matrix(*angles)
print "R:\n", R
# update the sba camera locations based on best fit
camera_list = []
# load current sba poses
for image in proj.image_list:
ned, ypr, quat = image.get_camera_pose_sba()
camera_list.append( ned )
# refit
new_cams = transform_points(A, camera_list)
# update sba poses. FIXME: do we need to update orientation here as
# well? Somewhere we worked out the code, but it may not matter all
# that much ... except for later manually computing mean projection
# error.
for i, image in enumerate(proj.image_list):
ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
ned, ypr, quat = image.get_camera_pose_sba()
Rbody2ned = image.get_body2ned_sba()
# update the orientation with the same transform to keep
# everything in proper consistent alignment
newRbody2ned = R[:3,:3].dot(Rbody2ned)
(yaw, pitch, roll) = transformations.euler_from_matrix(newRbody2ned, 'rzyx')
image.set_camera_pose_sba(ned=new_cams[i],
ypr=[yaw/d2r, pitch/d2r, roll/d2r])
print 'image:', image.name
print ' orig pos:', ned_orig
print ' fit pos:', new_cams[i]
print ' dist moved:', np.linalg.norm( np.array(ned_orig) - np.array(new_cams[i]))
image.save_meta()
# update the sba point locations based on same best fit transform
# derived from the cameras (remember that 'features' is the point
# features structure spit out by the SBA process)
feature_list = []
for f in features:
feature_list.append( f.tolist() )
new_feats = transform_points(A, feature_list)
# create the matches_sba list (copy) and update the ned coordinate
matches_sba = list(matches_direct)
for i, match in enumerate(matches_sba):
#print type(new_feats[i])
matches_sba[i][0] = new_feats[i]
# write out the updated match_dict
print "Writing match_sba file ...", len(matches_sba), 'features'
pickle.dump(matches_sba, open(args.project + "/matches_sba", "wb"))
# collect/group match chains that refer to the same keypoint
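# each match record is [ned_coord, (image_idx, feat_idx), (image_idx, feat_idx), ...];
# the loop below repeatedly merges records that share an (image, feature) pair
# until a full pass produces no further merges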
matches_tmp = list(matches_sba)
count = 0
done = False
while not done:
print "Iteration:", count
count += 1
matches_new = []
matches_lookup = {}
for i, match in enumerate(matches_tmp):
# scan if any of these match points have been previously seen
# and record the match index
index = -1
for p in match[1:]:
key = "%d-%d" % (p[0], p[1])
if key in matches_lookup:
index = matches_lookup[key]
break
if index < 0:
# not found, append to the new list
for p in match[1:]:
key = "%d-%d" % (p[0], p[1])
matches_lookup[key] = len(matches_new)
matches_new.append(match)
else:
# found a previous reference, append these match items
existing = matches_new[index]
                # only append items that don't already exist in the earlier
                # match, and only one match per image (!)
for p in match[1:]:
key = "%d-%d" % (p[0], p[1])
found = False
for e in existing[1:]:
if p[0] == e[0]:
found = True
break
if not found:
# add
existing.append(p)
matches_lookup[key] = index
# print "new:", existing
# print
if len(matches_new) == len(matches_tmp):
done = True
else:
matches_tmp = matches_new
matches_group = matches_tmp
# write out the updated match_dict
print "Writing match_group file ...", len(matches_group), 'features'
pickle.dump(matches_group, open(args.project + "/matches_group", "wb"))
|
|
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Manila."""
import os.path
import mock
from oslo_config import cfg
from oslo_policy import policy as common_policy
import six
from six.moves.urllib import request as urlrequest
from manila import context
from manila import exception
from manila import policy
from manila import test
from manila import utils
CONF = cfg.CONF
class PolicyFileTestCase(test.TestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
# since is_admin is defined by policy, create context before reset
self.context = context.RequestContext('fake', 'fake')
policy.reset()
self.target = {}
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
CONF.set_override('policy_file', tmpfilename, group='oslo_policy')
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": []}""")
policy.init(tmpfilename)
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": ["false:false"]}""")
# NOTE(vish): reset stored policy cache so we don't have to
# sleep(1)
policy._ENFORCER.load_rules(True)
self.assertRaises(
exception.PolicyNotAuthorized,
policy.enforce,
self.context,
action,
self.target,
)
class PolicyTestCase(test.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
policy.reset()
policy.init()
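        # NOTE: in this legacy list-of-lists rule syntax the outer list is
        # OR-ed and each inner list is AND-ed, e.g.
        # [["role:admin"], ["role:sysadmin"]] means
        # "role:admin or role:sysadmin" (exercised by the early_and_fail /
        # early_or_success rules below).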
self.rules = {
"true": [],
"example:allowed": [],
"example:denied": [["false:false"]],
"example:get_http": [["http:http://www.example.com"]],
"example:my_file": [["role:compute_admin"],
["project_id:%(project_id)s"]],
"example:early_and_fail": [["false:false", "rule:true"]],
"example:early_or_success": [["rule:true"], ["false:false"]],
"example:lowercase_admin": [["role:admin"], ["role:sysadmin"]],
"example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]],
}
self._set_rules()
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
def tearDown(self):
policy.reset()
super(PolicyTestCase, self).tearDown()
def _set_rules(self):
these_rules = common_policy.Rules.from_dict(self.rules)
policy._ENFORCER.set_rules(these_rules)
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_good_action(self):
action = "example:allowed"
policy.enforce(self.context, action, self.target)
def test_enforce_http_true(self):
def fakeurlopen(url, post_data):
return six.StringIO("True")
action = "example:get_http"
target = {}
with mock.patch.object(urlrequest, 'urlopen', fakeurlopen):
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_http_false(self):
def fakeurlopen(url, post_data):
return six.StringIO("False")
action = "example:get_http"
target = {}
with mock.patch.object(urlrequest, 'urlopen', fakeurlopen):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_templatized_enforcement(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
action = "example:my_file"
policy.enforce(self.context, action, target_mine)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
'fake',
roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(test.TestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.rules = {
"default": [],
"example:exist": "false:false"
}
self._set_rules('default')
self.context = context.RequestContext('fake', 'fake')
def tearDown(self):
super(DefaultPolicyTestCase, self).tearDown()
policy.reset()
def _set_rules(self, default_rule):
these_rules = common_policy.Rules.from_dict(self.rules,
default_rule=default_rule)
policy._ENFORCER.set_rules(these_rules)
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
def test_default_not_found(self):
new_default_rule = "default_noexist"
# FIXME(gyee): need to overwrite the Enforcer's default_rule first
# as it is recreating the rules with its own default_rule instead
# of the default_rule passed in from set_rules(). I think this is a
# bug in Oslo policy.
policy._ENFORCER.default_rule = new_default_rule
self._set_rules(new_default_rule)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:noexist", {})
class ContextIsAdminPolicyTestCase(test.TestCase):
def setUp(self):
super(ContextIsAdminPolicyTestCase, self).setUp()
policy.reset()
policy.init()
def _set_rules(self, rules, default_rule):
these_rules = common_policy.Rules.from_dict(rules,
default_rule=default_rule)
policy._ENFORCER.set_rules(these_rules)
def test_default_admin_role_is_admin(self):
ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
self.assertFalse(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['admin'])
self.assertTrue(ctx.is_admin)
def test_custom_admin_role_is_admin(self):
        # define explicit rules for context_is_admin
rules = {
'context_is_admin': [["role:administrator"], ["role:johnny-admin"]]
}
self._set_rules(rules, CONF.oslo_policy.policy_default_rule)
ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
self.assertTrue(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['administrator'])
self.assertTrue(ctx.is_admin)
# default rule no longer applies
ctx = context.RequestContext('fake', 'fake', roles=['admin'])
self.assertFalse(ctx.is_admin)
def test_context_is_admin_undefined(self):
rules = {
"admin_or_owner": "role:admin or project_id:%(project_id)s",
"default": "rule:admin_or_owner",
}
self._set_rules(rules, CONF.oslo_policy.policy_default_rule)
ctx = context.RequestContext('fake', 'fake')
self.assertFalse(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['admin'])
self.assertTrue(ctx.is_admin)
|
|
""" Classification and decoding related tools """
import numpy as np
from functools import reduce
from sklearn.feature_selection import SelectKBest
import re
from six import string_types
def feature_selection(feat_select, X, y):
"""" Implements various kinds of feature selection """
# K-best
if re.match('.*-best', feat_select) is not None:
n = int(feat_select.split('-')[0])
selector = SelectKBest(k=n)
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
        # use the boolean support mask directly; comparing a numpy array
        # with 'is True' is always False and would select nothing
        features_selected = np.where(
            selector.fit(X, y).get_support())[0]
elif re.match('.*-randombest', feat_select) is not None:
n = int(feat_select.split('-')[0])
from random import shuffle
        features = list(range(0, X.shape[1]))
shuffle(features)
features_selected = features[:n]
return features_selected
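# A minimal usage sketch (X and y are illustrative arrays): '100-best' keeps
# the 100 highest-scoring features, '100-randombest' keeps 100 random ones:
#
#   cols = feature_selection('100-best', X, y)
#   X_reduced = X[:, cols]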
def get_score(X, y, clf, scoring='accuracy'):
prediction = clf.predict(X)
if scoring == 'accuracy':
from sklearn.metrics import accuracy_score
score = accuracy_score(y, prediction)
elif scoring == 'f1':
from sklearn.metrics import f1_score
score = f1_score(y, prediction)
else:
score = scoring(y, prediction.squeeze())
return prediction, score
def classify_by_features(dataset, features, studies=None, method='SVM',
scikit_classifier=None):
pass
def regularize(X, method='scale'):
if method == 'scale':
from sklearn import preprocessing
return preprocessing.scale(X, with_mean=False)
else:
raise Exception('Unrecognized regularization method')
def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True,
studies=None, features=None,
regularization="scale"):
""" Set up data for a classification task given a set of masks
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
    filters by studies and features, and returns a studies-by-features matrix
(X) and class labels (y)
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
        regularization: Optional string naming the regularization method to
            apply to X (see regularize()); if falsy, no regularization is done
Returns:
A tuple (X, y) of np arrays.
        X is a studies-by-features matrix and y is a vector of class labels
"""
import nibabel as nib
import os
# Load masks using NiBabel
try:
loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
    except OSError:
        # re-raise so the real error is not hidden behind a NameError on
        # the undefined 'loaded_masks' below
        print('Error loading masks. Check the path')
        raise
# Get a list of studies that activate for each mask file--i.e., a list of
# lists
grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold)
for m in loaded_masks]
# Flattened ids
flat_ids = reduce(lambda a, b: a + b, grouped_ids)
# Remove duplicates
if remove_overlap:
import collections
flat_ids = [id for (id, count) in
collections.Counter(flat_ids).items() if count == 1]
grouped_ids = [[x for x in m if x in flat_ids] for m in
grouped_ids] # Remove
# Create class label(y)
y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
y = reduce(lambda a, b: a + b, y) # Flatten
y = np.array(y)
# Extract feature set for each class separately
X = [dataset.get_feature_data(ids=group_ids, features=features)
for group_ids in grouped_ids]
X = np.vstack(tuple(X))
if regularization:
X = regularize(X, method=regularization)
return (X, y)
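# A minimal usage sketch (mask paths are illustrative): build the design
# matrix and labels from two region masks, then hand them to classify():
#
#   X, y = get_studies_by_regions(dataset,
#                                 ['amygdala.nii.gz', 'insula.nii.gz'],
#                                 threshold=0.08)
#   summary = classify(X, y, clf_method='ERF', cross_val='4-Fold')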
def get_feature_order(dataset, features):
""" Returns a list with the order that features requested appear in
dataset """
all_features = dataset.get_feature_names()
i = [all_features.index(f) for f in features]
return i
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
        masks: a list of paths to Nifti masks
method: a string indicating which method to used.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
        remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
set used in classification. If None, will use all features in
the dataset.
class_weight: Parameter to pass to classifier determining how to
weight classes
classifier: An optional sci-kit learn classifier to use instead of
pre-set up classifiers set up using 'method'
cross_val: A string indicating type of cross validation to use.
            Can also pass a scikit-learn cross-validation iterator
param_grid: A dictionary indicating which parameters to optimize
using GridSearchCV. If None, no GridSearch will be used
Returns:
        A summary dictionary, a trained classifier, or both, depending on the
        `output` argument (see `classify`)
"""
(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
studies, features,
regularization=regularization)
return classify(X, y, method, classifier, output, cross_val,
class_weight, scoring=scoring, param_grid=param_grid)
def classify(X, y, clf_method='ERF', classifier=None, output='summary_clf',
cross_val=None, class_weight=None, regularization=None,
param_grid=None, scoring='accuracy', refit_all=True,
feat_select=None):
""" Wrapper for scikit-learn classification functions
    Implements various types of classification and cross validation """
# Build classifier
clf = Classifier(clf_method, classifier, param_grid)
# Fit & test model with or without cross-validation
if cross_val is not None:
score = clf.cross_val_fit(X, y, cross_val, scoring=scoring,
feat_select=feat_select,
class_weight=class_weight)
else:
# Does not support scoring function
score = clf.fit(X, y, class_weight=class_weight).score(X, y)
# Return some stuff...
from collections import Counter
if output == 'clf':
return clf
else:
if output == 'summary':
output = {'score': score, 'n': dict(Counter(y))}
elif output == 'summary_clf':
output = {
'score': score,
'n': dict(Counter(y)),
'clf': clf,
'features_selected': clf.features_selected,
'predictions': clf.predictions
}
return output
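# A minimal usage sketch (X and y are illustrative arrays): with
# output='summary' the call returns a dict with the cross-validated score and
# the per-class counts:
#
#   result = classify(X, y, clf_method='SVM', cross_val='4-Fold',
#                     output='summary')
#   print(result['score'], result['n'])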
class Classifier:
def __init__(self, clf_method='ERF', classifier=None, param_grid=None):
""" Initialize a new classifier instance """
# Set classifier
self.features_selected = None
self.predictions = None
if classifier is not None:
self.clf = classifier
from sklearn.svm import LinearSVC
import random
if isinstance(self.clf, LinearSVC):
                self.clf.set_params(random_state=random.randint(0, 200))
else:
if clf_method == 'SVM':
from sklearn import svm
self.clf = svm.SVC()
elif clf_method == 'ERF':
from sklearn.ensemble import ExtraTreesClassifier
self.clf = ExtraTreesClassifier(
n_estimators=100, max_depth=None, min_samples_split=2,  # sklearn requires >= 2
random_state=0)
elif clf_method == 'GBC':
from sklearn.ensemble import GradientBoostingClassifier
self.clf = GradientBoostingClassifier(n_estimators=100,
max_depth=1)
elif clf_method == 'Dummy':
from sklearn.dummy import DummyClassifier
self.clf = DummyClassifier(strategy='stratified')
else:
raise Exception('Unrecognized classification method')
if isinstance(param_grid, dict):
from sklearn.model_selection import GridSearchCV
self.clf = GridSearchCV(estimator=self.clf,
param_grid=param_grid)
def fit(self, X, y, cv=None, class_weight='auto'):
""" Fits X to outcomes y, using clf """
# TODO: incorporate error checking, e.g.:
# if isinstance(self.classifier, ScikitClassifier):
#     do one thing
# otherwise do another
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
self.clf = self.clf.fit(X, y)
return self.clf
def set_class_weight(self, class_weight='auto', y=None):
""" Sets the class_weight of the classifier to match y """
if class_weight is None:
cw = None
try:
self.clf.set_params(class_weight=cw)
except ValueError:
pass
elif class_weight == 'auto':
c = np.bincount(y)
ii = np.nonzero(c)[0]
c = c / float(c.sum())
cw = dict(zip(ii[::-1], c[ii]))
try:
self.clf.set_params(class_weight=cw)
except ValueError:
import warnings
warnings.warn(
"Tried to set class_weight, but failed. The classifier "
"probably doesn't support it")
def cross_val_fit(self, X, y, cross_val='4-Fold', scoring='accuracy',
feat_select=None, class_weight='auto'):
""" Fits X to outcomes y, using clf and cv_method """
# from sklearn.model_selection import cross_validation
from sklearn import model_selection
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
# Set cross validator
if isinstance(cross_val, string_types):
if re.match('.*-Fold', cross_val) is not None:
n = int(cross_val.split('-')[0])
self.cver = model_selection.StratifiedKFold(n_splits=n).split(X, y)
# self.cver = model_selection.StratifiedKFold(self.y, n)
else:
raise Exception('Unrecognized cross validation method')
else:
self.cver = cross_val
if feat_select is not None:
self.features_selected = []
# Perform cross-validated classification
# from sklearn.grid_search import GridSearchCV
if isinstance(self.clf, model_selection.GridSearchCV):
import warnings
if feat_select is not None:
warnings.warn(
"Cross-validated feature selection not supported with "
"GridSearchCV")
self.clf.set_params(cv=self.cver, scoring=scoring)
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
self.clf = self.clf.fit(X, y)
self.cvs = self.clf.best_score_
else:
self.cvs = self.feat_select_cvs(
feat_select=feat_select, scoring=scoring)
if feat_select is not None:
fs = feature_selection(
feat_select, X, y)
self.features_selected.append(fs)
X = X[:, fs]
self.clf.fit(X, y)
return self.cvs.mean()
def feat_select_cvs(self, scoring=None, feat_select=None):
""" Returns cross validated scores (just like cross_val_score),
but includes feature selection as part of the cross validation loop """
scores = []
self.predictions = []
for train, test in self.cver:
X_train, X_test, y_train, y_test = self.X[
train], self.X[test], self.y[train], self.y[test]
if feat_select is not None:
# Get which features are kept
fs = feature_selection(
feat_select, X_train, y_train)
self.features_selected.append(fs)
# Filter X to only keep selected features
X_train, X_test = X_train[
:, fs], X_test[:, fs]
# Set scoring (not implemented, as accuracy is the default)
# Train classifier
self.clf.fit(X_train, y_train)
# Test classifier
prediction, s = get_score(
X_test, y_test, self.clf, scoring=scoring)
scores.append(s)
self.predictions.append((y_test, prediction))
return np.array(scores)
def fit_dataset(self, dataset, y, features=None,
feature_type='features'):
""" Given a dataset, fits either features or voxels to y """
# Get data from dataset
if feature_type == 'features':
X = np.rot90(dataset.feature_table.data.toarray())
elif feature_type == 'voxels':
X = np.rot90(dataset.image_table.data.toarray())
self.clf.fit(X, y)
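# A minimal sketch of driving Classifier directly on synthetic data
# (assumes scikit-learn is installed; shapes and labels are illustrative):
#
#   import numpy as np
#   X = np.random.rand(40, 5)
#   y = np.repeat([0, 1], 20)
#   clf = Classifier('SVM')
#   mean_accuracy = clf.cross_val_fit(X, y, cross_val='4-Fold')
#   fold_predictions = clf.predictions  # list of (y_test, y_pred) per fold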
|
|
from __future__ import absolute_import
import django
import six
from six import string_types
from sentry.utils.compat import implements_to_string
def cmp(a, b):
return (a > b) - (a < b)
class Bit(object):
"""
Represents a single Bit.
"""
def __init__(self, number, is_set=True):
self.number = number
self.is_set = bool(is_set)
self.mask = 2 ** int(number)
self.children = []
if not self.is_set:
self.mask = ~self.mask
def __repr__(self):
return "<%s: number=%d, is_set=%s>" % (self.__class__.__name__, self.number, self.is_set)
# def __str__(self):
# if self.is_set:
# return 'Yes'
# return 'No'
def __int__(self):
return self.mask
def __bool__(self):
return self.is_set
__nonzero__ = __bool__
def __eq__(self, value):
if isinstance(value, Bit):
return value.number == self.number and value.is_set == self.is_set
elif isinstance(value, bool):
return value == self.is_set
elif isinstance(value, int):
return value == self.mask
return value == self.is_set
def __ne__(self, value):
return not self == value
def __coerce__(self, value):
return (self.is_set, bool(value))
def __invert__(self):
return self.__class__(self.number, not self.is_set)
def __and__(self, value):
if isinstance(value, Bit):
value = value.mask
return value & self.mask
def __rand__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask & value
def __or__(self, value):
if isinstance(value, Bit):
value = value.mask
return value | self.mask
def __ror__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask | value
def __lshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return value << self.mask
def __rlshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask << value
def __rshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return value >> self.mask
def __rrshift__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask >> value
def __xor__(self, value):
if isinstance(value, Bit):
value = value.mask
return value ^ self.mask
def __rxor__(self, value):
if isinstance(value, Bit):
value = value.mask
return self.mask ^ value
def __sentry__(self):
return repr(self)
def evaluate(self, evaluator, qn, connection):
return self.mask, []
def prepare(self, evaluator, query, allow_joins):
return evaluator.prepare_node(self, query, allow_joins)
@implements_to_string
class BitHandler(object):
"""
Represents an array of bits, each as a ``Bit`` object.
"""
def __init__(self, value, keys, labels=None):
# TODO: change to bitarray?
if value:
self._value = int(value)
else:
self._value = 0
self._keys = keys
self._labels = labels is not None and labels or keys
def __eq__(self, other):
if not isinstance(other, BitHandler):
return False
return self._value == other._value
def __lt__(self, other):
return int(self._value) < other
def __le__(self, other):
return int(self._value) <= other
def __gt__(self, other):
return int(self._value) > other
def __ge__(self, other):
return int(self._value) >= other
def __cmp__(self, other):
return cmp(self._value, other)
def __repr__(self):
return "<%s: %s>" % (
self.__class__.__name__,
", ".join("%s=%s" % (k, self.get_bit(n).is_set) for n, k in enumerate(self._keys)),
)
def __str__(self):
return six.text_type(self._value)
def __int__(self):
return self._value
def __bool__(self):
return bool(self._value)
__nonzero__ = __bool__
def __and__(self, value):
return BitHandler(self._value & int(value), self._keys)
def __or__(self, value):
return BitHandler(self._value | int(value), self._keys)
def __add__(self, value):
return BitHandler(self._value + int(value), self._keys)
def __sub__(self, value):
return BitHandler(self._value - int(value), self._keys)
def __lshift__(self, value):
return BitHandler(self._value << int(value), self._keys)
def __rshift__(self, value):
return BitHandler(self._value >> int(value), self._keys)
def __xor__(self, value):
return BitHandler(self._value ^ int(value), self._keys)
def __contains__(self, key):
bit_number = self._keys.index(key)
return bool(self.get_bit(bit_number))
def __getattr__(self, key):
if key.startswith("_"):
return object.__getattribute__(self, key)
if key not in self._keys:
raise AttributeError("%s is not a valid flag" % key)
return self.get_bit(self._keys.index(key))
__getitem__ = __getattr__
def __setattr__(self, key, value):
if key.startswith("_"):
return object.__setattr__(self, key, value)
if key not in self._keys:
raise AttributeError("%s is not a valid flag" % key)
self.set_bit(self._keys.index(key), value)
__setitem__ = __setattr__
def __iter__(self):
return self.iteritems() # NOQA
def __sentry__(self):
return repr(self)
def _get_mask(self):
return self._value
mask = property(_get_mask)
def evaluate(self, evaluator, qn, connection):
return self.mask, []
def get_bit(self, bit_number):
mask = 2 ** int(bit_number)
return Bit(bit_number, self._value & mask != 0)
def set_bit(self, bit_number, true_or_false):
mask = 2 ** int(bit_number)
if true_or_false:
self._value |= mask
else:
self._value &= ~mask
return Bit(bit_number, self._value & mask != 0)
def keys(self):
return self._keys
def iterkeys(self):
return iter(self._keys)
def items(self):
return list(self.iteritems()) # NOQA
def iteritems(self):
for k in self._keys:
yield (k, getattr(self, k).is_set)
def get_label(self, flag):
if isinstance(flag, string_types):
flag = self._keys.index(flag)
if isinstance(flag, Bit):
flag = flag.number
return self._labels[flag]
if django.VERSION[:2] >= (1, 8):
from django.core.exceptions import ImproperlyConfigured
# We need to register adapters in Django 1.8 in order to prevent
# "ProgrammingError: can't adapt type"
try:
from django.db.backends.postgresql_psycopg2.base import Database
Database.extensions.register_adapter(Bit, lambda x: Database.extensions.AsIs(int(x)))
Database.extensions.register_adapter(BitHandler, lambda x: Database.extensions.AsIs(int(x)))
except ImproperlyConfigured:
pass
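# A minimal usage sketch (flag names are illustrative):
#
#   flags = BitHandler(0, ['read', 'write', 'admin'])
#   flags.write = True       # sets bit 1, so int(flags) == 2
#   bool(flags.read)         # False
#   'admin' in flags         # False
#   int(flags | 0b100)       # bitwise ops return a new BitHandler; == 6
#   dict(flags.items())      # {'read': False, 'write': True, 'admin': False}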
|
|
"""
Encode Parrot Bebop commands
(possible usage also as parsing command log files)
usage:
./commands.py <cmd log file>
"""
import time
import struct
from threading import Thread, Event, Lock
from collections import defaultdict
def takeoffCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_TAKEOFF = 1,
return struct.pack("BBH", 1, 0, 1)
def landCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_LANDING = 3,
return struct.pack("BBH", 1, 0, 3)
def emergencyCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_EMERGENCY = 4,
return struct.pack("BBH", 1, 0, 4)
def trimCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_FLATTRIM = 0,
return struct.pack("BBH", 1, 0, 0)
def movePCMDCmd( active, roll, pitch, yaw, gaz ):
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_PCMD = 2,
psi = 0.0 # Magnetic north heading of the controlling device (deg) [-180;180]
flag = 0
if active:
flag = 1
return struct.pack("<BBHBbbbbf", 1, 0, 2, flag, roll, pitch, yaw, gaz, psi )
def videoAutorecordingCmd( enabled=True ):
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PICTURESETTINGS = 19,
# ARCOMMANDS_ID_ARDRONE3_PICTURESETTINGS_CMD_VIDEOAUTORECORDSELECTION = 5,
massStorageId = 0 # internal ??
if enabled:
return struct.pack("BBHBB", 1, 19, 5, 1, massStorageId)
else:
return struct.pack("BBHBB", 1, 19, 5, 0, massStorageId)
def takePictureCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_MEDIARECORD = 7,
# ARCOMMANDS_ID_ARDRONE3_MEDIARECORD_CMD_PICTURE = 0,
# ARCOMMANDS_ID_ARDRONE3_MEDIARECORD_CMD_PICTURE_V2 = 2,
massStorageId = 0 # internal ??
return struct.pack("BBHB", 1, 7, 2, massStorageId)
def videoRecordingCmd( on=True ):
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_MEDIARECORD = 7,
# ARCOMMANDS_ID_ARDRONE3_MEDIARECORD_CMD_VIDEO = 1
massStorageId = 0 # internal ??
if on:
return struct.pack("BBHIB", 1, 7, 1, 1, massStorageId)
else:
return struct.pack("BBHIB", 1, 7, 1, 0, massStorageId)
def setDateCmd( date ):
# ARCOMMANDS_ID_PROJECT_COMMON = 0,
# ARCOMMANDS_ID_COMMON_CLASS_COMMON = 4,
# ARCOMMANDS_ID_COMMON_COMMON_CMD_CURRENTDATE = 1,
# Date with ISO-8601 format
return struct.pack("BBH", 0, 4, 1) + date.isoformat() + '\0'
def setTimeCmd( time ):
# ARCOMMANDS_ID_PROJECT_COMMON = 0,
# ARCOMMANDS_ID_COMMON_CLASS_COMMON = 4,
# ARCOMMANDS_ID_COMMON_COMMON_CMD_CURRENTTIME = 2,
# Time with ISO-8601 format
# note that "time.isoformat()" did not work: '19:39:22.887000' includes milliseconds
return struct.pack("BBH", 0, 4, 2) + time.strftime("T%H%M%S+0000") + '\0'
def setSpeedSettingsCmdList( maxVerticalSpeed, maxRotationSpeed, hullProtection, outdoor ):
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_SPEEDSETTINGS = 11,
# ARCOMMANDS_ID_ARDRONE3_SPEEDSETTINGS_CMD_MAXVERTICALSPEED = 0,
return [ struct.pack("BBHf", 1, 11, 0, maxVerticalSpeed), # Current max vertical speed in m/s
struct.pack("BBHf", 1, 11, 1, maxRotationSpeed), # Current max rotation speed in degree/s
struct.pack("BBHB", 1, 11, 2, hullProtection),
struct.pack("BBHB", 1, 11, 3, outdoor) ]
def videoStreamingCmd( enable ):
"enable video stream?"
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_MEDIASTREAMING = 21,
# ARCOMMANDS_ID_ARDRONE3_MEDIASTREAMING_CMD_VIDEOENABLE = 0,
return struct.pack("BBHB", 1, 21, 0, enable)
def requestAllSettingsCmd():
# ARCOMMANDS_ID_PROJECT_COMMON = 0,
# ARCOMMANDS_ID_COMMON_CLASS_SETTINGS = 2,
# ARCOMMANDS_ID_COMMON_SETTINGS_CMD_ALLSETTINGS = 0
return struct.pack("BBH", 0, 2, 0)
def requestAllStatesCmd():
# ARCOMMANDS_ID_PROJECT_COMMON = 0,
# ARCOMMANDS_ID_COMMON_CLASS_COMMON = 4,
# ARCOMMANDS_ID_COMMON_COMMON_CMD_ALLSTATES = 0
return struct.pack("BBH", 0, 4, 0)
def moveCameraCmd( tilt, pan ):
"Tilt/Pan camera consign for the drone (in degrees)"
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_CAMERA = 1,
# ARCOMMANDS_ID_ARDRONE3_CAMERA_CMD_ORIENTATION = 0,
return struct.pack("BBHbb", 1, 1, 0, tilt, pan)
def resetHomeCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1
# ARCOMMANDS_ID_ARDRONE3_CLASS_GPSSETTINGS = 23
# ARCOMMANDS_ID_ARDRONE3_GPSSETTINGS_CMD_RESETHOME = 1
return struct.pack("BBH", 1, 23, 1)
# NOT TESTED - Aldo? Altitude?
def setHomeCmd( lat, lon, altitude=2.0 ):
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1
# ARCOMMANDS_ID_ARDRONE3_CLASS_GPSSETTINGS = 23
# ARCOMMANDS_ID_ARDRONE3_GPSSETTINGS_CMD_SETHOME = 0,
return struct.pack("<BBHddd", 1, 23, 0, lat, lon, altitude)
# NOT TESTED - Aldo?
def navigateHomeCmd( go=1 ):
"navigate home - to STOP use go=False"
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_NAVIGATEHOME = 5,
return struct.pack("<BBHB", 1, 0, 5, go)
# def moveByCmd( dX, dY, dZ, dPsi):
# # ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# # ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# # ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_MOVEBY= 7,
# # Move the drone to a relative position and rotate heading by a given angle
# # The frame is horizontal and relative to the current drone orientation:
# # - X is front
# # - Y is right
# # - Z is down
# #print ('Moving')
# return struct.pack("<BBHffff", 1, 0, 7, dX, dY, dZ, dPsi)
# NOT TESTED
def moveToCmd( lat, lon, altitude ):
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_MOVETO= 10,
# Move the drone to a specified location
# Latitude of the location (in degrees) to reach (Double)
# Longitude of the location (in degrees) to reach (Double)
# Altitude above sea level (in m) to reach (Double)
# Orientation mode of the move to (enum) [NONE, TO_TARGET, HEADING_START, HEADING_DURING]
# Heading (relative to the North in degrees).This value is only used if the orientation mode is HEADING_START or HEADING_DURING
mode = 0
heading = 0.0
return struct.pack("<BBHfffIf", 1, 0, 10, lat, lon, altitude, mode, heading)
def cancelMoveToCmd():
# ARCOMMANDS_ID_PROJECT_ARDRONE3 = 1,
# ARCOMMANDS_ID_ARDRONE3_CLASS_PILOTING = 0,
# ARCOMMANDS_ID_ARDRONE3_PILOTING_CMD_MOVETO= 11,
# Cancel the current moveTo. If there is no current moveTo, this command has no effect.
return struct.pack("<BBH", 1, 0, 11)
def packData( payload, ackRequest=False ):
frameType = 2
if ackRequest:
frameId = 11
else:
frameId = 10
buf = struct.pack("<BBBI", frameType, frameId, 0, len(payload)+7)
return buf + payload
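# Example: packData(takeoffCmd()) on a little-endian host produces the frame
#   header "<BBBI": frameType=2 (data), frameId=10 (no ack), seq=0, size=7+4=11
#   bytes: 02 0A 00 0B 00 00 00 01 00 01 00
# The sequence byte (third byte) is rewritten later by CommandSender.updateSeq().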
class CommandSender( Thread ):
"it is necessary to send PCMD with fixed frequency - Free Flight uses 40Hz/25ms"
INTERNAL_COMMAND_PREFIX = chr(0x42)
EXTERNAL_COMMAND_PREFIX = chr(0x33)
def __init__( self, commandChannel, hostPortPair ):
Thread.__init__( self )
self.setDaemon( True )
self.shouldIRun = Event()
self.shouldIRun.set()
self.lock = Lock()
self.command = commandChannel
self.hostPortPair = hostPortPair
self.seqId = defaultdict( int )
self.cmd = packData( movePCMDCmd( False, 0, 0, 0, 0 ) )
assert self.isPCMD( self.cmd )
self.index = 0
self.dropIndex = 7 # fake wifi problems
def updateSeq( self, cmd ):
"relace sequential byte based on 'channel'"
assert len(cmd) > 3, repr(cmd)
frameId = cmd[1]
self.seqId[ frameId ] += 1
return cmd[:2] + chr(self.seqId[frameId] % 256) + cmd[3:]
def isPCMD( self, cmd ):
if len(cmd) != 7+13: # BBHBbbbbf
return False
return struct.unpack("BBH", cmd[7:7+4]) == (1, 0, 2)
def send( self, cmd ):
self.lock.acquire()
self.command.separator( self.EXTERNAL_COMMAND_PREFIX )
if cmd is not None:
if self.isPCMD( cmd ):
self.cmd = cmd
self.command.separator( cmd ) # just store the command without sending it
else:
self.command.sendto( self.updateSeq(cmd), self.hostPortPair )
self.command.separator( "\xFF" )
self.lock.release()
def run( self ):
while self.shouldIRun.isSet():
self.index += 1
if self.dropIndex is None or self.index % self.dropIndex != 0:
self.lock.acquire()
self.command.separator( self.INTERNAL_COMMAND_PREFIX )
self.command.sendto( self.updateSeq(self.cmd), self.hostPortPair )
self.command.separator( "\xFF" )
self.lock.release()
time.sleep(0.025) # 40Hz
class CommandSenderReplay( CommandSender ):
"fake class to replay synced messages"
def __init__( self, commandChannel, hostPortPair, checkAsserts=True ):
CommandSender.__init__( self, commandChannel, hostPortPair )
self.checkAsserts = checkAsserts
def start( self ):
"block default Thread behavior"
print "STARTED Replay"
def send( self, cmd ):
if not self.checkAsserts:
# ignore input completely
return
prefix = self.command.debugRead(1)
while prefix == self.INTERNAL_COMMAND_PREFIX:
self.command.separator( self.updateSeq(self.cmd) ) # just verify command identity
self.command.separator( "\xFF" )
prefix = self.command.debugRead(1)
assert prefix == self.EXTERNAL_COMMAND_PREFIX, hex(ord(prefix))
if cmd is not None:
if self.isPCMD( cmd ):
self.cmd = cmd
self.command.separator( cmd ) # just verify command identity
else:
self.command.sendto( self.updateSeq(cmd), self.hostPortPair )
self.command.separator( "\xFF" )
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print __doc__
sys.exit(2)
f = open(sys.argv[1], "rb")
prefix = f.read(1)
while len(prefix) > 0:
print hex(ord(prefix))
assert prefix in [CommandSender.INTERNAL_COMMAND_PREFIX, CommandSender.EXTERNAL_COMMAND_PREFIX]
term = f.read(1)
if term != "\xFF":
header = term + f.read(6)
frameType, frameId, seqId, totalLen = struct.unpack( "<BBBI", header )
data = header + f.read( totalLen-7 )
print " ".join(["%02X" % ord(x) for x in data])
term = f.read(1)
else:
print "EMPTY"
prefix = f.read(1)
# vim: expandtab sw=4 ts=4
|
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.api.errors import (
AuthError,
Codes,
InvalidClientCredentialsError,
NotFoundError,
SynapseError,
)
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.types import JsonDict, RoomAlias
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ClientDirectoryServer(hs).register(http_server)
ClientDirectoryListServer(hs).register(http_server)
ClientAppserviceDirectoryListServer(hs).register(http_server)
class ClientDirectoryServer(RestServlet):
PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True)
def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastores().main
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
async def on_GET(self, request: Request, room_alias: str) -> Tuple[int, JsonDict]:
room_alias_obj = RoomAlias.from_string(room_alias)
res = await self.directory_handler.get_association(room_alias_obj)
return 200, res
async def on_PUT(
self, request: SynapseRequest, room_alias: str
) -> Tuple[int, JsonDict]:
room_alias_obj = RoomAlias.from_string(room_alias)
content = parse_json_object_from_request(request)
if "room_id" not in content:
raise SynapseError(
400, 'Missing params: ["room_id"]', errcode=Codes.BAD_JSON
)
logger.debug("Got content: %s", content)
logger.debug("Got room name: %s", room_alias_obj.to_string())
room_id = content["room_id"]
servers = content.get("servers")
logger.debug("Got room_id: %s", room_id)
logger.debug("Got servers: %s", servers)
# TODO(erikj): Check types.
room = await self.store.get_room(room_id)
if room is None:
raise SynapseError(400, "Room does not exist")
requester = await self.auth.get_user_by_req(request)
await self.directory_handler.create_association(
requester, room_alias_obj, room_id, servers
)
return 200, {}
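# Example request body for the PUT handler above (identifiers are illustrative):
#   {
#       "room_id": "!abc123:example.org",
#       "servers": ["example.org", "other.example.net"]
#   }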
async def on_DELETE(
self, request: SynapseRequest, room_alias: str
) -> Tuple[int, JsonDict]:
room_alias_obj = RoomAlias.from_string(room_alias)
try:
service = self.auth.get_appservice_by_req(request)
await self.directory_handler.delete_appservice_association(
service, room_alias_obj
)
logger.info(
"Application service at %s deleted alias %s",
service.url,
room_alias_obj.to_string(),
)
return 200, {}
except InvalidClientCredentialsError:
# fallback to default user behaviour if they aren't an AS
pass
requester = await self.auth.get_user_by_req(request)
user = requester.user
await self.directory_handler.delete_association(requester, room_alias_obj)
logger.info(
"User %s deleted alias %s", user.to_string(), room_alias_obj.to_string()
)
return 200, {}
class ClientDirectoryListServer(RestServlet):
PATTERNS = client_patterns("/directory/list/room/(?P<room_id>[^/]*)$", v1=True)
def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastores().main
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
async def on_GET(self, request: Request, room_id: str) -> Tuple[int, JsonDict]:
room = await self.store.get_room(room_id)
if room is None:
raise NotFoundError("Unknown room")
return 200, {"visibility": "public" if room["is_public"] else "private"}
async def on_PUT(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
visibility = content.get("visibility", "public")
await self.directory_handler.edit_published_room_list(
requester, room_id, visibility
)
return 200, {}
async def on_DELETE(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await self.directory_handler.edit_published_room_list(
requester, room_id, "private"
)
return 200, {}
class ClientAppserviceDirectoryListServer(RestServlet):
PATTERNS = client_patterns(
"/directory/list/appservice/(?P<network_id>[^/]*)/(?P<room_id>[^/]*)$", v1=True
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastores().main
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
async def on_PUT(
self, request: SynapseRequest, network_id: str, room_id: str
) -> Tuple[int, JsonDict]:
content = parse_json_object_from_request(request)
visibility = content.get("visibility", "public")
return await self._edit(request, network_id, room_id, visibility)
async def on_DELETE(
self, request: SynapseRequest, network_id: str, room_id: str
) -> Tuple[int, JsonDict]:
return await self._edit(request, network_id, room_id, "private")
async def _edit(
self, request: SynapseRequest, network_id: str, room_id: str, visibility: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if not requester.app_service:
raise AuthError(
403, "Only appservices can edit the appservice published room list"
)
await self.directory_handler.edit_published_appservice_room_list(
requester.app_service.id, network_id, room_id, visibility
)
return 200, {}
|