# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography import utils
@six.add_metaclass(abc.ABCMeta)
class Mode(object):
@abc.abstractproperty
def name(self):
"""
A string naming this mode (e.g. "ECB", "CBC").
"""
@abc.abstractmethod
def validate_for_algorithm(self, algorithm):
"""
Checks that all the necessary invariants of this (mode, algorithm)
combination are met.
"""
@six.add_metaclass(abc.ABCMeta)
class ModeWithInitializationVector(object):
@abc.abstractproperty
def initialization_vector(self):
"""
The value of the initialization vector for this mode as bytes.
"""
@six.add_metaclass(abc.ABCMeta)
class ModeWithTweak(object):
@abc.abstractproperty
def tweak(self):
"""
The value of the tweak for this mode as bytes.
"""
@six.add_metaclass(abc.ABCMeta)
class ModeWithNonce(object):
@abc.abstractproperty
def nonce(self):
"""
The value of the nonce for this mode as bytes.
"""
@six.add_metaclass(abc.ABCMeta)
class ModeWithAuthenticationTag(object):
@abc.abstractproperty
def tag(self):
"""
The value of the tag supplied to the constructor of this mode.
"""
def _check_aes_key_length(self, algorithm):
if algorithm.key_size > 256 and algorithm.name == "AES":
raise ValueError(
"Only 128, 192, and 256 bit keys are allowed for this AES mode"
)
def _check_iv_length(self, algorithm):
if len(self.initialization_vector) * 8 != algorithm.block_size:
raise ValueError("Invalid IV size ({0}) for {1}.".format(
len(self.initialization_vector), self.name
))
def _check_iv_and_key_length(self, algorithm):
_check_aes_key_length(self, algorithm)
_check_iv_length(self, algorithm)
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CBC(object):
name = "CBC"
def __init__(self, initialization_vector):
if not isinstance(initialization_vector, bytes):
raise TypeError("initialization_vector must be bytes")
self._initialization_vector = initialization_vector
initialization_vector = utils.read_only_property("_initialization_vector")
validate_for_algorithm = _check_iv_and_key_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithTweak)
class XTS(object):
name = "XTS"
def __init__(self, tweak):
if not isinstance(tweak, bytes):
raise TypeError("tweak must be bytes")
if len(tweak) != 16:
raise ValueError("tweak must be 128-bits (16 bytes)")
self._tweak = tweak
tweak = utils.read_only_property("_tweak")
def validate_for_algorithm(self, algorithm):
if algorithm.key_size not in (256, 512):
raise ValueError(
"The XTS specification requires a 256-bit key for AES-128-XTS"
" and 512-bit key for AES-256-XTS"
)
@utils.register_interface(Mode)
class ECB(object):
name = "ECB"
validate_for_algorithm = _check_aes_key_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class OFB(object):
name = "OFB"
def __init__(self, initialization_vector):
if not isinstance(initialization_vector, bytes):
raise TypeError("initialization_vector must be bytes")
self._initialization_vector = initialization_vector
initialization_vector = utils.read_only_property("_initialization_vector")
validate_for_algorithm = _check_iv_and_key_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CFB(object):
name = "CFB"
def __init__(self, initialization_vector):
if not isinstance(initialization_vector, bytes):
raise TypeError("initialization_vector must be bytes")
self._initialization_vector = initialization_vector
initialization_vector = utils.read_only_property("_initialization_vector")
validate_for_algorithm = _check_iv_and_key_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CFB8(object):
name = "CFB8"
def __init__(self, initialization_vector):
if not isinstance(initialization_vector, bytes):
raise TypeError("initialization_vector must be bytes")
self._initialization_vector = initialization_vector
initialization_vector = utils.read_only_property("_initialization_vector")
validate_for_algorithm = _check_iv_and_key_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithNonce)
class CTR(object):
name = "CTR"
def __init__(self, nonce):
if not isinstance(nonce, bytes):
raise TypeError("nonce must be bytes")
self._nonce = nonce
nonce = utils.read_only_property("_nonce")
def validate_for_algorithm(self, algorithm):
_check_aes_key_length(self, algorithm)
if len(self.nonce) * 8 != algorithm.block_size:
raise ValueError("Invalid nonce size ({0}) for {1}.".format(
len(self.nonce), self.name
))
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
@utils.register_interface(ModeWithAuthenticationTag)
class GCM(object):
name = "GCM"
_MAX_ENCRYPTED_BYTES = (2 ** 39 - 256) // 8
_MAX_AAD_BYTES = (2 ** 64) // 8
def __init__(self, initialization_vector, tag=None, min_tag_length=16):
# len(initialization_vector) must be in [1, 2 ** 64), but it's impossible
# to actually construct a bytes object that large, so we don't check
# for it
if not isinstance(initialization_vector, bytes):
raise TypeError("initialization_vector must be bytes")
self._initialization_vector = initialization_vector
if tag is not None:
if not isinstance(tag, bytes):
raise TypeError("tag must be bytes or None")
if min_tag_length < 4:
raise ValueError("min_tag_length must be >= 4")
if len(tag) < min_tag_length:
raise ValueError(
"Authentication tag must be {0} bytes or longer.".format(
min_tag_length)
)
self._tag = tag
tag = utils.read_only_property("_tag")
initialization_vector = utils.read_only_property("_initialization_vector")
def validate_for_algorithm(self, algorithm):
_check_aes_key_length(self, algorithm)
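# --- Usage sketch (not part of the original module) --------------------------
# A minimal illustration of how these mode classes validate their parameters.
# The _FakeAES stand-in below is hypothetical; in the real library you would
# pass an algorithms.AES instance instead.
if __name__ == "__main__":
    class _FakeAES(object):
        name = "AES"
        key_size = 128      # bits
        block_size = 128    # bits

    cbc = CBC(b"\x00" * 16)                   # 16-byte IV matches the 128-bit block size
    cbc.validate_for_algorithm(_FakeAES())    # passes silently

    try:
        CBC(b"\x00" * 8).validate_for_algorithm(_FakeAES())
    except ValueError as exc:
        print(exc)                            # Invalid IV size (8) for CBC.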
"""String generation"""
import re
from string import Formatter
dflt_formatter = Formatter()
########### Partial and incremental formatting #########################################################################
# TODO: Make .vformat (therefore .format) work with args and kwargs
# TODO: Make it not blow up, and preserve the spec (e.g. the 1.2f of {foo:1.2f}) when the field is not given
class PartialFormatter(Formatter):
"""A string formatter that won't complain if the fields are only partially formatted.
But note that you will lose the spec part of your template (e.g. in {foo:1.2f}, you'll lose the 1.2f
if foo is not given -- but {foo} will remain).
>>> partial_formatter = PartialFormatter()
>>> str_template = 'foo:{foo} bar={bar} a={a} b={b:0.02f} c={c}'
>>> partial_formatter.format(str_template, bar="BAR", b=34)
'foo:{foo} bar=BAR a={a} b=34.00 c={c}'
Note: If you only need a formatting function (not the transformed formatting string), a simpler solution may be:
```
import functools
format_str = functools.partial(str_template.format, bar="BAR", b=34)
```
See https://stackoverflow.com/questions/11283961/partial-string-formatting for more options and discussions.
"""
def get_value(self, key, args, kwargs):
try:
return super().get_value(key, args, kwargs)
except KeyError:
return '{' + key + '}'
def format_fields_set(self, s):
return {x[1] for x in self.parse(s) if x[1]}
partial_formatter = PartialFormatter()
# TODO: For those who love algorithmic optimization, there's some waste to cut out below.
def _unformatted(d):
for k, v in d.items():
if isinstance(v, str) and len(partial_formatter.format_fields_set(v)) > 0:
yield k
def _fields_to_format(d):
for k, v in d.items():
if isinstance(v, str):
yield from partial_formatter.format_fields_set(v)
def format_str_vals_of_dict(d, *, max_formatting_loops=10, **kwargs):
"""
:param d:
:param max_formatting_loops:
:param kwargs:
:return:
>>> d = {
... 'filepath': '{root}/{file}.{ext}',
... 'ext': 'txt'
... }
>>> format_str_vals_of_dict(d, root='ROOT', file='FILE')
{'filepath': 'ROOT/FILE.txt', 'ext': 'txt'}
Note that if the input mapping `d` and the kwargs have a conflict, the mapping version is used!
>>> format_str_vals_of_dict(d, root='ROOT', file='FILE', ext='will_not_be_used')
{'filepath': 'ROOT/FILE.txt', 'ext': 'txt'}
But if you want to override an input mapping, you can -- the usual way:
>>> format_str_vals_of_dict(dict(d, ext='will_be_used'), root='ROOT', file='FILE')
{'filepath': 'ROOT/FILE.will_be_used', 'ext': 'will_be_used'}
If you don't provide enough fields to satisfy all the format fields in the values of `d`,
you'll be told to bugger off.
>>> format_str_vals_of_dict(d, root='ROOT')
Traceback (most recent call last):
...
ValueError: I won't be able to complete that. You'll need to provide the values for:
file
And it's recursive...
>>> d = {
... 'filepath': '{root}/{filename}',
... 'filename': '{file}.{ext}'
... }
>>> my_configs = {'root': 'ROOT', 'file': 'FILE', 'ext': 'EXT'}
>>> format_str_vals_of_dict(d, **my_configs)
{'filepath': 'ROOT/FILE.EXT', 'filename': 'FILE.EXT'}
# TODO: Could make the above work if filename is given, but not file nor ext! At least as an option.
"""
d = dict(**d) # make a shallow copy
# The defaults (kwargs) cannot overlap with any keys of d, so:
kwargs = {k: kwargs[k] for k in set(kwargs) - set(d)}
provided_fields = set(d) | set(kwargs)
missing_fields = set(_fields_to_format(d)) - provided_fields
if missing_fields:
raise ValueError("I won't be able to complete that. You'll need to provide the values for:\n" +
f" {', '.join(missing_fields)}")
for i in range(max_formatting_loops):
unformatted = set(_unformatted(d))
if unformatted:
for k in unformatted:
d[k] = partial_formatter.format(d[k], **kwargs, **d)
else:
break
else:
raise ValueError(f"There are still some unformatted fields, "
f"but I reached my max {max_formatting_loops} allowed loops. " +
f"Those fields are: {set(_fields_to_format(d)) - (set(d) | set(kwargs))}")
return d
#######################################################################################################################
def compile_str_from_parsed(parsed):
"""The (quasi-)inverse of string.Formatter.parse.
Args:
parsed: iterator of (literal_text, field_name, format_spec, conversion) tuples,
as yield by string.Formatter.parse
Returns:
A format string that would produce such a parsed input.
>>> from string import Formatter
>>> s = "ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL"
>>> assert compile_str_from_parsed(Formatter().parse(s)) == s
>>>
>>> # Or, if you want to see more details...
>>> parsed = list(Formatter().parse(s))
>>> for p in parsed:
... print(p)
('ROOT/', '', '', None)
('/', '0', '', 'r')
('/', '1', 'format', 'i')
('/hello', '', '0.02f', None)
('TAIL', None, None, None)
>>> compile_str_from_parsed(parsed)
'ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL'
"""
result = ''
for literal_text, field_name, format_spec, conversion in parsed:
# output the literal text
if literal_text:
result += literal_text
# if there's a field, output it
if field_name is not None:
result += '{'
if field_name != '':
result += field_name
if conversion:
result += '!' + conversion
if format_spec:
result += ':' + format_spec
result += '}'
return result
def transform_format_str(format_str, parsed_tuple_trans_func):
return compile_str_from_parsed(
map(lambda args: parsed_tuple_trans_func(*args), dflt_formatter.parse(format_str)))
def _empty_field_name(literal_text, field_name, format_spec, conversion):
if field_name is not None:
return literal_text, '', format_spec, conversion
else:
return literal_text, field_name, format_spec, conversion
def auto_field_format_str(format_str):
"""Get an auto field version of the format_str
Args:
format_str: A format string
Returns:
A transformed format_str
>>> auto_field_format_str('R/{0}/{one}/{}/{two}/T')
'R/{}/{}/{}/{}/T'
"""
return transform_format_str(format_str, _empty_field_name)
def _mk_naming_trans_func(names=None):
if names is None:
names = map(str, range(99999))
_names = iter(names)
def trans_func(literal_text, field_name, format_spec, conversion):
if field_name is not None:
return literal_text, next(_names), format_spec, conversion
else:
return literal_text, field_name, format_spec, conversion
return trans_func
def name_fields_in_format_str(format_str, field_names=None):
"""Get a manual field version of the format_str
Args:
format_str: A format string
field_names: An iterable that produces enough strings to fill all of format_str's fields
Returns:
A transformed format_str
>>> name_fields_in_format_str('R/{0}/{one}/{}/{two}/T')
'R/{0}/{1}/{2}/{3}/T'
>>> # Note here that we use the field name to inject a field format as well
>>> name_fields_in_format_str('R/{foo}/{0}/{}/T', ['42', 'hi:03.0f', 'world'])
'R/{42}/{hi:03.0f}/{world}/T'
"""
return transform_format_str(format_str, _mk_naming_trans_func(field_names))
def match_format_string(format_str, s):
"""Match s against the given format string, return dict of matches.
We assume all of the arguments in the format string are named keyword arguments (i.e. no {} or
{:0.2f}). We also assume that all chars are allowed in each keyword argument, so separators
need to be present which aren't present in the keyword arguments (i.e. '{one}{two}' won't work
reliably as a format string but '{one}-{two}' will if the hyphen isn't used in {one} or {two}).
We raise if the format string does not match s.
Author: https://stackoverflow.com/users/2593383/nonagon
Found here: https://stackoverflow.com/questions/10663093/use-python-format-string-in-reverse-for-parsing
Example:
>>> fs = '{test}-{flight}-{go}'
>>> s = fs.format(test='first', flight='second', go='third')
>>> match_format_string(fs, s)
{'test': 'first', 'flight': 'second', 'go': 'third'}
"""
# First split on any keyword arguments, note that the names of keyword arguments will be in the
# 1st, 3rd, ... positions in this list
tokens = re.split(r'\{(.*?)\}', format_str)
keywords = tokens[1::2]
# Now replace keyword arguments with named groups matching them. We also escape between keyword
# arguments so we support meta-characters there. Re-join tokens to form our regexp pattern
tokens[1::2] = map(u'(?P<{}>.*)'.format, keywords)
tokens[0::2] = map(re.escape, tokens[0::2])
pattern = ''.join(tokens)
# Use our pattern to match the given string, raise if it doesn't match
matches = re.match(pattern, s)
if not matches:
raise Exception("Format string did not match")
# Return a dict with all of our keywords and their values
return {x: matches.group(x) for x in keywords}
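# --- Usage sketch (not part of the original module) ---------------------------
# match_format_string also reverse-parses simple path-like templates, provided
# each field is separated by literal text. The template below is made up.
def _match_format_string_example():
    # Returns {'year': '2020', 'month': '01', 'day': '31'}
    return match_format_string('{year}-{month}-{day}.log', '2020-01-31.log')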
def _is_not_none(x):
return x is not None
def format_params_in_str_format(format_string):
"""
Get the "parameter" indices/names of the format_string
Args:
format_string: A format string (i.e. a string with {...} to mark parameter placement and formatting
Returns:
A list of parameter indices used in the format string, in the order they appear, with repetition.
Parameter indices could be integers, strings, or None (to denote "automatic field numbering").
>>> format_string = '{0} (no 1) {2}, and {0} is a duplicate, {} is unnamed and {name} is string-named'
>>> list(format_params_in_str_format(format_string))
[0, 2, 0, None, 'name']
"""
return map(lambda x: int(x) if str.isnumeric(x) else x if x != '' else None,
filter(_is_not_none, (x[1] for x in dflt_formatter.parse(format_string))))
def n_format_params_in_str_format(format_string):
""" The number of parameters"""
return len(set(format_params_in_str_format(format_string)))
def arg_and_kwargs_indices(format_string):
"""
Args:
format_string: A format string (i.e. a string with {...} to mark parameter placement and formatting
Returns:
>>> format_string = '{0} (no 1) {2}, {see} this, {0} is a duplicate (appeared before) and {name} is string-named'
>>> assert arg_and_kwargs_indices(format_string) == ({0, 2}, {'name', 'see'})
>>> format_string = 'This is a format string with only automatic field specification: {}, {}, {} etc.'
>>> arg_and_kwargs_indices(format_string)
(None, None)
"""
d = {True: set(), False: set()}
for x in format_params_in_str_format(format_string):
d[isinstance(x, int)].add(x)
args_keys, kwargs_keys = _validate_str_format_arg_and_kwargs_keys(d[True], d[False])
return args_keys, kwargs_keys
def _validate_str_format_arg_and_kwargs_keys(args_keys, kwargs_keys):
"""check that str_format is entirely manual or entirely automatic field specification"""
if any(not x for x in kwargs_keys):  # automatic fields ({}) show up as None in kwargs_keys,
# so check that args_keys is empty and kwargs_keys holds only that None (no "manual" names)
if (len(args_keys) != 0) or (len(kwargs_keys) != 1):
raise ValueError(
"cannot mix manual field specification (i.e. {number} or {name}) "
"with automatic (i.e. {}) field numbering.")
return None, None
else:
return args_keys, kwargs_keys
pipe_split_p = re.compile(r"\s*\|\s*")
func_and_arg_p = re.compile(r'(?P<func>\w+)\((?P<args>.*)\)', flags=re.DOTALL)
comma_sep_p = re.compile(r'\s*,\s*')
def get_func_and_arg_dict(s):
"""
Parses the input string recursively, returning:
* if the string has the format f(*args): a nested func_and_arg_dict
* else returns the string itself, unchanged.
A func_and_arg_dict is a dict of the format:
{"func": FUNC_STRING, "args": ARG_STRING_LIST},
where ARG_STRING_LIST elements are themselves strings or func_and_arg_dicts.
The intended use is to parse string.Formatter() spec strings and carry out the instructions therein.
:param s: a string
:return: the same string, or a {func:, args:} dict if the string has the f(*args) pattern
>>> get_func_and_arg_dict("foo")
'foo'
>>> get_func_and_arg_dict("foo()")
{'args': [], 'func': 'foo'}
>>> get_func_and_arg_dict("foo(bar)")
{'args': ['bar'], 'func': 'foo'}
>>> get_func_and_arg_dict("f(g(x), y)")
{'args': [{'args': ['x'], 'func': 'g'}, 'y'], 'func': 'f'}
>>> get_func_and_arg_dict('f(g(x), "two words", h(z))')
{'args': [{'args': ['x'], 'func': 'g'}, '"two words"', {'args': ['z'], 'func': 'h'}], 'func': 'f'}
"""
match = func_and_arg_p.match(s)
if match:
func_and_arg_dict = match.groupdict()
if 'args' in func_and_arg_dict:
args_list = comma_sep_p.split(func_and_arg_dict['args'])
if args_list == ['']:
args_list = []
for i, arg in enumerate(args_list):
arg_expanded = get_func_and_arg_dict(arg)
args_list[i] = arg_expanded
func_and_arg_dict['args'] = args_list
return func_and_arg_dict
else:
return s
class PipelineTemplate(Formatter):
def __init__(self, **key_to_action):
"""
A string.Formatter that accepts a |-separated specification of a pipeline through which the input value should
go through before being output (cast automatically to a str).
This formatter is created by specifying what functions correspond to the names that will be used in the spec
of the string. Standard format specifications (such as 04.04f, d, etc) can be used as well, anywhere in the
pipeline.
:param key_to_action: key=action specifications. Action must be a callable which will be applied to input value
>>> p = PipelineTemplate(plus_ten=lambda x: x + 10,
... times_ten=lambda x: x * 10
... )
>>> p.format('{x:plus_ten}', x=1) # plus_ten points to the function that does that job
'11'
>>> p.format('{x:plus_ten|times_ten}', x=1) # you can pipeline the functions you've specified
'110'
>>> p.format('{x} + 10 = {x:plus_ten}, ({x} + 10) * 10 = {x:plus_ten|times_ten}', x=1)
'1 + 10 = 11, (1 + 10) * 10 = 110'
>>> p.format('x + 10 = {0:plus_ten}, (x + 10) * 10 = {0:plus_ten|times_ten}', 1) # no name use
'x + 10 = 11, (x + 10) * 10 = 110'
>>> p.format('{x: times_ten | plus_ten }', x=1) # can have spaces between pipes
'20'
>>> p.format('{x:04.02f}', x=2) # you can also use standard formatting specs
'2.00'
>>> p.format('{x:times_ten | plus_ten | 04.0f}', x=2) # even in a pipeline
'0030'
>>> p = {
... 'f_wrap': lambda x: map('f({})'.format, x),
... 'csv': lambda x: ', '.join(x),
... }
>>>
>>> p = PipelineTemplate(**p)
>>>
>>> p.format('this --> {alist:f_wrap|csv}', alist=['A'])
'this --> f(A)'
>>> p.format('that --> {alist:f_wrap|csv}', alist=['A', 'B', 'C'])
'that --> f(A), f(B), f(C)'
>>> # and if you didn't define what you needed in the constructor arguments, you can always write python code
>>> s = '''This {x:
... lambda x: x + 2
... | lambda x: x * 10
... | 3.02f} was obtained through python functions.'''
>>> PipelineTemplate().format(s, x=1)
'This 30.00 was obtained through python functions.'
>>> # and you can even use functions that need to be imported!
>>> p = {
... 'add_10': lambda x: x + 10
... }
>>> s = '''{x:
... lambda x: list(map(lambda xx: xx + 2, x))
... | lambda x: __import__('numpy').array(x) * 10
... | __import__('numpy').sum
... | add_10}'''
>>> PipelineTemplate(**p).format(s, x=[1, 2])
'80'
"""
self.key_to_action = key_to_action
def format_field(self, value, spec):
spec = spec.strip()
spec_list = pipe_split_p.split(spec)
for spec in spec_list:
try:
if spec in self.key_to_action:
value = self.key_to_action[spec](value)
else:
try:
f = eval(spec)
value = eval("f(value)") # TODO: evals are not secure. Put safety checks in place.
except Exception:
value = super(PipelineTemplate, self).format_field(value, spec)
except ValueError as e:
raise ValueError("{}: {}".format(spec, e.args[0]))
return str(value)
def wrapper(prefix='', suffix=''):
return "{prefix}{{}}{suffix}".format(prefix=prefix, suffix=suffix).format
def mapper(func):
return lambda x: list(map(func, x))
def templater(template):
template = template.replace("{{}}", "{}")
return template.format
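# --- Usage sketch (not part of the original module) ---------------------------
# The three helpers above build small string-transforming callables; the values
# below are illustrative only.
def _wrapper_mapper_templater_example():
    wrap = wrapper(prefix='f(', suffix=')')
    assert wrap('x') == 'f(x)'
    wrap_all = mapper(wrap)
    assert wrap_all(['a', 'b']) == ['f(a)', 'f(b)']
    fill = templater('val={{}} end')  # the literal "{{}}" becomes a "{}" slot
    assert fill('42') == 'val=42 end'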
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for sentence representation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import logging
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib import lookup as contrib_lookup
def get_padded_embeddings(embeddings,
vocabulary_table,
tokens, batch_size,
token_indexes=None):
"""Reshapes and pads 'raw' word embeddings.
Say we have a batch of B tokenized sentences, of variable length, with a total
of W tokens. For example, B = 2 and W = 3 + 4 = 7:
[['The', 'cat', 'eats'],
[ 'A', 'black', 'cat', 'jumps']]
Since rows have variable length, this cannot be represented as a tf.Tensor.
It is represented as a tf.SparseTensor, with 7 values & indexes:
indices: [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [1,3]]
values: ['The', 'cat', 'eats', 'A', 'black', 'cat', 'jumps']
We have also built a vocabulary table:
vocabulary table: ['cat', 'The', 'A', 'black', 'eats', 'jumps']
We also have the embeddings, a VxD matrix of floats (V = vocabulary size, 6 here)
representing each word in the vocabulary table as a normal tf.Tensor.
For example, with D=3, embeddings could be:
[[0.4, 0.5, -0.6], # This is the embedding for word 0 = 'cat'
[0.1, -0.3, 0.6], # This is the embedding for word 1 = 'The'
[0.7, 0.8, -0.9], # This is the embedding for word 2 = 'A'
[-0.1, 0.9, 0.7], # This is the embedding for word 3 = 'black'
[-0.2, 0.4, 0.7], # This is the embedding for word 4 = 'eats'
[0.3, -0.5, 0.2]] # This is the embedding for word 5 = 'jumps'
This function builds a normal tf.Tensor containing the embeddings for the
tokens provided, in the correct order, with appropriate 0 padding.
In our example, the returned tensor would be:
[[[0.1, -0.3, 0.6], [0.4, 0.5, -0.6], [-0.2, 0.4, 0.7], [0.0, 0.0, 0.0]],
[[0.7, 0.8, -0.9], [-0.1, 0.9, 0.7], [0.4, 0.5, -0.6], [0.3, -0.5, 0.2]]]
Note that since the first sentence has only 3 words, the 4th embedding gets
replaced by a D-dimensional vector of 0.
Args:
embeddings: [V, D] Tensor of floats (V = vocabulary size), containing the
embeddings, initialized with the same vocabulary file as vocabulary_table.
vocabulary_table: a tf.contrib.lookup.LookupInterface,
containing the vocabulary, initialized with the same vocabulary file as
embeddings.
tokens: [B, ?] SparseTensor of strings, the tokens.
batch_size: Python integer.
token_indexes: An optional Tensor of token ids; if provided, the vocabulary
lookup is skipped and these ids are used directly.
Returns:
[B, L, D] Tensor of floats: the embeddings in the correct order,
appropriately padded with 0.0, where L = max(num_tokens) and B = batch_size
"""
embedding_dim = embeddings.get_shape()[1].value # D in docstring above.
num_tokens_in_batch = tf.shape(tokens.indices)[0] # W in the docstring above.
max_length = tokens.dense_shape[1] # This is L in the docstring above.
# Get indices of tokens in vocabulary_table.
if token_indexes is not None:
indexes = token_indexes
else:
indexes = vocabulary_table.lookup(tokens.values)
# Get word embeddings.
tokens_embeddings = tf.gather(embeddings, indexes)
# Shape of the return tensor.
new_shape = tf.cast(
tf.stack([batch_size, max_length, embedding_dim], axis=0), tf.int32)
# Build the vector of indices for the return Tensor.
# In the example above, indices_final would be:
# [[[0,0,0], [0,0,1], [0,0,2]],
# [[0,1,0], [0,1,1], [0,1,2]],
# [[0,2,0], [0,2,1], [0,2,2]],
# [[1,0,0], [1,0,1], [1,0,2]],
# [[1,1,0], [1,1,1], [1,1,2]],
# [[1,2,0], [1,2,1], [1,2,2]],
# [[1,3,0], [1,3,1], [1,3,2]]]
tiled = tf.tile(tokens.indices, [1, embedding_dim])
indices_tiled = tf.cast(
tf.reshape(tiled, [num_tokens_in_batch * embedding_dim, 2]), tf.int32)
indices_linear = tf.expand_dims(
tf.tile(tf.range(0, embedding_dim), [num_tokens_in_batch]), axis=1)
indices_final = tf.concat([indices_tiled, indices_linear], axis=1)
# Build the dense Tensor.
embeddings_padded = tf.sparse_to_dense(
sparse_indices=indices_final,
output_shape=new_shape,
sparse_values=tf.reshape(tokens_embeddings,
[num_tokens_in_batch * embedding_dim]))
embeddings_padded.set_shape((batch_size, None, embedding_dim))
return embeddings_padded
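# --- Illustration (not part of the original module) ---------------------------
# The scatter performed by tf.sparse_to_dense above is easier to see in plain
# NumPy. The numbers reproduce the docstring example (B=2, L=4, D=3, W=7) and
# are assumptions used only for illustration.
def _numpy_padding_illustration():
    import numpy as np
    token_indices = np.array(   # [W, 2]: (sentence, position) per token
        [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]])
    token_embeddings = np.arange(21, dtype=np.float32).reshape(7, 3)  # [W, D]
    padded = np.zeros((2, 4, 3), dtype=np.float32)                    # [B, L, D]
    padded[token_indices[:, 0], token_indices[:, 1]] = token_embeddings
    return padded  # row (0, 3) stays all-zero: the first sentence has 3 tokens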
def get_padded_indexes(vocabulary_table,
tokens, batch_size,
token_indexes=None):
"""Get the indices of tokens from vocabulary table.
Args:
vocabulary_table: a tf.contrib.lookup.LookupInterface,
containing the vocabulary, initialized with the same vocabulary file as
embeddings.
tokens: [B, ?] SparseTensor of strings, the tokens.
batch_size: Python integer.
token_indexes: An optional Tensor of token ids; if provided, the vocabulary
lookup is skipped and these ids are used directly.
Returns:
[B, L] Tensor of integers: indices of tokens in the correct order,
appropriately padded with 0, where L = max(num_tokens) and B = batch_size
"""
num_tokens_in_batch = tf.shape(tokens.indices)[0]
max_length = tokens.dense_shape[1]
# Get indices of tokens in vocabulary_table.
if token_indexes is not None:
indexes = token_indexes
else:
indexes = vocabulary_table.lookup(tokens.values)
# Build the dense Tensor.
indexes_padded = tf.sparse_to_dense(
sparse_indices=tokens.indices,
output_shape=[batch_size, max_length],
sparse_values=tf.reshape(indexes,
[num_tokens_in_batch]))
indexes_padded.set_shape((batch_size, None))
return indexes_padded
class EmbedAndPad(snt.AbstractModule):
"""Embed and pad tokenized words.
This class's primary functionality is similar to get_padded_embeddings.
It stores references to the embeddings and vocabulary table for convenience,
so that the user does not have to keep and pass them around.
"""
def __init__(self,
batch_size,
vocabularies,
embedding_dim,
num_oov_buckets=1000,
fine_tune_embeddings=False,
padded_token=None,
name='embed_and_pad'):
super(EmbedAndPad, self).__init__(name=name)
self._batch_size = batch_size
vocab_file, vocab_size = get_merged_vocabulary_file(vocabularies,
padded_token)
self._vocab_size = vocab_size
self._num_oov_buckets = num_oov_buckets
# Load vocabulary table for index lookup.
self._vocabulary_table = contrib_lookup.index_table_from_file(
vocabulary_file=vocab_file,
num_oov_buckets=num_oov_buckets,
vocab_size=self._vocab_size)
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
# The default value is chosen from language/bert/modeling.py.
return tf.truncated_normal_initializer(stddev=initializer_range)
self._embeddings = tf.get_variable('embeddings_matrix',
[self._vocab_size + num_oov_buckets,
embedding_dim],
trainable=fine_tune_embeddings,
initializer=create_initializer())
def _build(self, tokens):
padded_embeddings = get_padded_embeddings(
self._embeddings, self._vocabulary_table, tokens, self._batch_size)
return padded_embeddings
@property
def vocab_table(self):
return self._vocabulary_table
@property
def vocab_size(self):
return self._vocab_size + self._num_oov_buckets
def get_accuracy(logits, labels):
"""Top 1 accuracy from logits and labels."""
return tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
def get_num_correct_predictions(logits, labels):
"""Get the number of correct predictions over a batch."""
predictions = tf.cast(tf.argmax(logits, axis=1), tf.int64)
evals = tf.equal(predictions, labels)
num_correct = tf.reduce_sum(tf.cast(evals, tf.float64))
return num_correct
def get_merged_vocabulary_file(vocabularies, padded_token=None):
"""Merges several vocabulary files into one temporary file.
The TF object that loads the embedding expects a vocabulary file, to know
which embeddings it should load.
See tf.contrib.embedding.load_embedding_initializer.
When we want to train/test on several datasets simultaneously we need to merge
their vocabulary files into a single file.
Args:
vocabularies: Iterable of vocabularies. Each vocabulary should be
a list of tokens.
padded_token: If not None, add the padded_token to the first index.
Returns:
outfilename: Name of the merged file. Contains the union of all tokens in
filenames, without duplicates, one token per line.
vocabulary_size: Count of tokens in the merged file.
"""
uniques = [set(vocabulary) for vocabulary in vocabularies]
unique_merged = frozenset().union(*uniques)
unique_merged_sorted = sorted(unique_merged)
if padded_token is not None:
# Add padded token as 0 index.
unique_merged_sorted = [padded_token] + unique_merged_sorted
vocabulary_size = len(unique_merged_sorted)
outfile = tempfile.NamedTemporaryFile(delete=False)
outfile.write(b'\n'.join(unique_merged_sorted))
outfilename = outfile.name
logging.info('Merged vocabulary file with %d tokens: %s', vocabulary_size,
outfilename)
outfile.close()
return outfilename, vocabulary_size
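# --- Usage sketch (not part of the original module) ---------------------------
# get_merged_vocabulary_file only needs plain Python iterables, so it can be
# exercised without building a TF graph. The vocabularies below are made up;
# tokens are bytes because the merged file is written in binary mode.
def _merged_vocabulary_example():
    filename, size = get_merged_vocabulary_file(
        vocabularies=[[b'cat', b'dog'], [b'dog', b'bird']],
        padded_token=b'<PAD>')
    # size == 4: '<PAD>' plus the union {'bird', 'cat', 'dog'}.
    return filename, size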
# -*- coding: utf-8 -*-
"""
Project Tracking & Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
mode_task = settings.get_project_mode_task()
# =============================================================================
def index():
""" Module's Home Page """
if mode_task:
redirect(URL(f="project", vars={"tasks":1}))
elif settings.get_project_mode_drr():
# Bypass home page & go direct to searching for Projects
redirect(URL(f="project", args="search"))
else:
# Bypass home page & go direct to list of Projects
# - no good search options available
redirect(URL(f="project"))
#module_name = settings.modules[module].name_nice
#response.title = module_name
#return dict(module_name=module_name)
# =============================================================================
def create():
""" Redirect to project/create """
redirect(URL(f="project", args="create"))
# -----------------------------------------------------------------------------
def project():
""" RESTful CRUD controller """
if "tasks" in request.get_vars:
# Return simplified controller to pick a Project for which to list the Open Tasks
table = s3db.project_project
s3.crud_strings["project_project"].title_list = T("Open Tasks for Project")
#s3.crud_strings["project_project"].sub_title_list = T("Select Project")
s3mgr.LABEL.READ = "Select"
s3mgr.LABEL.UPDATE = "Select"
s3db.configure("project_project",
deletable=False,
listadd=False)
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
read_url = URL(f="task", args="search",
vars={"project":"[id]"})
update_url = URL(f="task", args="search",
vars={"project":"[id]"})
s3mgr.crud.action_buttons(r, deletable=False,
read_url=read_url,
update_url=update_url)
return output
s3.postp = postp
return s3_rest_controller()
table = s3db.hrm_human_resource
table.person_id.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Person"),
T("Select the person assigned to this role for this project.")))
doc_table = s3db.table("doc_document", None)
if doc_table is not None:
doc_table.organisation_id.readable = doc_table.organisation_id.writable = False
doc_table.person_id.readable = doc_table.person_id.writable = False
doc_table.location_id.readable = doc_table.location_id.writable = False
# Pre-process
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.interactive:
if not r.component:
if r.id:
r.table.human_resource_id.represent = lambda id: \
s3db.hrm_human_resource_represent(id, show_link=True)
elif r.function == "index":
r.method = "search"
# If just a few Projects, then a List is sufficient
#r.method = "list"
else:
if r.component_name == "organisation":
if r.method != "update":
host_role = 1
otable = s3db.project_organisation
query = (otable.deleted != True) & \
(otable.role == host_role) & \
(otable.project_id == r.id)
row = db(query).select(otable.id,
limitby=(0, 1)).first()
if row:
project_organisation_roles = \
dict(s3.project_organisation_roles)
del project_organisation_roles[host_role]
otable.role.requires = \
IS_NULL_OR(IS_IN_SET(project_organisation_roles))
elif r.component_name in ("activity", "location"):
# Default the Location Selector list of countries to those found in the project
countries = r.record.countries_id
if countries:
ttable = s3db.gis_location_tag
query = (ttable.location_id.belongs(countries)) & \
(ttable.tag == "ISO2")
countries = db(query).select(ttable.value)
settings.gis.countries = [c.value for c in countries]
elif r.component_name == "task":
r.component.table.milestone_id.requires = IS_NULL_OR(IS_ONE_OF(db,
"project_milestone.id",
"%(name)s",
filterby="project_id",
filter_opts=(r.id,),
))
if "open" in request.get_vars:
# Show only the Open Tasks for this Project
statuses = s3.project_task_active_statuses
filter = (r.component.table.status.belongs(statuses))
r.resource.add_component_filter("task", filter)
elif r.component_name == "beneficiary":
db.project_beneficiary.project_location_id.requires = IS_NULL_OR(
IS_ONE_OF(db,
"project_location.id",
s3db.project_location_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id])
)
elif r.component_name == "human_resource":
# We can pass the human resource type filter in the URL
group = r.vars.get("group", None)
table = db.project_human_resource
s3db.hrm_human_resource.person_id.represent = lambda id: \
s3db.pr_person_represent(id, show_link=True)
# These values are defined in hrm_type_opts
if group:
crud_strings = s3.crud_strings
if group == "staff":
group = 1
table.human_resource_id.label = T("Staff")
crud_strings["project_human_resource"] = crud_strings["hrm_staff"]
crud_strings["project_human_resource"].update(
subtitle_create = T("Add Staff Member to Project")
)
elif group == "volunteer":
group = 2
table.human_resource_id.label = T("Volunteer")
crud_strings["project_human_resource"] = crud_strings["hrm_volunteer"]
crud_strings["project_human_resource"].update(
subtitle_create = T("Add Volunteer to Project")
)
# Use the group to filter the component list
filter_by_type = (db.hrm_human_resource.type == group)
r.resource.add_component_filter("human_resource", filter_by_type)
# Use the group to filter the form widget for adding a new record
table.human_resource_id.requires = IS_ONE_OF(
db,
"hrm_human_resource.id",
s3db.hrm_human_resource_represent,
filterby="type",
filter_opts=(group,),
orderby="hrm_human_resource.person_id",
sort=True
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('project_project_start_date','project_project_end_date')''')
if mode_task:
read_url = URL(args=["[id]", "task"])
update_url = URL(args=["[id]", "task"])
s3mgr.crud.action_buttons(r,
read_url=read_url,
update_url=update_url)
elif r.component_name == "beneficiary":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('project_beneficiary_start_date','project_beneficiary_end_date')''')
return output
s3.postp = postp
rheader = s3db.project_rheader
return s3_rest_controller(module,
"project", # Need to specify as sometimes we come via index()
rheader=rheader,
csv_template="project")
# =============================================================================
def status():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def hazard():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def framework():
""" RESTful CRUD controller """
return s3_rest_controller(rheader=s3db.project_rheader)
# =============================================================================
def organisation():
""" RESTful CRUD controller """
if settings.get_project_multiple_organisations():
s3db.configure("project_organisation",
insertable=False,
editable=False,
deletable=False)
list_btn = A(T("Funding Report"),
_href=URL(c="project", f="organisation",
args="report", vars=request.get_vars),
_class="action-btn")
return s3_rest_controller(list_btn=list_btn,
csv_template="organisation")
else:
tabs = [
(T("Basic Details"), None),
(T("Projects"), "project"),
(T("Contacts"), "human_resource"),
]
rheader = lambda r: s3db.org_rheader(r, tabs)
return s3_rest_controller("org", resourcename,
rheader=rheader)
# =============================================================================
def beneficiary_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def beneficiary():
""" RESTful CRUD controller """
tablename = "project_beneficiary"
s3db.configure("project_beneficiary",
insertable=False,
editable=False,
deletable=False)
list_btn = A(T("Beneficiary Report"),
_href=URL(c="project", f="beneficiary",
args="report", vars=request.get_vars),
_class="action-btn")
return s3_rest_controller()
# =============================================================================
def activity_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity():
""" RESTful CRUD controller """
tablename = "%s_%s" % (module, resourcename)
table = s3db[tablename]
# Pre-process
def prep(r):
if r.interactive:
if r.component is not None:
if r.component_name == "document":
doc_table = s3db.doc_document
doc_table.organisation_id.readable = False
doc_table.person_id.readable = False
doc_table.location_id.readable = False
doc_table.organisation_id.writable = False
doc_table.person_id.writable = False
doc_table.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.representation == "plain":
def represent(record, field):
if field.represent:
return field.represent(record[field])
else:
return record[field]
# Add VirtualFields to Map Popup
# Can't inject into SQLFORM, so need to simply replace
item = TABLE()
table.id.readable = False
table.location_id.readable = False
fields = [table[f] for f in table.fields if table[f].readable]
record = r.record
for field in fields:
item.append(TR(TD(field.label), TD(represent(record, field))))
hierarchy = gis.get_location_hierarchy()
item.append(TR(TD(hierarchy["L4"]), TD(record["name"])))
for field in ["L3", "L2", "L1"]:
item.append(TR(TD(hierarchy[field]), TD(record[field])))
output["item"] = item
return output
s3.postp = postp
return s3_rest_controller(rheader=s3db.project_rheader,
csv_template="activity")
# -----------------------------------------------------------------------------
def location():
"""
RESTful CRUD controller to display project location information
"""
tablename = "%s_%s" % (module, resourcename)
table = s3db[tablename]
# Pre-process
def prep(r):
if r.interactive:
if r.component is not None:
if r.component_name == "document":
doc_table = s3db.doc_document
doc_table.organisation_id.readable = False
doc_table.person_id.readable = False
doc_table.location_id.readable = False
doc_table.organisation_id.writable = False
doc_table.person_id.writable = False
doc_table.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.representation == "plain":
# Replace the Map Popup contents with custom content
item = TABLE()
def represent(record, field):
if field.represent:
return field.represent(record[field])
else:
return record[field]
if settings.get_project_community():
# The Community is the primary resource
record = r.record
table.id.readable = False
table.location_id.readable = False
fields = [table[f] for f in table.fields if table[f].readable]
for field in fields:
data = record[field]
if data:
represent = field.represent
if represent:
item.append(TR(TD(field.label),
TD(represent(data))))
else:
item.append(TR(TD(field.label), TD(data)))
hierarchy = gis.get_location_hierarchy()
for field in ["L4", "L3", "L2", "L1"]:
if field in hierarchy and record[field]:
item.append(TR(TD(hierarchy[field]),
TD(record[field])))
output["item"] = item
else:
# The Project is the primary resource
project_id = r.record.project_id
ptable = s3db.project_project
query = (ptable.id == project_id)
project = db(query).select(limitby=(0, 1)).first()
ptable.id.readable = False
fields = [ptable[f] for f in ptable.fields if ptable[f].readable]
for field in fields:
data = project[field]
if data:
represent = field.represent
if represent:
item.append(TR(TD(field.label),
TD(represent(data))))
else:
item.append(TR(TD(field.label), TD(data)))
title = s3.crud_strings["project_project"].title_display
# Assume authorised to see details
popup_url = URL(f="project", args=[project_id])
details_btn = A(T("Show Details"), _href=popup_url,
_id="details-btn", _target="_blank")
output = dict(
item = item,
title = title,
details_btn = details_btn,
)
return output
s3.postp = postp
return s3_rest_controller(interactive_report=True,
rheader=s3db.project_rheader,
csv_template="location")
# -----------------------------------------------------------------------------
def community_contact():
""" Show a list of all community contacts """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def report():
"""
RESTful CRUD controller
@ToDo: Why is this needed? To have no rheader?
"""
return s3_rest_controller(module, "activity")
# =============================================================================
def task():
""" RESTful CRUD controller """
return s3db.project_task_controller()
# =============================================================================
def task_project():
""" RESTful CRUD controller """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def task_activity():
""" RESTful CRUD controller """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def milestone():
""" RESTful CRUD controller """
return s3_rest_controller()
# =============================================================================
def time():
""" RESTful CRUD controller """
tablename = "project_time"
table = s3db[tablename]
if "mine" in request.get_vars:
# Show the Logged Time for this User
s3.crud_strings["project_time"].title_list = T("My Logged Hours")
s3db.configure("project_time",
listadd=False)
person_id = auth.s3_logged_in_person()
if person_id:
now = request.utcnow
s3.filter = (table.person_id == person_id) & \
(table.date > (now - datetime.timedelta(days=2)))
try:
list_fields = s3db.get_config(tablename,
"list_fields")
list_fields.remove("person_id")
s3db.configure(tablename,
list_fields=list_fields)
except:
pass
elif "week" in request.get_vars:
now = request.utcnow
week = datetime.timedelta(days=7)
s3.filter = (table.date > (now - week))
return s3_rest_controller()
# =============================================================================
# Comments
# =============================================================================
def comment_parse(comment, comments, task_id=None):
"""
Parse a Comment
@param: comment - a gluon.sql.Row: the current comment
@param: comments - a gluon.sql.Rows: full list of comments
@param: task_id - a reference ID: optional task commented on
"""
author = B(T("Anonymous"))
if comment.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == comment.created_by)
left = [ltable.on(ltable.user_id == utable.id),
ptable.on(ptable.pe_id == ltable.pe_id)]
row = db(query).select(utable.email,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
import hashlib
hash = hashlib.md5(email).hexdigest()
url = "http://www.gravatar.com/%s" % hash
author = B(A(username, _href=url, _target="top"))
if not task_id and comment.task_id:
table = s3db.project_task
task = "re: %s" % table[comment.task_id].name
header = DIV(author, " ", task)
task_id = comment.task_id
else:
header = author
thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by),
DIV(DIV(header,
_class="comment-header"),
DIV(XML(comment.body)),
_class="comment-text"),
DIV(DIV(comment.created_on,
_class="comment-date"),
DIV(A(T("Reply"),
_class="action-btn"),
_onclick="comment_reply(%i);" % comment.id,
_class="comment-reply"),
_class="fright"),
_id="comment-%i" % comment.id,
_task_id=task_id,
_class="comment-box"))
# Add the children of this thread
children = UL(_class="children")
id = comment.id
count = 0
for comment in comments:
if comment.parent == id:
count = 1
child = comment_parse(comment, comments, task_id=task_id)
children.append(child)
if count == 1:
thread.append(children)
return thread
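# Threading sketch (not part of the controller): comment_parse() nests replies
# under their parent by scanning the full comment list for rows whose `parent`
# points at the current comment. The plain-Python version below shows the same
# idea with hypothetical dicts instead of gluon Rows and HTML helpers.
def _thread_comments(comments, parent=None):
    """Return a nested [(comment, children), ...] structure."""
    return [(c, _thread_comments(comments, parent=c["id"]))
            for c in comments if c["parent"] == parent]
# e.g. _thread_comments([{"id": 1, "parent": None}, {"id": 2, "parent": 1}])
# -> [({'id': 1, 'parent': None}, [({'id': 2, 'parent': 1}, [])])]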
# -----------------------------------------------------------------------------
def comments():
""" Function accessed by AJAX from rfooter to handle Comments """
try:
task_id = request.args[0]
except:
raise HTTP(400)
table = s3db.project_comment
field = table.task_id
field.default = task_id
field.writable = field.readable = False
# Form to add a new Comment
form = crud.create(table, formname="project_comment/%s" % task_id)
# List of existing Comments
comments = db(field == task_id).select(table.id,
table.parent,
table.body,
table.created_by,
table.created_on)
output = UL(_id="comments")
for comment in comments:
if not comment.parent:
# Show top-level threads at top-level
thread = comment_parse(comment, comments, task_id=task_id)
output.append(thread)
script = "".join((
'''$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})
$('#project_comment_parent__row1').hide()
$('#project_comment_parent__row').hide()
$('#project_comment_body').ckeditor(ck_config)
$('#submit_record__row input').click(function(){
$('#comment-form').hide()
$('#project_comment_body').ckeditorGet().destroy()
return true
})'''))
# No layout in this output!
#s3.jquery_ready.append(script)
output = DIV(output,
DIV(H4(T("New Post"),
_id="comment-title"),
form,
_id="comment-form",
_class="clear"),
SCRIPT(script))
return XML(output)
# -----------------------------------------------------------------------------
def partners():
# ToDo: This may need to become a deployment setting
current.request.get_vars["organisation.organisation_type_id$name"] = "Bilateral,Government,Intergovernmental,NGO,UN agency"
return s3db.org_organisation_controller()
# END =========================================================================
import mock
import django.test
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import User
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.cache import cache
from django.template import context
from ..models import Token
from ..middlewares import CsrfMiddleware
from ..utils import prep_key
from .. import conf
from .base import ClientHandler, make_expired
class TestCsrfToken(django.test.TestCase):
def setUp(self):
self.client.handler = ClientHandler()
User.objects.create_user('jbalogh', 'j@moz.com', 'password')
self.save_ANON_ALWAYS = conf.ANON_ALWAYS
conf.ANON_ALWAYS = False
def tearDown(self):
conf.ANON_ALWAYS = self.save_ANON_ALWAYS
def login(self):
assert self.client.login(username='jbalogh', password='password')
def test_csrftoken_unauthenticated(self):
# request.csrf_token is '' for anonymous users.
response = self.client.get('/', follow=True)
self.assertEqual(response._request.csrf_token, '')
def test_csrftoken_authenticated(self):
# request.csrf_token is a random non-empty string for authed users.
self.login()
response = self.client.get('/', follow=True)
# The CSRF token is a 32-character MD5 string.
self.assertEqual(len(response._request.csrf_token), 32)
def test_csrftoken_new_session(self):
# The csrf_token is added to request.session the first time.
self.login()
response = self.client.get('/', follow=True)
# The CSRF token is a 32-character MD5 string.
token = response._request.session['csrf_token']
self.assertEqual(len(token), 32)
self.assertEqual(token, response._request.csrf_token)
def test_csrftoken_existing_session(self):
# The csrf_token in request.session is reused on subsequent requests.
self.login()
r1 = self.client.get('/', follow=True)
token = r1._request.session['csrf_token']
r2 = self.client.get('/', follow=True)
self.assertEqual(r1._request.csrf_token, r2._request.csrf_token)
self.assertEqual(token, r2._request.csrf_token)
class TestCsrfMiddleware(django.test.TestCase):
def setUp(self):
self.token = 'a' * 32
self.rf = django.test.RequestFactory()
self.mw = CsrfMiddleware()
self._user = User.objects.create()
def process_view(self, request, view=None):
request.session = {}
return self.mw.process_view(request, view, None, None)
def test_anon_token_from_cookie(self):
rf = django.test.RequestFactory()
rf.cookies[conf.ANON_COOKIE] = self.token
cache.set(prep_key(self.token), 'woo')
request = rf.get('/')
SessionMiddleware().process_request(request)
AuthenticationMiddleware().process_request(request)
self.mw.process_request(request)
self.assertEqual(request.csrf_token, 'woo')
def test_set_csrftoken_once(self):
# Make sure process_request only sets request.csrf_token once.
request = self.rf.get('/')
request.csrf_token = 'woo'
self.mw.process_request(request)
self.assertEqual(request.csrf_token, 'woo')
def test_reject_view(self):
# Check that the reject view returns a 403.
response = self.process_view(self.rf.post('/'))
self.assertEqual(response.status_code, 403)
def test_csrf_exempt(self):
# Make sure @csrf_exempt still works.
view = type("", (), {'csrf_exempt': True})()
self.assertEqual(self.process_view(self.rf.post('/'), view), None)
def test_safe_whitelist(self):
# CSRF should not get checked on these methods.
self.assertEqual(self.process_view(self.rf.get('/')), None)
self.assertEqual(self.process_view(self.rf.head('/')), None)
self.assertEqual(self.process_view(self.rf.options('/')), None)
def test_unsafe_methods(self):
self.assertEqual(self.process_view(self.rf.post('/')).status_code,
403)
self.assertEqual(self.process_view(self.rf.put('/')).status_code,
403)
self.assertEqual(self.process_view(self.rf.delete('/')).status_code,
403)
def test_csrfmiddlewaretoken(self):
# The user token should be found in POST['csrfmiddlewaretoken'].
request = self.rf.post('/', {'csrfmiddlewaretoken': self.token})
self.assertEqual(self.process_view(request).status_code, 403)
request.csrf_token = self.token
self.assertEqual(self.process_view(request), None)
def test_x_csrftoken(self):
# The user token can be found in the X-CSRFTOKEN header.
request = self.rf.post('/', HTTP_X_CSRFTOKEN=self.token)
self.assertEqual(self.process_view(request).status_code, 403)
request.csrf_token = self.token
self.assertEqual(self.process_view(request), None)
def test_require_request_token_or_user_token(self):
# Blank request and user tokens raise an error on POST.
request = self.rf.post('/', HTTP_X_CSRFTOKEN='')
request.csrf_token = ''
self.assertEqual(self.process_view(request).status_code, 403)
def test_token_no_match(self):
# A 403 is returned when the tokens don't match.
request = self.rf.post('/', HTTP_X_CSRFTOKEN='woo')
request.csrf_token = ''
self.assertEqual(self.process_view(request).status_code, 403)
def test_csrf_token_context_processor(self):
# Our CSRF token should be available in the template context.
request = mock.Mock()
request.csrf_token = self.token
request.groups = []
ctx = {}
for processor in context.get_standard_processors():
ctx.update(processor(request))
self.assertEqual(ctx['csrf_token'], self.token)
def _authenticated_request(self, token=None, **kwargs):
"""Create mocked request object for authenticated user"""
self._user.is_authenticated = lambda: True
if token is None:
token = Token.objects.create(owner=self._user).value
return mock.MagicMock(
csrf_token=token,
user=self._user,
POST={},
META={'HTTP_X_CSRFTOKEN': token},
csrf_processing_done=False,
_dont_enforce_csrf_checks=False,
**kwargs)
def test_reject_for_wrong_token_if_authenticated(self):
"""Test reject for wrong token if authenticated"""
request = self._authenticated_request('wrong')
self.assertIsNotNone(self.process_view(request))
def test_reject_when_token_expired(self):
"""Test reject when csrf token expired"""
token = make_expired(Token.objects.create(owner=self._user))
request = self._authenticated_request(token.value)
self.assertIsNotNone(self.process_view(request))
def test_accept_when_token_is_ok(self):
"""Test accept when token is ok"""
request = self._authenticated_request()
self.assertIsNone(self.process_view(request))
def test_renew_csrf_token_on_request_if_expired(self):
"""Test renew csrf token on request if expired"""
token = make_expired(Token.objects.create(owner=self._user))
request = self._authenticated_request(token.value, session={
'csrf_token': token.value,
})
del request.csrf_token
self.mw.process_request(request)
self.assertNotEqual(token.value, request.csrf_token)
def test_not_change_csrf_token_on_request_if_valid(self):
"""Test not change csrf token on request if valid"""
request = self._authenticated_request()
token = request.csrf_token
request.session = {
'csrf_token': token,
}
del request.csrf_token
self.mw.process_request(request)
self.assertEqual(token, request.csrf_token)
def test_add_csrf_token_on_request(self):
"""Test add csrf token on request"""
request = self._authenticated_request()
del request.csrf_token
self.mw.process_request(request)
self.assertIsNotNone(request.csrf_token)
class TestPerViewCsrf(django.test.TestCase):
"""Per view csrf test case"""
def setUp(self):
self.user = User.objects.create_user('test', 'test@test.test', 'test')
self.client.handler = ClientHandler()
self.client.login(username='test', password='test')
def _get_token(self):
return Token.objects.create(
owner=self.user,
for_view="session_csrf.tests.base.per_view",
)
def test_ok_with_correct_per_view_csrf(self):
"""Test response is ok with correct per-view csrf"""
response = self.client.post('/per-view', {
'csrfmiddlewaretoken': self._get_token().value,
})
self.assertEqual(response.status_code, 200)
def test_not_ok_with_expired_csrf_token(self):
"""Test not ok with expired csrf token"""
token = make_expired(self._get_token())
response = self.client.post('/per-view', {
'csrfmiddlewaretoken': token.value,
})
self.assertEqual(response.status_code, 403)
def test_not_ok_without_token(self):
"""Test not ok without token"""
response = self.client.post('/per-view')
self.assertEqual(response.status_code, 403)
#!/usr/bin/env python
import wx
from collections import deque
class SnakeGame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(300,350), style = wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
# Initialize status bar
self.statusBar = self.CreateStatusBar()
self.statusBar.SetStatusText('Score: ' + '0')
self.board = Board(self)
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add(self.board, 1, wx.EXPAND | wx.ALL | wx.ALIGN_CENTER, 0)
self.SetSizer(box)
self.board.SetFocus()
self.board.Start()
self.Center()
self.Show(True)
def OnAbout(self,e):
dlg = wx.MessageDialog( self, 'Created by Simon So, 2010', 'About Snake', wx.OK)
dlg.ShowModal()
dlg.Destroy()
def OnExit(self,e):
self.Close(True)
class SnakeBody(object):
def __init__(self):
self.Width = 5
self.Length = 50
self.HeadDir = SnakeDir.RIGHT
self.TailDir = SnakeDir.RIGHT
self.Body = [[wx.Point(50,100),wx.Point(0, 100)]]
self.NumSegments = 1
def SaveTurnPos(self):
self.Body[0].insert(1,wx.Point(self.Body[0][0].x,self.Body[0][0].y))
def Move(self):
self.CalcTailDir()
headDir = self.HeadDir
tailDir = self.TailDir
if headDir == SnakeDir.RIGHT:
self.MoveHeadRight()
elif headDir == SnakeDir.LEFT:
self.MoveHeadLeft()
elif headDir == SnakeDir.UP:
self.MoveHeadUp()
else: # headDir == SnakeDir.DOWN:
self.MoveHeadDown()
if tailDir == SnakeDir.RIGHT:
self.MoveTailRight()
elif tailDir == SnakeDir.LEFT:
self.MoveTailLeft()
elif tailDir == SnakeDir.UP:
self.MoveTailUp()
        else: # tailDir == SnakeDir.DOWN
self.MoveTailDown()
def MoveHeadUp(self):
if self.Body[0][0].y == 0:
self.Body.insert(0,[wx.Point(self.Body[0][0].x,Board.Height),wx.Point(self.Body[0][0].x,Board.Height)])
self.NumSegments += 1
else:
self.Body[0][0].y -= 1
def MoveHeadDown(self):
if self.Body[0][0].y == Board.Height:
self.Body.insert(0,[wx.Point(self.Body[0][0].x,0),wx.Point(self.Body[0][0].x,0)])
self.NumSegments += 1
else:
self.Body[0][0].y += 1
def MoveHeadLeft(self):
if self.Body[0][0].x == 0:
self.Body.insert(0,[wx.Point(Board.Width,self.Body[0][0].y),wx.Point(Board.Width,self.Body[0][0].y)])
self.NumSegments += 1
else:
self.Body[0][0].x -= 1
def MoveHeadRight(self):
if self.Body[0][0].x == Board.Width:
self.Body.insert(0,[wx.Point(0,self.Body[0][0].y),wx.Point(0,self.Body[0][0].y)])
self.NumSegments += 1
else:
self.Body[0][0].x += 1
def MoveTailUp(self):
if self.Body[-1][0] == self.Body[-1][-1]:
del self.Body[-1]
self.NumSegments -= 1
else:
self.Body[-1][-1].y -= 1
def MoveTailDown(self):
if self.Body[-1][0] == self.Body[-1][-1]:
del self.Body[-1]
self.NumSegments -= 1
else:
self.Body[-1][-1].y += 1
def MoveTailLeft(self):
if self.Body[-1][0] == self.Body[-1][-1]:
del self.Body[-1]
self.NumSegments -= 1
else:
self.Body[-1][-1].x -= 1
def MoveTailRight(self):
if self.Body[-1][0] == self.Body[-1][-1]:
del self.Body[-1]
self.NumSegments -= 1
else:
self.Body[-1][-1].x += 1
def CalcTailDir(self):
diff = self.Body[-1][-2] - self.Body[-1][-1]
if diff.x > 0:
self.TailDir = SnakeDir.RIGHT
elif diff.x < 0:
self.TailDir = SnakeDir.LEFT
elif diff.y > 0:
self.TailDir = SnakeDir.DOWN
elif diff.y < 0:
self.TailDir = SnakeDir.UP
elif diff.x == 0 and diff.y == 0:
if len(self.Body[-1]) > 2:
nextDiff = self.Body[-1][-3] - self.Body[-1][-1]
if nextDiff.x > 0:
self.TailDir = SnakeDir.RIGHT
elif nextDiff.x < 0:
self.TailDir = SnakeDir.LEFT
elif nextDiff.y > 0:
self.TailDir = SnakeDir.DOWN
elif nextDiff.y < 0:
self.TailDir = SnakeDir.UP
del self.Body[-1][-2]
else:
del self.Body[-1]
def SetPos(self, point):
self.Body[0][0] = point
def SetWidth(self, width):
self.Width = width
def SetLength(self, length):
self.Length = length
def SetHeadDir(self, dir):
self.HeadDir = dir
def GetBody(self):
return self.Body
def GetPos(self):
return self.Body[0][0]
def GetWidth(self):
return self.Width
def GetLength(self):
return self.Length
def GetHeadDir(self):
return self.HeadDir
class SnakeDir(object):
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
class Board(wx.Panel):
Width = 300
Height = 300
Speed = 10
ID_TIMER = 1
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.isPaused = False
self.Score = 0
self.Snake = SnakeBody()
self.timer = wx.Timer(self, Board.ID_TIMER)
self.Bind(wx.EVT_TIMER, self.OnTimer, id=Board.ID_TIMER)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBkgd)
def Start(self):
self.timer.Start(Board.Speed, wx.TIMER_CONTINUOUS)
def Pause(self):
self.isPaused = not self.isPaused
if self.isPaused:
self.timer.Stop()
self.GetParent().statusBar.SetStatusText('Score: ' + str(self.Score) + ' - PAUSED')
else:
self.timer.Start(Board.Speed)
self.GetParent().statusBar.SetStatusText('Score: ' + str(self.Score))
self.Refresh()
def SetSnakeDir(self, newDir):
if newDir == self.Snake.HeadDir:
return
elif newDir == SnakeDir.UP and self.Snake.HeadDir == SnakeDir.DOWN:
return
elif newDir == SnakeDir.DOWN and self.Snake.HeadDir == SnakeDir.UP:
return
elif newDir == SnakeDir.LEFT and self.Snake.HeadDir == SnakeDir.RIGHT:
return
elif newDir == SnakeDir.RIGHT and self.Snake.HeadDir == SnakeDir.LEFT:
return
else:
self.Snake.SetHeadDir(newDir)
self.Snake.SaveTurnPos()
# Does nothing, catches erase background event to prevent flickering
def OnEraseBkgd(self, e):
return
def OnKeyDown(self, e):
keycode = e.GetKeyCode()
if keycode == ord('P') or keycode == ord('p'):
self.Pause()
return
if keycode == ord('X') or keycode == ord('x'):
self.GetParent().Close(True)
return
if self.isPaused:
return
# elif keycode == wx.WXK_LEFT:
# self.SetSnakeDir(SnakeDir.LEFT)
# print LEFT
# elif keycode == wx.WXK_RIGHT:
# self.SetSnakeDir(SnakeDir.RIGHT)
# print RIGHT
# elif keycode == wx.WXK_DOWN:
# self.SetSnakeDir(SnakeDir.DOWN)
# print DOWN
# elif keycode == wx.WXK_UP:
# self.SetSnakeDir(SnakeDir.UP)
# print UP
if keycode == ord('A') or keycode == ord('a'):
self.SetSnakeDir(SnakeDir.LEFT)
elif keycode == ord('D') or keycode == ord('d'):
self.SetSnakeDir(SnakeDir.RIGHT)
elif keycode == ord('S') or keycode == ord('s'):
self.SetSnakeDir(SnakeDir.DOWN)
elif keycode == ord('W') or keycode == ord('w'):
self.SetSnakeDir(SnakeDir.UP)
else:
e.Skip()
def OnPaint(self, e):
dc = wx.BufferedPaintDC(self)
dc.Clear()
self.DrawSnake(dc)
def DrawSnake(self, dc):
snakeLen = self.Snake.GetLength()
snakeWidth = self.Snake.GetWidth()
headPos = self.Snake.GetPos()
dc.SetPen(wx.Pen(wx.BLACK,snakeWidth))
for segments in self.Snake.Body:
dc.DrawLines(segments)
def OnTimer(self, e):
if e.GetId() == Board.ID_TIMER:
self.Snake.Move()
self.Refresh()
else:
e.Skip()
app = wx.App(False)
frame = SnakeGame(None, 'Snake')
app.MainLoop()
|
|
"""
@package mi.dataset.parser.test
@file mi-dataset/mi/dataset/parser/test/test_phsen_abcdef_imodem.py
@author Joe Padula
@brief Test code for the phsen_abcdef_imodem parser
"""
__author__ = 'Joe Padula'
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import UnexpectedDataException
from mi.logging import log
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.phsen_abcdef.imodem.resource import RESOURCE_PATH
from mi.dataset.parser.phsen_abcdef_imodem import \
PhsenAbcdefImodemParticleClassKey, \
PhsenAbcdefImodemParser
from mi.dataset.parser.phsen_abcdef_imodem_particles import \
PhsenAbcdefImodemMetadataRecoveredDataParticle, \
PhsenAbcdefImodemControlRecoveredDataParticle, \
PhsenAbcdefImodemInstrumentRecoveredDataParticle, \
PhsenAbcdefImodemMetadataTelemeteredDataParticle, \
PhsenAbcdefImodemControlTelemeteredDataParticle, \
PhsenAbcdefImodemInstrumentTelemeteredDataParticle
O_MODE = 'rU' # Universal Open mode
@attr('UNIT', group='mi')
class PhsenAbcdefImodemParserUnitTestCase(ParserUnitTestCase):
"""
phsen_abcdef_imodem Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._recovered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.phsen_abcdef_imodem_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
PhsenAbcdefImodemParticleClassKey.METADATA_PARTICLE_CLASS:
PhsenAbcdefImodemMetadataRecoveredDataParticle,
PhsenAbcdefImodemParticleClassKey.CONTROL_PARTICLE_CLASS:
PhsenAbcdefImodemControlRecoveredDataParticle,
PhsenAbcdefImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
PhsenAbcdefImodemInstrumentRecoveredDataParticle,
}
}
self._telemetered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.phsen_abcdef_imodem_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
PhsenAbcdefImodemParticleClassKey.METADATA_PARTICLE_CLASS:
PhsenAbcdefImodemMetadataTelemeteredDataParticle,
PhsenAbcdefImodemParticleClassKey.CONTROL_PARTICLE_CLASS:
PhsenAbcdefImodemControlTelemeteredDataParticle,
PhsenAbcdefImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
PhsenAbcdefImodemInstrumentTelemeteredDataParticle,
}
}
def build_telem_parser(self):
"""
Build a telemetered parser, storing it in self.parser
"""
if self.stream_handle is None:
self.fail("Must set stream handle before building telemetered parser")
self.parser = PhsenAbcdefImodemParser(self._telemetered_parser_config, self.stream_handle,
self.exception_callback)
def build_recov_parser(self):
"""
        Build a recovered parser, storing it in self.parser.
        This requires the stream handle to be set before calling it.
"""
if self.stream_handle is None:
self.fail("Must set stream handle before building recovered parser")
self.parser = PhsenAbcdefImodemParser(self._recovered_parser_config, self.stream_handle,
self.exception_callback)
def test_happy_path_simple(self):
"""
        Read a file and verify that pH and control records and the header/footer can be read.
Verify that the contents of the instrument, control and metadata particles are correct.
The last record is a control record with battery data.
There should be no exceptions generated.
"""
log.debug('===== START TEST HAPPY PATH SINGLE =====')
# Recovered
with open(os.path.join(RESOURCE_PATH, 'example1.DAT'), O_MODE) as file_handle:
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(5)
log.debug("Num particles: %d", len(particles))
self.assert_particles(particles, "example1_rec.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
# Telemetered
with open(os.path.join(RESOURCE_PATH, 'example1.DAT'), O_MODE) as file_handle:
parser = PhsenAbcdefImodemParser(self._telemetered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(5)
log.debug("Num particles: %d", len(particles))
self.assert_particles(particles, "example1_tel.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
log.debug('===== END TEST HAPPY PATH SINGLE =====')
def test_invalid_header_timestamp(self):
"""
        The file used in this test has an error in the File Date Time for the header record.
        This results in 4 particles being created instead of 5
        (the metadata particle is not created), and also results in the exception
        callback being called.
"""
log.debug('===== START TEST INVALID METADATA TIMESTAMP =====')
with open(os.path.join(RESOURCE_PATH, 'invalid_header_timestamp.DAT'), O_MODE) as file_handle:
num_particles_to_request = 5
num_expected_particles = 4
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "invalid_header_timestamp_rec.yml", RESOURCE_PATH)
log.debug('Exceptions : %s', self.exception_callback_value)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
log.debug('===== END TEST INVALID METADATA TIMESTAMP =====')
def test_invalid_record_type(self):
"""
The file used in this test has a record type in the second record that does not match any
of the expected record types.
        This results in 5 particles being retrieved instead of 6, and also results in the exception
callback being called.
"""
log.debug('===== START TEST INVALID RECORD TYPE =====')
with open(os.path.join(RESOURCE_PATH, 'invalid_record_type.DAT'), O_MODE) as file_handle:
num_particles_to_request = 6
num_expected_particles = 5
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "invalid_record_type_rec.yml", RESOURCE_PATH)
log.debug('Exceptions : %s', self.exception_callback_value)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
log.debug('===== END TEST INVALID RECORD TYPE =====')
def test_ph_record_missing_timestamp(self):
"""
The file used in this test has a pH record (the second record - Record[331])
with a missing timestamp.
        This results in 5 particles being retrieved instead of 6, and also results in the exception
callback being called.
"""
log.debug('===== START TEST PH RECORD MISSING TIMESTAMP =====')
with open(os.path.join(RESOURCE_PATH, 'ph_record_missing_timestamp.DAT'), O_MODE) as file_handle:
num_particles_to_request = 6
num_expected_particles = 5
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "ph_record_missing_timestamp_rec.yml", RESOURCE_PATH)
log.debug('Exceptions : %s', self.exception_callback_value)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
log.debug('===== END TEST PH RECORD MISSING TIMESTAMP =====')
def test_no_science_particles(self):
"""
The file used in this test only contains header and footer records.
Verify that no science (pH or Control) particles are produced if the input file
        has no pH data records or control data, i.e., it contains only header and footer records.
In this case only the metadata particle will get created.
"""
log.debug('===== START TEST NO SCIENCE PARTICLES =====')
with open(os.path.join(RESOURCE_PATH, 'header_and_footer_only.DAT'), O_MODE) as file_handle:
num_particles_to_request = 2
num_expected_particles = 1
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "header_and_footer_only_rec.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
log.debug('===== END TEST NO SCIENCE PARTICLES =====')
def test_incorrect_length(self):
"""
        The last record in the file used in this test has a length that does not match the Length
        field in the record. This tests the following requirement:
        if the beginning of another instrument data record (* character) is encountered before "Length"
        bytes have been found, where "Length" is the record length specified in a record, then we cannot
        reliably parse the record.
        This results in 5 particles being retrieved instead of 6, and also results in the exception
callback being called.
"""
log.debug('===== START TEST INCORRECT LENGTH =====')
with open(os.path.join(RESOURCE_PATH, 'incorrect_data_length.DAT'), O_MODE) as file_handle:
num_particles_to_request = 6
num_expected_particles = 5
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "incorrect_data_length_rec.yml", RESOURCE_PATH)
log.debug('Exceptions : %s', self.exception_callback_value)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
log.debug('===== END TEST INCORRECT LENGTH =====')
def test_invalid_checksum(self):
"""
        The first record in the file used in this test has an invalid checksum. An instrument particle will
still get created, but the passed_checksum parameter will be 0 (no warning or error msg generated).
"""
log.debug('===== START TEST INVALID CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'invalid_checksum.DAT'), O_MODE) as file_handle:
num_particles_to_request = 5
num_expected_particles = 5
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "invalid_checksum_rec.yml", RESOURCE_PATH)
# No exception should be thrown
self.assertEquals(self.exception_callback_value, [])
log.debug('===== END TEST INVALID CHECKSUM =====')
def test_invalid_header_fields(self):
"""
        The header in the file used in this test has an invalid Voltage and Number of Samples Written.
A metadata particle will still get created, but there will be None in some of the parameters
(an exception will be generated).
"""
log.debug('===== START TEST INVALID HEADER FIELDS =====')
with open(os.path.join(RESOURCE_PATH, 'invalid_header_fields.DAT'), O_MODE) as file_handle:
num_particles_to_request = 5
num_expected_particles = 5
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "invalid_header_fields_rec.yml", RESOURCE_PATH)
self.assert_(isinstance(self.exception_callback_value[0], UnexpectedDataException))
log.debug('===== END TEST INVALID HEADER FIELDS =====')
def test_real_file(self):
"""
        The file used in this test is a real file from the acquisition server.
        It contains 20 pH records.
Verify that 20 instrument particles and one metadata particle are generated
from the real file.
"""
log.debug('===== START TEST REAL FILE =====')
num_particles_to_request = 25
num_expected_particles = 21
with open(os.path.join(RESOURCE_PATH, 'phsen1_20140730_190554.DAT'), O_MODE) as file_handle:
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
log.info(len(particles))
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "phsen1_20140730_190554_rec.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'phsen1_20140730_190554.DAT'), O_MODE) as file_handle:
parser = PhsenAbcdefImodemParser(self._telemetered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
log.info(len(particles))
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "phsen1_20140730_190554_tel.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
log.debug('===== END TEST REAL FILE =====')
def test_real_file_2(self):
"""
        The file used in this test is a real file from the acquisition server.
        It contains 9 pH records.
Verify that 9 instrument particles and one metadata particle are generated
from the real file.
"""
log.debug('===== START TEST REAL 2 FILE =====')
num_particles_to_request = 10
num_expected_particles = 10
with open(os.path.join(RESOURCE_PATH, 'phsen1_20140725_192532.DAT'), O_MODE) as file_handle:
parser = PhsenAbcdefImodemParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
log.info(len(particles))
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "phsen1_20140725_192532_rec.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'phsen1_20140725_192532.DAT'), O_MODE) as file_handle:
parser = PhsenAbcdefImodemParser(self._telemetered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
log.info(len(particles))
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, "phsen1_20140725_192532_tel.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
log.debug('===== END TEST REAL 2 FILE =====')
def particle_to_yml(self, particles, filename, mode='w'):
"""
        This is a testing helper, not part of the parser tests proper. Since the same particles
        will be used for the driver test, it is helpful to write them to a .yml file in the same
        form needed by the results .yml files.
"""
        # Opens for write/append; to start from scratch, manually delete the existing file.
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.16f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(' %s: \'%s\'\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def create_yml(self):
"""
This utility creates a yml file
"""
self.stream_handle = open(os.path.join(RESOURCE_PATH, 'phsen1_20140725_192532.DAT'), O_MODE)
self.build_telem_parser()
particles = self.parser.get_records(21)
self.particle_to_yml(particles, 'phsen1_20140725_192532_tel.yml')
self.stream_handle.close()
|
|
#!/usr/bin/env python
import os
import sys
import logging
import click
import textwrap
import shutil
from mkdocs import __version__
from mkdocs import utils
from mkdocs import config
if sys.platform.startswith("win"):
try:
import colorama
except ImportError:
pass
else:
colorama.init()
log = logging.getLogger(__name__)
class ColorFormatter(logging.Formatter):
colors = {
'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'yellow',
'DEBUG': 'blue'
}
text_wrapper = textwrap.TextWrapper(
width=shutil.get_terminal_size(fallback=(0, 0)).columns,
replace_whitespace=False,
break_long_words=False,
break_on_hyphens=False,
initial_indent=' '*12,
subsequent_indent=' '*12
)
def format(self, record):
message = super().format(record)
prefix = f'{record.levelname:<8} - '
if record.levelname in self.colors:
prefix = click.style(prefix, fg=self.colors[record.levelname])
if self.text_wrapper.width:
# Only wrap text if a terminal width was detected
msg = '\n'.join(
self.text_wrapper.fill(line)
for line in message.splitlines()
)
# Prepend prefix after wrapping so that color codes don't affect length
return prefix + msg[12:]
return prefix + message
class State:
''' Maintain logging level.'''
def __init__(self, log_name='mkdocs', level=logging.INFO):
self.logger = logging.getLogger(log_name)
# Don't restrict level on logger; use handler
self.logger.setLevel(1)
self.logger.propagate = False
self.stream = logging.StreamHandler()
self.stream.setFormatter(ColorFormatter())
self.stream.setLevel(level)
self.stream.name = 'MkDocsStreamHandler'
self.logger.addHandler(self.stream)
pass_state = click.make_pass_decorator(State, ensure=True)
clean_help = "Remove old files from the site_dir before building (the default)."
config_help = "Provide a specific MkDocs config"
dev_addr_help = ("IP address and port to serve documentation locally (default: "
"localhost:8000)")
strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
"on any warnings.")
theme_help = "The theme to use when building your documentation."
theme_choices = utils.get_theme_names()
site_dir_help = "The directory to output the result of the documentation build."
use_directory_urls_help = "Use directory URLs when building pages (the default)."
reload_help = "Enable the live reloading in the development server (this is the default)"
no_reload_help = "Disable the live reloading in the development server."
dirty_reload_help = "Enable the live reloading in the development server, but only re-build files that have changed"
commit_message_help = ("A commit message to use when committing to the "
                       "GitHub Pages remote branch. Commit {sha} and MkDocs {version} are available as expansions")
remote_branch_help = ("The remote branch to commit to for GitHub Pages. This "
                      "overrides the value specified in config")
remote_name_help = ("The remote name to commit to for GitHub Pages. This "
                    "overrides the value specified in config")
force_help = "Force the push to the repository."
ignore_version_help = "Ignore check that build is not being deployed with an older version of MkDocs."
watch_theme_help = ("Include the theme in list of files to watch for live reloading. "
"Ignored when live reload is not used.")
shell_help = "Use the shell when invoking Git."
watch_help = ("A directory or file to watch for live reloading. "
"Can be supplied multiple times.")
def add_options(opts):
def inner(f):
for i in reversed(opts):
f = i(f)
return f
return inner
def verbose_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.stream.setLevel(logging.DEBUG)
return click.option('-v', '--verbose',
is_flag=True,
expose_value=False,
help='Enable verbose output',
callback=callback)(f)
def quiet_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.stream.setLevel(logging.ERROR)
return click.option('-q', '--quiet',
is_flag=True,
expose_value=False,
help='Silence warnings',
callback=callback)(f)
common_options = add_options([quiet_option, verbose_option])
common_config_options = add_options([
click.option('-f', '--config-file', type=click.File('rb'), help=config_help),
# Don't override config value if user did not specify --strict flag
# Conveniently, load_config drops None values
click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),
click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),
# As with --strict, set the default to None so that this doesn't incorrectly
# override the config file
click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)
])
PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"
PKG_DIR = os.path.dirname(os.path.abspath(__file__))
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.version_option(
__version__,
'-V', '--version',
message=f'%(prog)s, version %(version)s from { PKG_DIR } (Python { PYTHON_VERSION })'
)
@common_options
def cli():
"""
MkDocs - Project documentation with Markdown.
"""
@cli.command(name="serve")
@click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')
@click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True)
@click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)
@click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)
@click.option('--watch-theme', help=watch_theme_help, is_flag=True)
@click.option('-w', '--watch', help=watch_help, type=click.Path(exists=True), multiple=True, default=[])
@common_config_options
@common_options
def serve_command(dev_addr, livereload, watch, **kwargs):
"""Run the builtin development server"""
from mkdocs.commands import serve
serve.serve(dev_addr=dev_addr, livereload=livereload, watch=watch, **kwargs)
@cli.command(name="build")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@common_config_options
@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
@common_options
def build_command(clean, **kwargs):
"""Build the MkDocs documentation"""
from mkdocs.commands import build
build.build(config.load_config(**kwargs), dirty=not clean)
@cli.command(name="gh-deploy")
@click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help)
@click.option('-m', '--message', help=commit_message_help)
@click.option('-b', '--remote-branch', help=remote_branch_help)
@click.option('-r', '--remote-name', help=remote_name_help)
@click.option('--force', is_flag=True, help=force_help)
@click.option('--ignore-version', is_flag=True, help=ignore_version_help)
@click.option('--shell', is_flag=True, help=shell_help)
@common_config_options
@click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
@common_options
def gh_deploy_command(clean, message, remote_branch, remote_name, force, ignore_version, shell, **kwargs):
"""Deploy your documentation to GitHub Pages"""
cfg = config.load_config(
remote_branch=remote_branch,
remote_name=remote_name,
**kwargs
)
from mkdocs.commands import build, gh_deploy
build.build(cfg, dirty=not clean)
gh_deploy.gh_deploy(cfg, message=message, force=force, ignore_version=ignore_version, shell=shell)
@cli.command(name="new")
@click.argument("project_directory")
@common_options
def new_command(project_directory):
"""Create a new MkDocs project"""
from mkdocs.commands import new
new.new(project_directory)
if __name__ == '__main__': # pragma: no cover
cli()
|
|
import logging
import warnings
from twisted.internet import defer
from twisted.trial import unittest
from pytest import raises
import scrapy
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.misc import load_object
from scrapy.extensions.throttle import AutoThrottle
from scrapy.extensions import telnet
class BaseCrawlerTest(unittest.TestCase):
def assertOptionIsDefault(self, settings, key):
self.assertIsInstance(settings, Settings)
self.assertEqual(settings[key], getattr(default_settings, key))
class CrawlerTestCase(BaseCrawlerTest):
def setUp(self):
self.crawler = Crawler(DefaultSpider, Settings())
def test_deprecated_attribute_spiders(self):
with warnings.catch_warnings(record=True) as w:
spiders = self.crawler.spiders
self.assertEqual(len(w), 1)
self.assertIn("Crawler.spiders", str(w[0].message))
sl_cls = load_object(self.crawler.settings['SPIDER_LOADER_CLASS'])
self.assertIsInstance(spiders, sl_cls)
self.crawler.spiders
is_one_warning = len(w) == 1
if not is_one_warning:
for warning in w:
print(warning)
self.assertTrue(is_one_warning, "Warn deprecated access only once")
def test_populate_spidercls_settings(self):
spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
project_settings = {'TEST1': 'project', 'TEST3': 'project'}
class CustomSettingsSpider(DefaultSpider):
custom_settings = spider_settings
settings = Settings()
settings.setdict(project_settings, priority='project')
crawler = Crawler(CustomSettingsSpider, settings)
self.assertEqual(crawler.settings.get('TEST1'), 'spider')
self.assertEqual(crawler.settings.get('TEST2'), 'spider')
self.assertEqual(crawler.settings.get('TEST3'), 'project')
self.assertFalse(settings.frozen)
self.assertTrue(crawler.settings.frozen)
def test_crawler_accepts_dict(self):
crawler = Crawler(DefaultSpider, {'foo': 'bar'})
self.assertEqual(crawler.settings['foo'], 'bar')
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_accepts_None(self):
crawler = Crawler(DefaultSpider)
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_rejects_spider_objects(self):
with raises(ValueError):
Crawler(DefaultSpider())
class SpiderSettingsTestCase(unittest.TestCase):
def test_spider_custom_settings(self):
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'AUTOTHROTTLE_ENABLED': True
}
crawler = Crawler(MySpider, {})
enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
self.assertIn(AutoThrottle, enabled_exts)
class CrawlerLoggingTestCase(unittest.TestCase):
def test_no_root_handler_installed(self):
handler = get_scrapy_root_handler()
if handler is not None:
logging.root.removeHandler(handler)
class MySpider(scrapy.Spider):
name = 'spider'
crawler = Crawler(MySpider, {})
assert get_scrapy_root_handler() is None
def test_spider_custom_settings_log_level(self):
log_file = self.mktemp()
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'LOG_LEVEL': 'INFO',
'LOG_FILE': log_file,
# disable telnet if not available to avoid an extra warning
'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
}
configure_logging()
self.assertEqual(get_scrapy_root_handler().level, logging.DEBUG)
crawler = Crawler(MySpider, {})
self.assertEqual(get_scrapy_root_handler().level, logging.INFO)
info_count = crawler.stats.get_value('log_count/INFO')
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
with open(log_file, 'rb') as fo:
logged = fo.read().decode('utf8')
self.assertNotIn('debug message', logged)
self.assertIn('info message', logged)
self.assertIn('warning message', logged)
self.assertIn('error message', logged)
self.assertEqual(crawler.stats.get_value('log_count/ERROR'), 1)
self.assertEqual(crawler.stats.get_value('log_count/WARNING'), 1)
self.assertEqual(
crawler.stats.get_value('log_count/INFO') - info_count, 1)
self.assertEqual(crawler.stats.get_value('log_count/DEBUG', 0), 0)
class SpiderLoaderWithWrongInterface(object):
def unneeded_method(self):
pass
class CustomSpiderLoader(SpiderLoader):
pass
class CrawlerRunnerTestCase(BaseCrawlerTest):
def test_spider_manager_verify_interface(self):
settings = Settings({
'SPIDER_LOADER_CLASS': 'tests.test_crawler.SpiderLoaderWithWrongInterface'
})
with warnings.catch_warnings(record=True) as w:
self.assertRaises(AttributeError, CrawlerRunner, settings)
self.assertEqual(len(w), 1)
self.assertIn("SPIDER_LOADER_CLASS", str(w[0].message))
self.assertIn("scrapy.interfaces.ISpiderLoader", str(w[0].message))
def test_crawler_runner_accepts_dict(self):
runner = CrawlerRunner({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_runner_accepts_None(self):
runner = CrawlerRunner()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_deprecated_attribute_spiders(self):
with warnings.catch_warnings(record=True) as w:
runner = CrawlerRunner(Settings())
spiders = runner.spiders
self.assertEqual(len(w), 1)
self.assertIn("CrawlerRunner.spiders", str(w[0].message))
self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
sl_cls = load_object(runner.settings['SPIDER_LOADER_CLASS'])
self.assertIsInstance(spiders, sl_cls)
def test_spidermanager_deprecation(self):
with warnings.catch_warnings(record=True) as w:
runner = CrawlerRunner({
'SPIDER_MANAGER_CLASS': 'tests.test_crawler.CustomSpiderLoader'
})
self.assertIsInstance(runner.spider_loader, CustomSpiderLoader)
is_one_warning = len(w) == 1
if not is_one_warning:
for warning in w:
print(warning)
self.assertIn('Please use SPIDER_LOADER_CLASS', str(w[0].message))
self.assertTrue(is_one_warning)
def test_crawl_rejects_spider_objects(self):
with raises(ValueError):
CrawlerRunner().crawl(DefaultSpider())
def test_create_crawler_rejects_spider_objects(self):
with raises(ValueError):
CrawlerRunner().create_crawler(DefaultSpider())
class CrawlerProcessTest(BaseCrawlerTest):
def test_crawler_process_accepts_dict(self):
runner = CrawlerProcess({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_process_accepts_None(self):
runner = CrawlerProcess()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
class ExceptionSpider(scrapy.Spider):
name = 'exception'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
raise ValueError('Exception in from_crawler method')
class NoRequestsSpider(scrapy.Spider):
name = 'no_request'
def start_requests(self):
return []
class CrawlerRunnerHasSpider(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful_for_several(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
self.assertEqual(runner.bootstrap_failed, True)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed_for_several(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, True)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import re
import logging
import threading
import parsers
import apirdflib
#from apirdflib import rdfGetTargets, rdfGetSources
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
schemasInitialized = False
extensionsLoaded = False
extensionLoadErrors = ""
# INTESTHARNESS flags that we are in a test harness - not called by the webApp, so some things will work differently.
#setInTestHarness(True) should be called from test suites.
INTESTHARNESS = False
def setInTestHarness(val):
global INTESTHARNESS
INTESTHARNESS = val
def getInTestHarness():
global INTESTHARNESS
return INTESTHARNESS
AllLayersList = []
def setAllLayersList(val):
global AllLayersList
AllLayersList = val
#Copy it into apirdflib
apirdflib.allLayersList = val
def getAllLayersList():
global AllLayersList
return AllLayersList
EVERYLAYER = "!EVERYLAYER!"
sitename = "schema.org"
sitemode = "mainsite" # whitespaced list for CSS tags,
# e.g. "mainsite testsite", "extensionsite" when off expected domains
DYNALOAD = True # permits read_schemas to be re-invoked live.
#JINJA_ENVIRONMENT = jinja2.Environment(
# loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
# extensions=['jinja2.ext.autoescape'], autoescape=True)
debugging = False
# Core API: we have a single schema graph built from triples and units.
NodeIDMap = {}
ext_re = re.compile(r'([^\w,])+')
all_layers = {}
all_terms = {}
# Utility declaration of W3C Initial Context
# From http://www.w3.org/2011/rdfa-context/rdfa-1.1
# and http://www.w3.org/2013/json-ld-context/rdfa11
# Enables all these prefixes without explicit declaration when
# using schema.org's JSON-LD context file.
#
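# For example, with these prefixes declared, "dc:title" expands to
# <http://purl.org/dc/terms/title> and "foaf:name" to <http://xmlns.com/foaf/0.1/name>.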
namespaces = """ "schema": "http://schema.org/",
"cat": "http://www.w3.org/ns/dcat#",
"cc": "http://creativecommons.org/ns#",
"cnt": "http://www.w3.org/2008/content#",
"ctag": "http://commontag.org/ns#",
"dc": "http://purl.org/dc/terms/",
"dcat": "http://www.w3.org/ns/dcat#",
"dcterms": "http://purl.org/dc/terms/",
"describedby": "http://www.w3.org/2007/05/powder-s#describedby",
"earl": "http://www.w3.org/ns/earl#",
"foaf": "http://xmlns.com/foaf/0.1/",
"gldp": "http://www.w3.org/ns/people#",
"gr": "http://purl.org/goodrelations/v1#",
"grddl": "http://www.w3.org/2003/g/data-view#",
"ht": "http://www.w3.org/2006/http#",
"ical": "http://www.w3.org/2002/12/cal/icaltzd#",
"license": "http://www.w3.org/1999/xhtml/vocab#license",
"ma": "http://www.w3.org/ns/ma-ont#",
"og": "http://ogp.me/ns#",
"org": "http://www.w3.org/ns/org#",
"org": "http://www.w3.org/ns/org#",
"owl": "http://www.w3.org/2002/07/owl#",
"prov": "http://www.w3.org/ns/prov#",
"ptr": "http://www.w3.org/2009/pointers#",
"qb": "http://purl.org/linked-data/cube#",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfa": "http://www.w3.org/ns/rdfa#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"rev": "http://purl.org/stuff/rev#",
"rif": "http://www.w3.org/2007/rif#",
"role": "http://www.w3.org/1999/xhtml/vocab#role",
"rr": "http://www.w3.org/ns/r2rml#",
"sd": "http://www.w3.org/ns/sparql-service-description#",
"sioc": "http://rdfs.org/sioc/ns#",
"skos": "http://www.w3.org/2004/02/skos/core#",
"skosxl": "http://www.w3.org/2008/05/skos-xl#",
"v": "http://rdf.data-vocabulary.org/#",
"vcard": "http://www.w3.org/2006/vcard/ns#",
"void": "http://rdfs.org/ns/void#",
"wdr": "http://www.w3.org/2007/05/powder#",
"wdrs": "http://www.w3.org/2007/05/powder-s#",
"xhv": "http://www.w3.org/1999/xhtml/vocab#",
"xml": "http://www.w3.org/XML/1998/namespace",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"""
class DataCacheTool():
def __init__ (self):
self._DataCache = {}
self.tlocal = threading.local()
self.tlocal.CurrentDataCache = "core"
self._DataCache[self.tlocal.CurrentDataCache] = {}
def getCache(self,cache=None):
if cache == None:
cache = self.getCurrent()
if cache in self._DataCache.keys():
return self._DataCache[cache]
else:
self._DataCache[cache] = {}
return self._DataCache[cache]
def get(self,key,cache=None):
return self.getCache(cache).get(key)
def put(self,key,val,cache=None):
self.getCache(cache)[key] = val
def setCurrent(self,current):
self.tlocal.CurrentDataCache = current
if(self._DataCache.get(current) == None):
self._DataCache[current] = {}
log.info("Setting _CurrentDataCache: %s",current)
def getCurrent(self):
return self.tlocal.CurrentDataCache
def keys(self):
return self._DataCache.keys()
DataCache = DataCacheTool()
class Unit ():
"""
Unit represents a node in our schema graph. IDs are local,
e.g. "Person" or use simple prefixes, e.g. rdfs:Class.
"""
def __init__ (self, id):
self.id = id
NodeIDMap[id] = self
self.arcsIn = []
self.arcsOut = []
self.examples = None
self.home = None
self.subtypes = None
self.sourced = False
self.category = " "
self.typeFlags = {}
def __str__(self):
return self.id
def GetImmediateSubtypes(self, layers='core'):
return GetImmediateSubtypes(self, layers=layers)
@staticmethod
def GetUnit (id, createp=False):
"""Return a Unit representing a node in the schema graph.
Argument:
createp -- should we create node if we don't find it? (default: False)
"""
ret = None
if (id in NodeIDMap):
return NodeIDMap[id]
ret = apirdflib.rdfGetTriples(id)
if (ret == None and createp != False):
return Unit(id)
return ret
@staticmethod
def GetUnitNoLoad(id, createp=False):
if (id in NodeIDMap):
return NodeIDMap[id]
if (createp != False):
return Unit(id)
return None
def typeOf(self, type, layers='core'):
"""Boolean, true if the unit has an rdf:type matching this type."""
types = GetTargets( Unit.GetUnit("rdf:type"), self, layers )
return (type in types)
# Function needs rewriting to use GetTargets(arc,src,layers) and recurse
def subClassOf(self, type, layers='core'):
"""Boolean, true if the unit has an rdfs:subClassOf matching this type, direct or implied (in specified layer(s))."""
if (self.id == type.id):
return True
parents = GetTargets( Unit.GetUnit("rdfs:subClassOf"), self, layers )
if type in parents:
return True
else:
for p in parents:
if p.subClassOf(type, layers):
return True
return False
def directInstanceOf(self, type, layers='core'):
"""Boolean, true if the unit has a direct typeOf (aka rdf:type) property matching this type, direct or implied (in specified layer(s))."""
mytypes = GetTargets( Unit.GetUnit("rdf:type"), self, layers )
if type in mytypes:
return True
return False # TODO: consider an API for implied types too?
def isClass(self, layers='core'):
"""Does this unit represent a class/type?"""
if self.typeFlags.has_key('c'):
return self.typeFlags['c']
isClass = self.typeOf(Unit.GetUnit("rdfs:Class"), layers=EVERYLAYER)
self.typeFlags['c'] = isClass
return isClass
def isAttribute(self, layers='core'):
"""Does this unit represent an attribute/property?"""
if self.typeFlags.has_key('p'):
return self.typeFlags['p']
isProp = self.typeOf(Unit.GetUnit("rdf:Property"), layers=EVERYLAYER)
self.typeFlags['p'] = isProp
return isProp
def isEnumeration(self, layers='core'):
"""Does this unit represent an enumerated type?"""
if self.typeFlags.has_key('e'):
return self.typeFlags['e']
isE = self.subClassOf(Unit.GetUnit("Enumeration"), layers=EVERYLAYER)
self.typeFlags['e'] = isE
return isE
def isEnumerationValue(self, layers='core'):
"""Does this unit represent a member of an enumerated type?"""
if self.typeFlags.has_key('ev'):
return self.typeFlags['ev']
types = GetTargets(Unit.GetUnit("rdf:type"), self , layers=EVERYLAYER)
log.debug("isEnumerationValue() called on %s, found %s types. layers: %s" % (self.id, str( len( types ) ), layers ) )
found_enum = False
for t in types:
if t.subClassOf(Unit.GetUnit("Enumeration"), layers=EVERYLAYER):
found_enum = True
break
self.typeFlags['ev'] = found_enum
return found_enum
def isDataType(self, layers='core'):
"""
Does this unit represent a DataType type or sub-type?
DataType and its children do not descend from Thing, so we need to
treat it specially.
"""
if self.typeFlags.has_key('d'):
return self.typeFlags['d']
ret = False
if (self.directInstanceOf(Unit.GetUnit("DataType"), layers=layers)):
ret = True
else:
subs = GetTargets(Unit.GetUnit("rdf:type"), self, layers=layers)
subs += GetTargets(Unit.GetUnit("rdfs:subClassOf"), self, layers=layers)
for p in subs:
if p.isDataType(layers=layers):
ret = True
break
self.typeFlags['d'] = ret
return ret
@staticmethod
def storePrefix(prefix):
"""Stores the prefix declaration for a given class or property"""
# Currently defined just to let the tests pass
pass
# e.g. <http://schema.org/actors> <http://schema.org/supersededBy> <http://schema.org/actor> .
def superseded(self, layers='core'):
"""Has this property been superseded? (i.e. deprecated/archaic), in any of these layers."""
supersededBy_values = GetTargets( Unit.GetUnit("supersededBy"), self, layers )
return ( len(supersededBy_values) > 0)
def supersedes(self, layers='core'):
"""Returns a property (assume max 1) that is supersededBy this one, or nothing."""
olderterms = GetSources( Unit.GetUnit("supersededBy"), self, layers )
if len(olderterms) > 0:
return olderterms[0]
else:
return None
def supersedes_all(self, layers='core'):
"""Returns terms that is supersededBy by this later one, or nothing. (in this layer)"""
return(GetSources( Unit.GetUnit("supersededBy"), self, layers ))
# so we want sources of arcs pointing here with 'supersededBy'
# e.g. vendor supersededBy seller ; returns newer 'seller' for earlier 'vendor'.
def supersededBy(self, layers='core'):
"""Returns a property (assume max 1) that supersededs this one, or nothing."""
newerterms = GetTargets( Unit.GetUnit("supersededBy"), self, layers )
if len(newerterms)>0:
return newerterms.pop()
else:
return None
def category(self):
return self.category
def getHomeLayer(self,defaultToCore=False):
ret = self.home
if ret == None:
if defaultToCore:
ret = 'core'
else:
log.info("WARNING %s has no home extension defined!!" % self.id)
ret = ""
return ret
def superproperties(self, layers='core'):
"""Returns super-properties of this one."""
if not self.isAttribute(layers=layers):
logging.debug("Non-property %s won't have subproperties." % self.id)
return None
superprops = GetTargets(Unit.GetUnit("rdfs:subPropertyOf"),self, layers=layers )
return superprops
def subproperties(self, layers='core'):
"""Returns direct subproperties of this property."""
if not self.isAttribute(layers=layers):
logging.debug("Non-property %s won't have subproperties." % self.id)
return None
subprops = GetSources(Unit.GetUnit("rdfs:subPropertyOf"),self, layers=layers )
return subprops
def inverseproperty(self, layers="core"):
"""A property that is an inverseOf this one, e.g. alumni vs alumniOf."""
a = GetTargets(Unit.GetUnit("inverseOf"), self, layers=layers)
b = GetSources(Unit.GetUnit("inverseOf"), self, layers=layers)
if len(a)>0:
return a.pop()
else:
if len(b) > 0:
return b.pop()
else:
return None
def UsageStr (self) :
str = GetUsage(self.id)
if (str == '1') :
return "Between 10 and 100 domains"
elif (str == '2'):
return "Between 100 and 1000 domains"
elif (str == '3'):
return "Between 1000 and 10,000 domains"
elif (str == '4'):
return "Between 10,000 and 50,000 domains"
elif (str == '5'):
return "Between 50,000 and 100,000 domains"
elif (str == '7'):
return "Between 100,000 and 250,000 domains"
elif (str == '8'):
return "Between 250,000 and 500,000 domains"
elif (str == '9'):
return "Between 500,000 and 1,000,000 domains"
elif (str == '10'):
return "Over 1,000,000 domains"
else:
return "Fewer than 10 domains"
# NOTE: each Triple is in exactly one layer, by default 'core'. When we
# read_schemas() from data/ext/{x}/*.rdfa each schema triple is given a
# layer named "x". Access to triples can default to layer="core" or take
# a custom layer or layers, e.g. layers="bib", or layers=["bib", "foo"].
# This is verbose but at least explicit. If we move towards making better
# use of external templates for site generation we could reorganize.
# For now e.g. 'grep GetSources api.py| grep -v layer' and
# 'grep GetTargets api.py| grep -v layer' etc. can check for non-layered usage.
#
# Units, on the other hand, are layer-independent. For now we have only a
# crude inLayer(layerlist, unit) API to check which layers mention a term.
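# Illustrative sketch of layered access (the term "CreativeWork" and the "bib"
# layer are only examples; assumes the relevant graphs have been loaded):
#
#   work = Unit.GetUnit("CreativeWork", True)
#   # Triples recorded in the 'core' layer only:
#   GetTargets(Unit.GetUnit("rdfs:comment", True), work, layers='core')
#   # Triples from core plus the 'bib' extension:
#   GetTargets(Unit.GetUnit("rdfs:comment", True), work, layers=['core', 'bib'])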
class Triple ():
"""Triple represents an edge in the graph: source, arc and target/text."""
def __init__ (self, source, arc, target, text, layer='core'):
"""Triple constructor keeps state via source node's arcsOut."""
self.source = source
source.arcsOut.append(self)
self.arc = arc
self.layer = layer
self.id = self
if (target != None):
self.target = target
self.text = None
target.arcsIn.append(self)
elif (text != None):
self.text = text
self.target = None
def __str__ (self):
ret = ""
if self.source != None:
ret += "%s " % self.source
if self.target != None:
ret += "%s " % self.target
if self.arc != None:
ret += "%s " % self.arc
return ret
@staticmethod
def AddTriple(source, arc, target, layer='core'):
"""AddTriple stores a thing-valued new Triple within source Unit."""
if (source == None or arc == None or target == None):
log.info("Bailing")
return
else:
# for any term mentioned as subject or object, we register the layer
# TODO: make this into a function
x = all_terms.get(source.id) # subjects
if x is None:
x = []
if layer not in x:
x.append(layer)
all_terms[source.id]= x
x = all_terms.get(target.id) # objects
if x is None:
x = []
if layer not in x:
x.append(layer)
all_terms[target.id]= x
return Triple(source, arc, target, None, layer)
@staticmethod
def AddTripleText(source, arc, text, layer='core'):
"""AddTriple stores a string-valued new Triple within source Unit."""
if (source == None or arc == None or text == None):
return
else:
return Triple(source, arc, None, text, layer)
def GetTargets(arc, source, layers='core'):
"""All values for a specified arc on specified graph node (within any of the specified layers)."""
# log.debug("GetTargets checking in layer: %s for unit: %s arc: %s" % (layers, source.id, arc.id))
targets = {}
try:
for triple in source.arcsOut:
if (triple.arc == arc):
if (triple.target != None and (layers == EVERYLAYER or triple.layer in layers)):
targets[triple.target] = 1
elif (triple.text != None and (layers == EVERYLAYER or triple.layer in layers)):
targets[triple.text] = 1
return targets.keys()
except Exception as e:
log.debug("GetTargets caught exception %s" % e)
return []
def GetSources(arc, target, layers='core'):
"""All source nodes for a specified arc pointing to a specified node (within any of the specified layers)."""
#log.debug("GetSources checking in layer: %s for unit: %s arc: %s" % (layers, target.id, arc.id))
if(target.sourced == False):
apirdflib.rdfGetSourceTriples(target)
sources = {}
for triple in target.arcsIn:
if (triple.arc == arc and (layers == EVERYLAYER or triple.layer in layers)):
sources[triple.source] = 1
return sources.keys()
def GetArcsIn(target, layers='core'):
"""All incoming arc types for this specified node (within any of the specified layers)."""
arcs = {}
for triple in target.arcsIn:
if (layers == EVERYLAYER or triple.layer in layers):
arcs[triple.arc] = 1
return arcs.keys()
def GetArcsOut(source, layers='core'):
"""All outgoing arc types for this specified node."""
arcs = {}
for triple in source.arcsOut:
if (layers == EVERYLAYER or triple.layer in layers):
arcs[triple.arc] = 1
return arcs.keys()
# Utility API
def GetComment(node, layers='core') :
"""Get the first rdfs:comment we find on this node (or "No comment"), within any of the specified layers."""
tx = GetComments(node, layers)
if len(tx) > 0:
return MD.parse(tx[0])
else:
return "No comment"
def GetComments(node, layers='core') :
"""Get the rdfs:comment(s) we find on this node within any of the specified layers."""
return GetTargets(Unit.GetUnit("rdfs:comment", True), node, layers=layers )
def GetsoftwareVersions(node, layers='core') :
"""Get the schema:softwareVersion(s) we find on this node (or [] ), within any of the specified layers."""
return GetTargets(Unit.GetUnit("softwareVersion", True), node, layers=layers )
def GetImmediateSubtypes(n, layers='core'):
"""Get this type's immediate subtypes, i.e. that are subClassOf this."""
if n==None:
return None
subs = GetSources( Unit.GetUnit("rdfs:subClassOf", True), n, layers=layers)
if (n.isDataType() or n.id == "DataType"):
subs += GetSources( Unit.GetUnit("rdf:type", True), n, layers=layers)
subs.sort(key=lambda x: x.id)
return subs
def GetImmediateSupertypes(n, layers='core'):
"""Get this type's immediate supertypes, i.e. that we are subClassOf."""
if n==None:
return None
sups = GetTargets( Unit.GetUnit("rdfs:subClassOf", True), n, layers=layers)
if (n.isDataType() or n.id == "DataType"):
sups += GetTargets( Unit.GetUnit("rdf:type", True), n, layers=layers)
sups.sort(key=lambda x: x.id)
return sups
Utc = "util_cache"
def GetAllTypes(layers='core'):
    """Return all types in the graph."""
    global Utc
KEY = "AllTypes:%s" % layers
if DataCache.get(KEY+'x',Utc):
logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Thing", True)
subbed = {}
todo = [mynode]
while todo:
current = todo.pop()
subs = GetImmediateSubtypes(current, EVERYLAYER)
if inLayer(layers,current):
subbed[current] = 1
for sc in subs:
if subbed.get(sc.id) == None:
todo.append(sc)
DataCache.put(KEY,subbed.keys(),Utc)
return subbed.keys()
def GetAllDataTypes(layers='core'):
    """Return all datatypes in the graph."""
    global Utc
KEY = "AllDataTypes:%s" % layers
if DataCache.get(KEY+'x',Utc):
logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("DataType", True)
subbed = {}
todo = [mynode]
while todo:
current = todo.pop()
subs = GetImmediateSubtypes(current, EVERYLAYER)
if inLayer(layers,current):
subbed[current] = 1
for sc in subs:
if subbed.get(sc.id) == None:
todo.append(sc)
DataCache.put(KEY,subbed.keys(),Utc)
return subbed.keys()
def GetAllEnumerationValues(layers='core'):
global Utc
KEY = "AllEnums:%s" % layers
if DataCache.get(KEY,Utc):
logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Enumeration", True)
enums = {}
subbed = {}
todo = [mynode]
while todo:
current = todo.pop()
subs = GetImmediateSubtypes(current, EVERYLAYER)
subbed[current] = 1
for sc in subs:
vals = GetSources( Unit.GetUnit("rdf:type", True), sc, layers=EVERYLAYER)
for val in vals:
if inLayer(layers,val):
enums[val] = 1
if subbed.get(sc.id) == None:
todo.append(sc)
DataCache.put(KEY,enums.keys(),Utc)
return enums.keys()
def GetAllProperties(layers='core'):
"""Return all properties in the graph."""
global Utc
KEY = "AllProperties:%s" % layers
if DataCache.get(KEY,Utc):
logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Thing")
props = GetSources(Unit.GetUnit("rdf:type", True), Unit.GetUnit("rdf:Property", True), layers=EVERYLAYER)
res = []
for prop in props:
if inLayer(layers,prop):
res.append(prop)
sorted_all_properties = sorted(res, key=lambda u: u.id)
DataCache.put(KEY,sorted_all_properties,Utc)
return sorted_all_properties
def GetAllTerms(layers='core',includeDataTypes=False):
ret = GetAllTypes(layers)
ret.extend(GetAllEnumerationValues(layers))
ret.extend(GetAllProperties(layers))
if includeDataTypes:
ret.extend(GetAllDataTypes(layers))
return sorted(ret,key=lambda u: u.id)
def GetParentList(start_unit, end_unit=None, path=[], layers='core'):
"""
Returns one or more lists, each giving a path from a start unit to a supertype parent unit.
example:
for path in GetParentList( Unit.GetUnit("Restaurant") ):
pprint.pprint(', '.join([str(x.id) for x in path ]))
'Restaurant, FoodEstablishment, LocalBusiness, Organization, Thing'
'Restaurant, FoodEstablishment, LocalBusiness, Place, Thing'
"""
if not end_unit:
end_unit = Unit.GetUnit("Thing")
arc=Unit.GetUnit("rdfs:subClassOf")
logging.debug("from %s to %s - path length %d" % (start_unit.id, end_unit.id, len(path) ) )
path = path + [start_unit]
if start_unit == end_unit:
return [path]
if not Unit.GetUnit(start_unit.id):
return []
paths = []
for node in GetTargets(arc, start_unit, layers=layers):
if node not in path:
newpaths = GetParentList(node, end_unit, path, layers=layers)
for newpath in newpaths:
paths.append(newpath)
return paths
def HasMultipleBaseTypes(typenode, layers='core'):
"""True if this unit represents a type with more than one immediate supertype."""
return len( GetTargets( Unit.GetUnit("rdfs:subClassOf", True), typenode, layers ) ) > 1
EXAMPLES = {}
ExamplesCount = 0
class Example ():
@staticmethod
def AddExample(terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core'):
"""
Add an Example (via constructor registering it with the terms that it
mentions, i.e. stored in term.examples).
"""
# todo: fix partial examples: if (len(terms) > 0 and len(original_html) > 0 and (len(microdata) > 0 or len(rdfa) > 0 or len(jsonld) > 0)):
typeinfo = "".join( [" %s " % t for t in terms] )
if "FakeEntryNeeded" in typeinfo or terms==[]:
return
if (len(terms) > 0 and len(original_html) > 0 and len(microdata) > 0 and len(rdfa) > 0 and len(jsonld) > 0):
return Example(terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core')
else:
log.info("API AddExample skipped a case due to missing value(s) in example. Target terms: %s ORIG: %s MICRODATA: %s RDFA: %s JSON: %s EGMETA: %s " % ( typeinfo, original_html, microdata, rdfa, jsonld, egmeta ) )
    def get(self, name, layers='core'):
        """Exposes original_html, microdata, rdfa and jsonld versions (in the layer(s) specified)."""
if name == 'original_html':
return self.original_html
if name == 'microdata':
return self.microdata
if name == 'rdfa':
return self.rdfa
if name == 'jsonld':
return self.jsonld
def __init__ (self, terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core'):
"""Example constructor, registers itself with the relevant Unit(s)."""
global EXAMPLES,ExamplesCount
ExamplesCount += 1
        self.orderId = ExamplesCount #Used to maintain consistency of display order
self.terms = terms
self.original_html = original_html
self.microdata = microdata
self.rdfa = rdfa
self.jsonld = jsonld
self.egmeta = egmeta
self.layer = layer
for term in terms:
if "id" in egmeta:
logging.debug("Created Example with ID %s and type %s" % ( egmeta["id"], term ))
if(EXAMPLES.get(term, None) == None):
EXAMPLES[term] = []
EXAMPLES.get(term).append(self)
def LoadExamples(node, layers='core'):
"""Returns the examples (if any) for some Unit node."""
#log.info("Getting examples for: %s" % node.id)
if(node.examples == None):
node.examples = EXAMPLES.get(node.id)
if(node.examples == None):
node.examples = []
return node.examples
USAGECOUNTS = {}
def StoreUsage(id,count):
USAGECOUNTS[id] = count
def GetUsage(id):
return USAGECOUNTS.get(id,0)
def GetExtMappingsRDFa(node, layers='core'):
"""Self-contained chunk of RDFa HTML markup with mappings for this term."""
if (node.isClass()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentClass"), node, layers=layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
if (c.id.startswith('http')):
markup = markup + "<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % c.id
else:
markup = markup + "<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % c.id
return markup
if (node.isAttribute()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentProperty"), node, layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
markup = markup + "<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c.id
return markup
return "<!-- no external mappings noted for this term. -->"
def GetJsonLdContext(layers='core'):
"""Generates a basic JSON-LD context file for schema.org."""
# Caching assumes the context is neutral w.r.t. our hostname.
if DataCache.get('JSONLDCONTEXT'):
log.debug("DataCache: recycled JSONLDCONTEXT")
return DataCache.get('JSONLDCONTEXT')
else:
global namespaces
jsonldcontext = "{\n \"@context\": {\n"
jsonldcontext += " \"type\": \"@type\",\n"
jsonldcontext += " \"id\": \"@id\",\n"
jsonldcontext += " \"@vocab\": \"http://schema.org/\",\n"
jsonldcontext += namespaces
url = Unit.GetUnit("URL")
date = Unit.GetUnit("Date")
datetime = Unit.GetUnit("DateTime")
# properties = sorted(GetSources(Unit.GetUnit("rdf:type",True), Unit.GetUnit("rdf:Property",True), layers=getAllLayersList()), key=lambda u: u.id)
# for p in properties:
for t in GetAllTerms(EVERYLAYER,includeDataTypes=True):
if t.isClass(EVERYLAYER) or t.isEnumeration(EVERYLAYER) or t.isEnumerationValue(EVERYLAYER) or t.isDataType(EVERYLAYER):
jsonldcontext += " \"" + t.id + "\": {\"@id\": \"schema:" + t.id + "\"},"
elif t.isAttribute(EVERYLAYER):
range = GetTargets(Unit.GetUnit("rangeIncludes"), t, layers=EVERYLAYER)
type = None
if url in range:
type = "@id"
elif date in range:
type = "Date"
elif datetime in range:
type = "DateTime"
typins = ""
if type:
typins = ", \"@type\": \"" + type + "\""
jsonldcontext += " \"" + t.id + "\": { \"@id\": \"schema:" + t.id + "\"" + typins + "},"
jsonldcontext += "}}\n"
jsonldcontext = jsonldcontext.replace("},}}","}\n }\n}")
jsonldcontext = jsonldcontext.replace("},","},\n")
DataCache.put('JSONLDCONTEXT',jsonldcontext)
log.debug("DataCache: added JSONLDCONTEXT")
return jsonldcontext
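# Hedged illustration of the generated context (fragment only; "birthDate" is a core
# property whose range includes Date, so it picks up an "@type" coercion):
#
#   {
#     "@context": {
#       "type": "@type",
#       "id": "@id",
#       "@vocab": "http://schema.org/",
#       "Thing": {"@id": "schema:Thing"},
#       "birthDate": { "@id": "schema:birthDate", "@type": "Date"}
#     }
#   }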
#### UTILITIES
def inLayer(layerlist, node):
"""Does a unit get its type mentioned in a layer?"""
if (node is None):
return False
if len(GetTargets(Unit.GetUnit("rdf:type"), node, layers=layerlist) ) > 0:
log.debug("Found typeOf for node %s in layers: %s" % (node.id, layerlist ))
return True
if len(GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layerlist) ) > 0:
# TODO: should we really test for any mention of a term, not just typing?
return True
return False
def read_file (filename):
"""Read a file from disk, return it as a single string."""
strs = []
file_path = full_path(filename)
import codecs
log.debug("READING FILE: filename=%s file_path=%s " % (filename, file_path ) )
for line in codecs.open(file_path, 'r', encoding="utf8").readlines():
strs.append(line)
return "".join(strs)
def full_path(filename):
"""convert local file name to full path."""
import os.path
folder = os.path.dirname(os.path.realpath(__file__))
return os.path.join(folder, filename)
def setHomeValues(items,layer='core',defaultToCore=False):
global extensionLoadErrors
for node in items:
if(node == None):
continue
home = GetTargets( Unit.GetUnit("isPartOf"), node, layer )
if(len(home) > 0):
if(node.home != None):
msg = "ERROR: %s trying to overwite home from %s to %s" % (node.id,node.home,home[0].id)
log.info(msg)
extensionLoadErrors += msg + '\n'
else:
h = home[0].id.strip()
if h.startswith("http://"):
h = h[7:]
node.home = re.match( r'([\w\-_]+)[\.:]?', h).group(1)
if(node.home == 'schema'):
node.home = 'core'
elif node.home == None:
if defaultToCore:
node.home = "core"
else:
msg = "ERROR: %s has no home defined" % (node.id)
log.info(msg)
extensionLoadErrors += msg + '\n'
def read_schemas(loadExtensions=False):
"""Read/parse/ingest schemas from data/*.rdfa. Also data/*examples.txt"""
import os.path
import glob
import re
global schemasInitialized
if (not schemasInitialized or DYNALOAD):
log.info("(re)loading core and annotations.")
files = glob.glob("data/*.rdfa")
jfiles = glob.glob("data/*.jsonld")
for jf in jfiles:
rdfequiv = jf[:-7]+".rdfa"
if not rdfequiv in files: #Only add .jsonld files if no equivalent .rdfa
files.append(jf)
file_paths = []
for f in files:
file_paths.append(full_path(f))
apirdflib.load_graph('core',file_paths)
files = glob.glob("data/*examples.txt")
read_examples(files,'core')
files = glob.glob("data/2015-04-vocab_counts.txt")
for file in files:
usage_data = read_file(file)
parser = parsers.UsageFileParser(None)
parser.parse(usage_data)
schemasInitialized = True
def read_extensions(extensions):
import os.path
import glob
import re
global extensionsLoaded
extfiles = []
expfiles = []
if not extensionsLoaded: #2nd load will throw up errors and duplicate terms
log.info("(re)scanning for extensions %s " % extensions)
for i in extensions:
all_layers[i] = "1"
extfiles = glob.glob("data/ext/%s/*.rdfa" % i)
jextfiles = glob.glob("data/ext/%s/*.jsonld" % i)
for jf in jextfiles:
rdfequiv = jf[:-7]+".rdfa"
if not rdfequiv in extfiles: #Only add .jsonld files if no equivalent .rdfa
extfiles.append(jf)
# log.info("FILES: %s" % extfiles)
file_paths = []
for f in extfiles:
file_paths.append(full_path(f))
apirdflib.load_graph(i,file_paths)
expfiles = glob.glob("data/ext/%s/*examples.txt" % i)
read_examples(expfiles,i)
log.info("Extensions found: %s ." % " , ".join(extfiles) )
# fnstrip_re = re.compile("\/.*")
# for ext in extfiles:
# ext_file_path = full_path(ext)
# extid = ext.replace('data/ext/', '')
# extid = re.sub(fnstrip_re,'',extid)
# log.info("Preparing to parse extension data: %s as '%s'" % (ext_file_path, "%s" % extid))
extensionsLoaded = True
def read_examples(files, layer):
example_contents = []
for f in files:
example_content = read_file(f)
example_contents.append(example_content)
log.debug("examples loaded from: %s" % f)
parser = parsers.ParseExampleFile(None,layer=layer)
parser.parse(example_contents)
def StripHtmlTags(source):
if source and len(source) > 0:
return re.sub('<[^<]+?>', '', source)
return ""
def ShortenOnSentence(source,lengthHint=250):
if source and len(source) > lengthHint:
source = source.strip()
sentEnd = re.compile('[.!?]')
sentList = sentEnd.split(source)
com=""
count = 0
while count < len(sentList):
if(count > 0 ):
if len(com) < len(source):
com += source[len(com)]
com += sentList[count]
count += 1
if count == len(sentList):
if len(com) < len(source):
com += source[len(source) - 1]
if len(com) > lengthHint:
if len(com) < len(source):
com += source[len(com)]
break
if len(source) > len(com) + 1:
com += ".."
source = com
return source
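# Hedged usage sketch (not part of the original module; safe to remove). It shows the
# intended composition of the two helpers above when rendering term descriptions.
def _demo_shorten_description():
    raw = "<p>First sentence about a term. Second sentence with more detail. Third.</p>"
    text = StripHtmlTags(raw)  # drops the <p> tags, keeps the sentences
    return ShortenOnSentence(text, lengthHint=40)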
class MarkdownTool():
def __init__ (self):
import markdown
from markdown.extensions.wikilinks import WikiLinkExtension
self._md = markdown.Markdown(extensions=[WikiLinkExtension(base_url='/', end_url='', html_class='localLink')])
def parse(self,source,preservePara=False):
if not source or len(source) == 0:
return ""
source = source.strip()
source = source.replace("\\n","\n")
ret = self._md.reset().convert(source)
if not preservePara:
#Remove wrapping <p> </p> that Markdown adds by default
if len(ret) > 7 and ret.startswith("<p>") and ret.endswith("</p>"):
ret = ret[3:len(ret)-4]
return ret
MD = MarkdownTool()
|
|
#!/usr/bin/env python
# 12.01.2007, c
"""
Probe finite element solutions in points defined by various geometrical probes.
Generation mode
---------------
python probe.py [generation options] <input file> <results file>
Probe the data in the results file corresponding to the problem defined in the
input file. The input file options must contain 'gen_probes' and 'probe_hook'
keys, pointing to proper functions accessible from the input file scope.
For each probe returned by `gen_probes()` a data plot figure and a text
file with the data plotted are saved, see the options below.
Generation options
------------------
-o, --auto-dir, --same-dir, -f, --only-names, -s
Postprocessing mode
-------------------
python probe.py [postprocessing options] <probe file> <figure file>
Read previously probed data from the probe text file, re-plot it,
and integrate it along the probe.
Postprocessing options
----------------------
--postprocess, --radial, --only-names
Notes
-----
For extremely thin hexahedral elements the Newton iteration for finding the
reference element coordinates might converge to a spurious solution outside
of the element. To obtain some values even in this case, try increasing the
--close-limit option value.
"""
from __future__ import absolute_import
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import sfepy
from sfepy.base.base import output, assert_
from sfepy.base.ioutils import edit_filename
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.discrete.fem import MeshIO
from sfepy.discrete.probes import write_results, read_results
import six
helps = {
'debug':
'automatically start debugger when an exception is raised',
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'output_format' :
'output figure file format (supported by the matplotlib backend used) '\
'[default: %(default)s]',
'auto_dir' :
'the directory of the results file is determined automatically using the '\
'"output_dir" option in input file options',
'same_dir' :
'store the probe figures/data in the directory of the results file',
'only_names' :
'probe only named data',
'step' :
'probe the given time step',
'close_limit' :
'maximum limit distance of a point from the closest element allowed'
' for extrapolation. [default: %(default)s]',
'postprocess' :
'postprocessing mode',
'radial' :
'assume radial integration',
}
def generate_probes(filename_input, filename_results, options,
conf=None, problem=None, probes=None, labels=None,
probe_hooks=None):
"""
Generate probe figures and data files.
"""
if conf is None:
required, other = get_standard_keywords()
conf = ProblemConf.from_file(filename_input, required, other)
opts = conf.options
if options.auto_dir:
output_dir = opts.get_('output_dir', '.')
filename_results = os.path.join(output_dir, filename_results)
output('results in: %s' % filename_results)
io = MeshIO.any_from_filename(filename_results)
step = options.step if options.step >= 0 else io.read_last_step()
all_data = io.read_data(step)
output('loaded:', list(all_data.keys()))
output('from step:', step)
if options.only_names is None:
data = all_data
else:
data = {}
for key, val in six.iteritems(all_data):
if key in options.only_names:
data[key] = val
if problem is None:
problem = Problem.from_conf(conf,
init_equations=False, init_solvers=False)
if probes is None:
gen_probes = conf.get_function(conf.options.gen_probes)
probes, labels = gen_probes(problem)
if probe_hooks is None:
probe_hooks = {None : conf.get_function(conf.options.probe_hook)}
if options.output_filename_trunk is None:
options.output_filename_trunk = problem.ofn_trunk
filename_template = options.output_filename_trunk \
+ ('_%%d.%s' % options.output_format)
if options.same_dir:
filename_template = os.path.join(os.path.dirname(filename_results),
filename_template)
output_dir = os.path.dirname(filename_results)
for ip, probe in enumerate(probes):
output(ip, probe.name)
probe.set_options(close_limit=options.close_limit)
for key, probe_hook in six.iteritems(probe_hooks):
out = probe_hook(data, probe, labels[ip], problem)
if out is None: continue
if isinstance(out, tuple):
fig, results = out
            else:
                fig = out
                results = None
if key is not None:
                filename = edit_filename(filename_template % ip, suffix='_%s' % key)
else:
filename = filename_template % ip
if fig is not None:
if isinstance(fig, dict):
for fig_name, fig_fig in six.iteritems(fig):
fig_filename = edit_filename(filename,
suffix='_' + fig_name)
fig_fig.savefig(fig_filename)
output('figure ->', os.path.normpath(fig_filename))
else:
fig.savefig(filename)
output('figure ->', os.path.normpath(filename))
if results is not None:
txt_filename = edit_filename(filename, new_ext='.txt')
write_results(txt_filename, probe, results)
output('data ->', os.path.normpath(txt_filename))
def integrate_along_line(x, y, is_radial=False):
"""
Integrate numerically (trapezoidal rule) a function :math:`y=y(x)`.
If is_radial is True, multiply each :math:`y` by :math:`4 \pi x^2`.
"""
dx = nm.diff(x)
ay = 0.5 * (y[:-1] + y[1:])
if is_radial:
ax = 0.5 * (x[:-1] + x[1:])
val = 4.0 * nm.pi * nm.sum(ay * dx * (ax**2))
else:
val = nm.sum(ay * dx)
return val
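def _integrate_along_line_example():
    """
    Hedged usage sketch (not part of the original script): integrate y = x on
    [0, 1] with the helper above; the exact value is 0.5 (radial variant: pi).
    """
    x = nm.linspace(0.0, 1.0, 101)
    return integrate_along_line(x, x), integrate_along_line(x, x, is_radial=True)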
def postprocess(filename_input, filename_results, options):
"""
Postprocess probe data files - replot, integrate data.
"""
from matplotlib import pyplot as plt
header, results = read_results(filename_input,
only_names=options.only_names)
output(header)
fig = plt.figure()
for name, result in six.iteritems(results):
pars, vals = result[:, 0], result[:, 1]
ii = nm.where(nm.isfinite(vals))[0]
# Nans only at the edges.
assert_(nm.diff(ii).sum() == (len(ii)-1))
val = integrate_along_line(pars[ii], vals[ii], options.radial)
label = r'%s: $\int\ %s' % (name, name)
if options.radial:
label += ' (r)'
label += '$ = %.5e'% val
plt.plot(pars, vals, label=label, lw=0.2, marker='+', ms=1)
plt.ylabel('probed data')
plt.xlabel('probe coordinate')
output(label)
plt.legend()
fig.savefig(filename_results)
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version',
version='%(prog)s ' + sfepy.__version__)
parser.add_argument('--debug',
action='store_true', dest='debug',
default=False, help=helps['debug'])
parser.add_argument('-o', metavar='filename',
action='store', dest='output_filename_trunk',
default=None, help=helps['filename'])
parser.add_argument('--auto-dir',
action='store_true', dest='auto_dir',
default=False, help=helps['auto_dir'])
parser.add_argument('--same-dir',
action='store_true', dest='same_dir',
default=False, help=helps['same_dir'])
parser.add_argument('-f', '--format', metavar='format',
action='store', dest='output_format',
default='png', help=helps['output_format'])
parser.add_argument('--only-names', metavar='list of names',
action='store', dest='only_names',
default=None, help=helps['only_names'])
parser.add_argument('-s', '--step', type=int, metavar='step',
action='store', dest='step',
default=0, help=helps['step'])
parser.add_argument('-c', '--close-limit', type=float, metavar='distance',
action='store', dest='close_limit',
default=0.1, help=helps['close_limit'])
parser.add_argument('-p', '--postprocess',
action='store_true', dest='postprocess',
default=False, help=helps['postprocess'])
parser.add_argument('--radial',
action='store_true', dest='radial',
default=False, help=helps['radial'])
parser.add_argument('filename_in')
parser.add_argument('filename_out')
options = parser.parse_args()
if options.debug:
from sfepy.base.base import debug_on_error; debug_on_error()
filename_input = options.filename_in
filename_results = options.filename_out
if options.only_names is not None:
options.only_names = options.only_names.split(',')
output.prefix = 'probe:'
if options.postprocess:
postprocess(filename_input, filename_results, options)
else:
generate_probes(filename_input, filename_results, options)
if __name__ == '__main__':
main()
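# Hedged invocation sketch (file names are hypothetical):
#
#   python probe.py --auto-dir --same-dir input.py results.vtk        # generation mode
#   python probe.py --postprocess --radial probe_00.txt probe_00.png  # postprocessing mode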
|
|
import time
import os.path
import bencode
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from pyquery.pyquery import PyQuery
from bibliotik.settings import BIBLIOTIK_GET_TORRENT_URL
from home.models import TransTorrentBase
from what_transcode.utils import get_info_hash_from_data
EBOOK_FORMATS = ['EPUB', 'PDF', 'MOBI', 'AZW3', 'DJVU', 'CBR', 'CHM', 'TXT']
LANGUAGES = ['English', 'Irish', 'German', 'French', 'Spanish', 'Italian', 'Latin', 'Japanese',
'Danish', 'Swedish', 'Norwegian', 'Dutch', 'Russian', 'Polish', 'Portuguese', 'Greek',
'Turkish', 'Hungarian', 'Korean', 'Chinese', 'Thai', 'Indonesian', 'Arabic']
def load_bibliotik_data(bibliotik_client, torrent_id):
exception = None
for i in xrange(3):
try:
response = bibliotik_client.session.get(
BIBLIOTIK_GET_TORRENT_URL.format(torrent_id), allow_redirects=False)
if response.status_code != 200:
raise Exception('Getting bibliotik data returned HTTP {0}'
.format(response.status_code))
return response.text
except Exception as ex:
print u'Error while retrieving bibliotik data. Will retry: {0}'.format(ex)
time.sleep(2)
exception = ex
raise exception
class BibliotikTorrent(models.Model):
info_hash = models.CharField(max_length=40, db_index=True)
retrieved = models.DateTimeField()
category = models.CharField(max_length=32)
format = models.CharField(max_length=16)
retail = models.BooleanField(default=False)
pages = models.IntegerField()
language = models.CharField(max_length=32)
isbn = models.CharField(max_length=16)
cover_url = models.TextField()
tags = models.TextField()
publisher = models.TextField()
year = models.IntegerField()
author = models.TextField()
title = models.TextField()
html_page = models.TextField()
torrent_filename = models.TextField(null=True)
torrent_file = models.BinaryField(null=True)
@cached_property
def publisher_list(self):
return self.publisher.split(';')
def __unicode__(self):
return u'BibliotikTorrent id={0} hash={1}'.format(self.id, self.info_hash)
def import_bibliotik_data(self, bibliotik_client):
self.html_page = load_bibliotik_data(bibliotik_client, self.id)
self.parse_html_page()
def parse_html_page(self):
pq = PyQuery(self.html_page)
authors = []
for author in pq('p#creatorlist a').items():
authors.append(author.text())
self.author = ', '.join(authors)
self.title = pq('h1#title').text()
if not self.title:
raise Exception(u'Title should not be empty.')
self.category = pq('h1#title > img:first-child').attr('title')
details = pq('p#details_content_info').text().split(', ')
self.format = details[0]
details = details[1:]
if self.category == u'Ebooks':
assert self.format in EBOOK_FORMATS, u'Unknown eBook format {0}'.format(self.format)
elif self.category == u'Applications':
pass
elif self.category == u'Articles':
pass
elif self.category == u'Audiobooks':
pass
elif self.category == u'Comics':
pass
elif self.category == u'Journals':
pass
elif self.category == u'Magazines':
pass
else:
raise Exception(u'Unknown category {0}'.format(self.category))
if details[0] == u'Retail':
self.retail = True
details = details[1:]
else:
self.retail = False
if details[0].endswith(u'pages'):
self.pages = int(details[0][:-len(u'pages') - 1])
details = details[1:]
else:
self.pages = 0
if details[0].split(' ')[0] in LANGUAGES:
parts = details[0].split(' ')
details = details[1:]
self.language = parts[0]
parts = parts[1:]
if len(parts):
assert parts[0][0] == '(' and parts[0][-1] == ')', u'Unknown string after language'
self.isbn = parts[0][1:-1]
parts = parts[1:]
else:
self.isbn = ''
else:
self.language = ''
assert len(details) == 0, u'All details must be parsed: {0}'.format(', '.join(details))
self.cover_url = pq('div#sidebar > a[rel="lightbox"] > img').attr('src') or ''
self.tags = ', '.join(i.text() for i in pq('span.taglist > a').items())
publisher_year = pq('p#published').text()
if publisher_year:
assert publisher_year.startswith('Published '), \
"Publisher doesn't start with Published"
publisher_year = publisher_year[len('Published '):]
if publisher_year.startswith('by '):
publisher_year = publisher_year[len('by '):]
self.publisher = ';'.join(i.text() for i in pq('p#published > a').items())
assert self.publisher, 'Publisher can not be empty'
publisher_mod = ' , '.join(i.text() for i in pq('p#published > a').items())
            assert publisher_year.startswith(publisher_mod), \
                'publisher_year does not start with the publisher list'
publisher_year = publisher_year[len(publisher_mod) + 1:]
else:
self.publisher = ''
if publisher_year:
assert publisher_year.startswith('in '), 'Invalid publisher_year'
publisher_year = publisher_year[len('in '):]
self.year = int(publisher_year)
else:
self.year = 0
@staticmethod
def get_or_create(bibliotik_client, torrent_id):
try:
return BibliotikTorrent.objects.get(id=torrent_id)
except BibliotikTorrent.DoesNotExist:
if not bibliotik_client:
raise Exception('We do not have the BibliotikTorrent, but no client was provided.')
torrent = BibliotikTorrent(
id=torrent_id,
retrieved=timezone.now(),
)
torrent.import_bibliotik_data(bibliotik_client)
torrent.download_torrent_file(bibliotik_client)
torrent.info_hash = get_info_hash_from_data(torrent.torrent_file)
torrent.save()
return torrent
def download_torrent_file(self, bibliotik_client):
for i in xrange(3):
try:
self._download_torrent_file(bibliotik_client)
return
except Exception as ex:
exception = ex
time.sleep(2)
raise exception
def _download_torrent_file(self, bibliotik_client):
if self.torrent_file is not None:
return
filename, torrent_file = bibliotik_client.download_torrent(self.id)
bencode.bdecode(torrent_file)
self.torrent_filename = filename
self.torrent_file = torrent_file
class BibliotikTransTorrent(TransTorrentBase):
bibliotik_torrent = models.ForeignKey(BibliotikTorrent)
@property
def path(self):
return os.path.join(self.location.path, unicode(self.bibliotik_torrent.id))
def __unicode__(self):
return u'BibliotikTransTorrent(torrent_id={0}, bibliotik_id={1}, name={2})'.format(
self.torrent_id, self.bibliotik_torrent_id, self.torrent_name)
class BibliotikFulltext(models.Model):
info = models.TextField()
more_info = models.TextField()
def update(self, bibliotik_torrent):
info = u'{0} - {1}'.format(bibliotik_torrent.author, bibliotik_torrent.title)
more_info = ' '.join([
', '.join(bibliotik_torrent.publisher_list),
bibliotik_torrent.isbn,
str(bibliotik_torrent.year),
bibliotik_torrent.format,
bibliotik_torrent.tags
])
if self.info != info or self.more_info != more_info:
self.info = info
self.more_info = more_info
self.save()
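# Hedged usage sketch (illustrative; 12345 is a hypothetical torrent id and
# bibliotik_client is assumed to be an authenticated client instance):
#
#   torrent = BibliotikTorrent.get_or_create(bibliotik_client, 12345)
#   print torrent.title, torrent.author, torrent.format
#
# get_or_create only scrapes the site when the row is not already in the database.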
|
|
import numpy as np
from math import ceil
import tensorflow as tf
# import backprojecting_layer.backprojecting_op as backproject_op
# import backprojecting_layer.backprojecting_op_grad
# import projecting_layer.projecting_op as project_op
# import projecting_layer.projecting_op_grad
# import computing_label_layer.computing_label_op as compute_label_op
# import computing_flow_layer.computing_flow_op as compute_flow_op
# import computing_flow_layer.computing_flow_op_grad
# import triplet_loss.triplet_loss_op as triplet_loss_op
import triplet_flow_loss.triplet_flow_loss_op as triplet_flow_loss_op
from triplet_flow_loss import triplet_flow_loss_op_grad
# from gru2d import GRU2DCell
# from gru2d_original import GRUCell
# from gru3d import GRU3DCell
# from vanilla2d import Vanilla2DCell
# from add2d import Add2DCell
# import custom_gradient_checker as gradient_checker
# from tensorflow.python.framework import constant_op
DEFAULT_PADDING = 'SAME'
def layer(op):
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.inputs)==0:
raise RuntimeError('No input variables found for layer %s.'%name)
elif len(self.inputs)==1:
layer_input = self.inputs[0]
else:
layer_input = list(self.inputs)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
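# Hedged usage sketch (illustrative only; the layer names below are hypothetical):
# a subclass wires its graph in setup() by chaining the decorated methods, each of
# which consumes self.inputs and feeds its own output forward.
#
#   class ExampleNet(Network):
#       def setup(self):
#           (self.feed('data')
#                .conv(3, 3, 64, 1, 1, name='conv1')
#                .max_pool(2, 2, 2, 2, name='pool1')
#                .softmax(name='prob'))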
class Network(object):
def __init__(self, inputs, trainable=True):
self.inputs = []
self.layers = dict(inputs)
self.trainable = trainable
self.setup()
def setup(self):
raise NotImplementedError('Must be subclassed.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path).item()
for op_name in data_dict:
print op_name
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].iteritems():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
# try to assign dual weights
with tf.variable_scope(op_name+'_p', reuse=True):
for param_name, data in data_dict[op_name].iteritems():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
with tf.variable_scope(op_name+'_d', reuse=True):
for param_name, data in data_dict[op_name].iteritems():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
assert len(args)!=0
self.inputs = []
for layer in args:
if isinstance(layer, basestring):
try:
layer = self.layers[layer]
print layer
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
self.inputs.append(layer)
return self
def feed_additional(self, *args):
assert len(args)!=0
for layer in args:
if isinstance(layer, basestring):
try:
layer = self.layers[layer]
print layer
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
self.inputs.append(layer)
return self
def get_output(self, layer):
try:
layer = self.layers[layer]
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
return layer
def get_unique_name(self, prefix):
id = sum(t.startswith(prefix) for t,_ in self.layers.items())+1
return '%s_%d'%(prefix, id)
def make_var(self, name, shape, initializer=None, trainable=True):
return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)
def validate_padding(self, padding):
assert padding in ('SAME', 'VALID')
def make_deconv_filter(self, name, f_shape, trainable=True):
width = f_shape[0]
        height = f_shape[1]
f = ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
            for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights, dtype=tf.float32)
var = tf.get_variable(name, shape=weights.shape, initializer=init, trainable=trainable)
return var
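    # Hedged note (added): for a 4x4 filter the 1-D bilinear profile above works out
    # to [0.25, 0.75, 0.75, 0.25]; the 2-D weights are its outer product, copied onto
    # the diagonal of the channel dimensions, i.e. a per-channel 2x bilinear
    # upsampling kernel for the deconv layer below.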
@layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, reuse=None, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True, biased=True, c_i=-1, elu=False):
self.validate_padding(padding)
if isinstance(input, tuple):
input = input[0]
if c_i == -1:
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name, reuse=reuse) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
kernel = self.make_var('weights', [k_h, k_w, c_i/group, c_o], init_weights, trainable)
if group==1:
output = convolve(input, kernel)
else:
                input_groups = tf.split(input, group, axis=3)
                kernel_groups = tf.split(kernel, group, axis=3)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                output = tf.concat(output_groups, axis=3)
# Add the biases
if biased:
init_biases = tf.constant_initializer(0.0)
biases = self.make_var('biases', [c_o], init_biases, trainable)
output = tf.nn.bias_add(output, biases)
if relu:
if elu:
output = tf.nn.elu(output, name=scope.name)
else:
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def conv3d(self, input, k_d, k_h, k_w, c_i, c_o, s_d, s_h, s_w, name, reuse=None, relu=True, padding=DEFAULT_PADDING, trainable=True):
self.validate_padding(padding)
if isinstance(input, tuple):
input = input[0]
with tf.variable_scope(name, reuse=reuse) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_d, k_h, k_w, c_i, c_o], init_weights, trainable)
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = tf.nn.conv3d(input, kernel, [1, s_d, s_h, s_w, 1], padding=padding)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
@layer
def deconv(self, input, k_h, k_w, c_o, s_h, s_w, name, reuse=None, padding=DEFAULT_PADDING, trainable=True, c_i=None):
# type: (any, int, int, int, int, int, str, bool, str, bool) -> any
"""
:param input: input tensor
:param k_h: height
:param k_w: width
:param c_o: output depth
:param s_h: vertical stride
:param s_w: horizontal stride
:param name: layer name
:param reuse: should weights be reused from another layer with the same name?
:param padding:
:param trainable:
:return:
"""
self.validate_padding(padding)
if c_i is None:
c_i = input.get_shape()[-1]
with tf.variable_scope(name, reuse=reuse) as scope:
# Compute shape out of input
in_shape = tf.shape(input)
h = in_shape[1] * s_h
w = in_shape[2] * s_w
new_shape = [in_shape[0], h, w, c_o]
output_shape = tf.stack(new_shape)
# filter
f_shape = [k_h, k_w, c_o, c_i]
weights = self.make_deconv_filter('weights', f_shape, trainable)
return tf.nn.conv2d_transpose(input, weights, output_shape, [1, s_h, s_w, 1], padding=padding, name=scope.name)
# @layer
# def backproject(self, input, grid_size, kernel_size, threshold, name):
# return backproject_op.backproject(input[0], input[1], input[2], input[3], input[4], grid_size, kernel_size, threshold, name=name)
# @layer
# def compute_flow(self, input, kernel_size, threshold, max_weight, name):
# return compute_flow_op.compute_flow(input[0], input[1], input[2], input[3], input[4], kernel_size, threshold, max_weight, name=name)
# @layer
# def triplet_loss(self, input, margin, name):
# return triplet_loss_op.triplet_loss(input[0], input[1], tf.cast(input[2], tf.int32), margin, name=name)
@layer
def triplet_flow_loss(self, input, margin, negative_radius, name, positive_radius=1):
output = triplet_flow_loss_op.triplet_flow_loss(self.get_output(input[0]), self.get_output(input[1]),
self.get_output(input[2]), self.get_output(input[3]),
self.get_output(input[4]), self.get_output(input[5]),
margin, positive_radius, negative_radius, name=name)
return output
# @layer
# def project(self, input, kernel_size, threshold, name):
# return project_op.project(input[0], input[1], input[2], kernel_size, threshold, name=name)
# @layer
# def compute_label(self, input, name):
# return compute_label_op.compute_label(input[0], input[1], input[2], name=name)
# @layer
# def rnn_gru2d(self, input, num_units, channels, name, reuse=None):
# with tf.variable_scope(name, reuse=reuse) as scope:
# gru2d = GRU2DCell(num_units, channels)
# return gru2d(input[0], input[1][0], input[1][1], scope)
#
# @layer
# def rnn_gru2d_original(self, input, num_units, channels, name, reuse=None):
# with tf.variable_scope(name, reuse=reuse) as scope:
# gru2d = GRUCell(num_units, channels)
# return gru2d(input[0], input[1][0], input[1][1], scope)
#
# @layer
# def rnn_gru3d(self, input, num_units, channels, name, reuse=None):
# with tf.variable_scope(name, reuse=reuse) as scope:
# gru3d = GRU3DCell(num_units, channels)
# return gru3d(input[0][0], input[0][2], input[1], scope)
#
# @layer
# def rnn_vanilla2d(self, input, num_units, channels, name, reuse=None):
# with tf.variable_scope(name, reuse=reuse) as scope:
# vanilla2d = Vanilla2DCell(num_units, channels)
# return vanilla2d(input[0], input[1], scope)
#
# @layer
# def rnn_add2d(self, input, num_units, channels, step, name, reuse=None):
# with tf.variable_scope(name, reuse=reuse) as scope:
# add2d = Add2DCell(num_units, channels)
# return add2d(input[0], input[1], step, scope)
@layer
def sigmoid(self, input, name):
return tf.nn.sigmoid(input, name=name)
@layer
def relu(self, input, name):
return tf.nn.relu(input, name=name)
@layer
def lrelu(self, input, name, leak=0.2):
return tf.maximum(input, leak * input)
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def max_pool_int(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.cast(tf.nn.max_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding), dtype=tf.int32,
name=name)
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def cast(self, input, target_dtype, name):
return tf.cast(input, target_dtype, name=name)
@layer
def round(self, input, name):
return tf.round(input, name=name)
@layer
def lrn(self, input, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
return tf.concat(axis=axis, values=inputs, name=name)
@layer
def add(self, inputs, name):
return tf.add_n(inputs, name=name)
@layer
def add_immediate(self, input, value, name):
return tf.add(input, value, name=name)
@layer
def mult_immediate(self, input, value, name):
return tf.multiply(input, value, name=name)
@layer
def div_immediate(self, input, value, name):
return tf.divide(input, value, name=name)
@layer
def subtract(self, inputs, name):
assert len(inputs) == 2, "subtract must receive two input layers"
return tf.subtract(inputs[0], inputs[1], name=name)
@layer
def multiply_sum(self, inputs, num_classes, name):
prob = tf.reshape(inputs[0], [-1, num_classes])
image = tf.matmul(prob, inputs[1])
input_shape = tf.shape(inputs[0])
return tf.reshape(image, [input_shape[0], input_shape[1], input_shape[2], 3])
@layer
def l2_normalize(self, input, dim, name):
return tf.nn.l2_normalize(input, dim, name=name)
@layer
def fc(self, input, num_out, name, num_in=-1, height=-1, width=-1, channel=-1, reuse=None, relu=True, trainable=True):
with tf.variable_scope(name, reuse=reuse) as scope:
# only use the first input
if isinstance(input, tuple):
input = input[0]
if height > 0 and width > 0 and channel > 0:
input_shape = tf.shape(input)
input = tf.reshape(input, [input_shape[0], height, width, channel])
input_shape = input.get_shape()
if input_shape.ndims == 4:
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(input, [-1, dim])
else:
if num_in == -1:
feed_in, dim = (input, int(input_shape[-1]))
else:
feed_in, dim = (input, int(num_in))
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
init_biases = tf.constant_initializer(0.0)
weights = self.make_var('weights', [dim, num_out], init_weights, trainable)
biases = self.make_var('biases', [num_out], init_biases, trainable)
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=scope.name)
return fc
@layer
def reshape(self, input, shape, name):
return tf.reshape(input, shape, name)
@layer
def argmax_3d(self, input, name):
return tf.argmax(input, 4, name)
@layer
def argmax_2d(self, input, name):
return tf.argmax(input, 3, name)
@layer
def tanh(self, input, name):
# only use the first input
if isinstance(input, tuple):
input = input[0]
return tf.nn.tanh(input, name)
@layer
def softmax(self, input, name):
# only use the first input
if isinstance(input, tuple):
input = input[0]
return tf.nn.softmax(input, name)
@layer
def log_softmax(self, input, name):
# only use the first input
if isinstance(input, tuple):
input = input[0]
return tf.nn.log_softmax(input, name=name)
@layer
def softmax_high_dimension(self, input, num_classes, name):
# only use the first input
if isinstance(input, tuple):
input = input[0]
input_shape = input.get_shape()
ndims = input_shape.ndims
array = np.ones(ndims)
array[-1] = num_classes
m = tf.reduce_max(input, reduction_indices=[ndims-1], keep_dims=True)
multiples = tf.convert_to_tensor(array, dtype=tf.int32)
e = tf.exp(tf.subtract(input, tf.tile(m, multiples)))
s = tf.reduce_sum(e, reduction_indices=[ndims-1], keep_dims=True)
return tf.div(e, tf.tile(s, multiples))
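    # Hedged equivalence sketch (numpy, illustrative only): the op above is the usual
    # numerically stable softmax over the last axis, i.e. roughly
    #
    #   m = x.max(axis=-1, keepdims=True)
    #   e = np.exp(x - m)
    #   softmax = e / e.sum(axis=-1, keepdims=True)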
@layer
def log_softmax_high_dimension(self, input, num_classes, name):
# only use the first input
if isinstance(input, tuple):
input = input[0]
input_shape = input.get_shape()
ndims = input_shape.ndims
array = np.ones(ndims)
array[-1] = num_classes
m = tf.reduce_max(input, reduction_indices=[ndims-1], keep_dims=True)
multiples = tf.convert_to_tensor(array, dtype=tf.int32)
d = tf.subtract(input, tf.tile(m, multiples))
e = tf.exp(d)
s = tf.reduce_sum(e, reduction_indices=[ndims-1], keep_dims=True)
return tf.subtract(d, tf.log(tf.tile(s, multiples)))
@layer
def batch_normalization(self, input, name, scale_offset=False, relu=False, reuse=None, trainable=True):
# NOTE: Currently, only inference is supported
with tf.variable_scope(name, reuse=reuse) as scope:
shape = [input.get_shape()[-1]]
if scale_offset:
scale = self.make_var('scale', shape=shape, trainable=trainable)
offset = self.make_var('offset', shape=shape, trainable=trainable)
else:
scale, offset = (None, None)
output = tf.nn.batch_normalization(
input,
mean=self.make_var('mean', shape=shape, initializer=tf.constant_initializer(0.0), trainable=trainable),
variance=self.make_var('variance', shape=shape, initializer=tf.constant_initializer(1.0), trainable=trainable),
offset=offset,
scale=scale,
# TODO: This is the default Caffe batch norm eps
# Get the actual eps from parameters
variance_epsilon=1e-5,
name=name)
if relu:
output = tf.nn.relu(output)
return output
@layer
def batch_norm(self, input, name, c_i=-1, momentum=0.9, epsilon=1e-5, is_training=True, relu=False, reuse=None):
if c_i != -1:
input_shape = tf.shape(input)
input = tf.reshape(input, [input_shape[0], input_shape[1], input_shape[2], c_i])
with tf.variable_scope(name, reuse=reuse) as scope:
output = tf.contrib.layers.batch_norm(input,
decay=momentum,
updates_collections=None,
epsilon=epsilon,
scale=True,
is_training=is_training,
scope=scope)
if relu:
output = tf.nn.relu(output)
return output
@layer
def dropout(self, input, keep_prob, name):
if isinstance(input, tuple):
input = input[0]
return tf.nn.dropout(input, keep_prob, name=name)
def make_3d_spatial_filter(self, name, size, channel, theta):
depth = size
height = size
width = size
kernel = np.zeros([size, size, size])
c = size / 2
for d in range(depth):
for h in range(height):
for w in range(width):
kernel[d, h, w] = np.exp( -1 * ((d - c) * (d - c) + (h - c) * (h - c) + (w - c) * (w - c)) / (2.0 * theta * theta) )
kernel[c, c, c] = 0
weights = np.zeros([size, size, size, channel, channel])
for i in range(channel):
weights[:, :, :, i, i] = kernel
init = tf.constant_initializer(value=weights, dtype=tf.float32)
var = tf.get_variable(name, shape=weights.shape, initializer=init, trainable=False)
return var
@layer
def meanfield_3d(self, input, num_classes, name, reuse=None, trainable=True):
# only use the first input
if isinstance(input, tuple):
input = input[0]
with tf.variable_scope(name, reuse=reuse) as scope:
# softmax
'''
input_shape = input.get_shape()
ndims = input_shape.ndims
array = np.ones(ndims)
array[-1] = num_classes
m = tf.reduce_max(input, reduction_indices=[ndims-1], keep_dims=True)
multiples = tf.convert_to_tensor(array, dtype=tf.int32)
e = tf.exp(tf.sub(input, tf.tile(m, multiples)))
s = tf.reduce_sum(e, reduction_indices=[ndims-1], keep_dims=True)
Q = tf.div(e, tf.tile(s, multiples))
'''
# message passing
weights_message = self.make_3d_spatial_filter('weights_message', 3, num_classes, 0.8)
message = tf.nn.conv3d(input, weights_message, [1, 1, 1, 1, 1], padding=DEFAULT_PADDING)
# compatibility transform
kernel = np.zeros([1, 1, 1, num_classes, num_classes])
for i in range(num_classes):
kernel[0, 0, 0, i, i] = 1
init_weights = tf.constant_initializer(value=kernel, dtype=tf.float32)
weights_comp = self.make_var('weights_comp', [1, 1, 1, num_classes, num_classes], init_weights, trainable)
compatibility = tf.nn.conv3d(message, weights_comp, [1, 1, 1, 1, 1], padding=DEFAULT_PADDING)
# add unary potential
return input + compatibility
def make_2d_spatial_filter(self, name, size, channel, theta):
height = size
width = size
kernel = np.zeros([size, size])
c = size / 2
for h in range(height):
for w in range(width):
kernel[h, w] = np.exp( -1 * ((h - c) * (h - c) + (w - c) * (w - c)) / (2.0 * theta * theta) )
kernel[c, c] = 0
weights = np.zeros([size, size, channel, channel])
for i in range(channel):
weights[:, :, i, i] = kernel
init = tf.constant_initializer(value=weights, dtype=tf.float32)
var = tf.get_variable(name, shape=weights.shape, initializer=init, trainable=False)
return var
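    # Hedged note (added): the filter above is an unnormalized Gaussian in the pixel
    # offset with its centre tap zeroed, so the mean-field message passing below
    # aggregates neighbouring class beliefs without a self-loop; classes do not mix
    # here (the compatibility transform is a separate 1x1 convolution).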
@layer
def meanfield_2d(self, input, num_steps, num_classes, name, reuse=None, trainable=True):
# only use the first input
if isinstance(input, tuple):
input = input[0]
input_shape = input.get_shape()
ndims = input_shape.ndims
array = np.ones(ndims)
array[-1] = num_classes
multiples = tf.convert_to_tensor(array, dtype=tf.int32)
unary = input
for i in range(num_steps):
if i > 0:
reuse = True
with tf.variable_scope(name, reuse=reuse) as scope:
# softmax
m = tf.reduce_max(unary, reduction_indices=[ndims-1], keep_dims=True)
                e = tf.exp(tf.subtract(unary, tf.tile(m, multiples)))
s = tf.reduce_sum(e, reduction_indices=[ndims-1], keep_dims=True)
Q = tf.div(e, tf.tile(s, multiples))
# message passing
weights_message = self.make_2d_spatial_filter('weights_message', 3, num_classes, 0.8)
message = tf.nn.conv2d(Q, weights_message, [1, 1, 1, 1], padding=DEFAULT_PADDING)
# compatibility transform
kernel = np.zeros([1, 1, num_classes, num_classes])
for i in range(num_classes):
kernel[0, 0, i, i] = 1
init_weights = tf.constant_initializer(value=kernel, dtype=tf.float32)
weights_comp = self.make_var('weights_comp', [1, 1, num_classes, num_classes], init_weights, trainable)
compatibility = tf.nn.conv2d(message, weights_comp, [1, 1, 1, 1], padding=DEFAULT_PADDING)
# add unary potential
unary = unary + compatibility
return unary
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo.serialization import jsonutils
from nova.api.openstack import compute
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
stub_instance = fakes.stub_instance
API_DISK_CONFIG = 'OS-DCF:diskConfig'
def instance_addresses(context, instance_id):
return None
class DiskConfigTestCaseV21(test.TestCase):
def setUp(self):
super(DiskConfigTestCaseV21, self).setUp()
self._set_up_app()
self._setup_fake_image_service()
fakes.stub_out_nw_api(self.stubs)
FAKE_INSTANCES = [
fakes.stub_instance(1,
uuid=MANUAL_INSTANCE_UUID,
auto_disk_config=False),
fakes.stub_instance(2,
uuid=AUTO_INSTANCE_UUID,
auto_disk_config=True)
]
def fake_instance_get(context, id_):
for instance in FAKE_INSTANCES:
if id_ == instance['id']:
return instance
self.stubs.Set(db, 'instance_get', fake_instance_get)
def fake_instance_get_by_uuid(context, uuid,
columns_to_join=None, use_slave=False):
for instance in FAKE_INSTANCES:
if uuid == instance['uuid']:
return instance
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_instance_get_all(context, *args, **kwargs):
return FAKE_INSTANCES
self.stubs.Set(db, 'instance_get_all', fake_instance_get_all)
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all)
self.stubs.Set(objects.Instance, 'save',
lambda *args, **kwargs: None)
def fake_rebuild(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
def fake_instance_create(context, inst_, session=None):
inst = fake_instance.fake_db_instance(**{
'id': 1,
'uuid': AUTO_INSTANCE_UUID,
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'progress': 0,
'name': 'instance-1', # this is a property
'task_state': '',
'vm_state': '',
'auto_disk_config': inst_['auto_disk_config'],
'security_groups': inst_['security_groups'],
'instance_type': flavors.get_default_flavor(),
})
def fake_instance_get_for_create(context, id_, *args, **kwargs):
return (inst, inst)
self.stubs.Set(db, 'instance_update_and_get_original',
fake_instance_get_for_create)
def fake_instance_get_all_for_create(context, *args, **kwargs):
return [inst]
self.stubs.Set(db, 'instance_get_all',
fake_instance_get_all_for_create)
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_for_create)
def fake_instance_add_security_group(context, instance_id,
security_group_id):
pass
self.stubs.Set(db,
'instance_add_security_group',
fake_instance_add_security_group)
return inst
self.stubs.Set(db, 'instance_create', fake_instance_create)
def _set_up_app(self):
self.app = compute.APIRouterV21(init_only=('servers', 'images',
'os-disk-config'))
def _get_expected_msg_for_invalid_disk_config(self):
return ('{{"badRequest": {{"message": "Invalid input for'
' field/attribute {0}. Value: {1}. u\'{1}\' is'
' not one of [\'AUTO\', \'MANUAL\']", "code": 400}}}}')
def _setup_fake_image_service(self):
self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
self.stubs)
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image = {'id': '88580842-f50a-11e2-8d3a-f23c91aec05e',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'size': '74185822',
'properties': {'auto_disk_config': 'Disabled'}}
self.image_service.create(None, image)
def tearDown(self):
super(DiskConfigTestCaseV21, self).tearDown()
nova.tests.unit.image.fake.FakeImageService_reset()
def assertDiskConfig(self, dict_, value):
self.assertIn(API_DISK_CONFIG, dict_)
self.assertEqual(dict_[API_DISK_CONFIG], value)
def test_show_server(self):
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'MANUAL')
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % AUTO_INSTANCE_UUID)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'AUTO')
def test_detail_servers(self):
req = fakes.HTTPRequest.blank('/fake/servers/detail')
res = req.get_response(self.app)
server_dicts = jsonutils.loads(res.body)['servers']
expectations = ['MANUAL', 'AUTO']
for server_dict, expected in zip(server_dicts, expectations):
self.assertDiskConfig(server_dict, expected)
def test_show_image(self):
req = fakes.HTTPRequest.blank(
'/fake/images/a440c04b-79fa-479c-bed1-0b816eaec379')
res = req.get_response(self.app)
image_dict = jsonutils.loads(res.body)['image']
self.assertDiskConfig(image_dict, 'MANUAL')
req = fakes.HTTPRequest.blank(
'/fake/images/70a599e0-31e7-49b7-b260-868f441e862b')
res = req.get_response(self.app)
image_dict = jsonutils.loads(res.body)['image']
self.assertDiskConfig(image_dict, 'AUTO')
def test_detail_image(self):
req = fakes.HTTPRequest.blank('/fake/images/detail')
res = req.get_response(self.app)
image_dicts = jsonutils.loads(res.body)['images']
expectations = ['MANUAL', 'AUTO']
for image_dict, expected in zip(image_dicts, expectations):
# NOTE(sirp): image fixtures 6 and 7 are setup for
# auto_disk_config testing
if image_dict['id'] in (6, 7):
self.assertDiskConfig(image_dict, expected)
def test_create_server_override_auto(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
API_DISK_CONFIG: 'AUTO'
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'AUTO')
def test_create_server_override_manual(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
API_DISK_CONFIG: 'MANUAL'
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'MANUAL')
def test_create_server_detect_from_image(self):
"""If user doesn't pass in diskConfig for server, use image metadata
to specify AUTO or MANUAL.
"""
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'flavorRef': '1',
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'MANUAL')
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
'flavorRef': '1',
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'AUTO')
def test_create_server_detect_from_image_disabled_goes_to_manual(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
'flavorRef': '1',
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'MANUAL')
def test_create_server_errors_when_disabled_and_auto(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
'flavorRef': '1',
API_DISK_CONFIG: 'AUTO'
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_create_server_when_disabled_and_manual(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
'flavorRef': '1',
API_DISK_CONFIG: 'MANUAL'
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'MANUAL')
def _test_update_server_disk_config(self, uuid, disk_config):
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % uuid)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {API_DISK_CONFIG: disk_config}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, disk_config)
def test_update_server_override_auto(self):
self._test_update_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
def test_update_server_override_manual(self):
self._test_update_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
def test_update_server_invalid_disk_config(self):
# Return BadRequest if user passes an invalid diskConfig value.
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {API_DISK_CONFIG: 'server_test'}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
expected_msg = self._get_expected_msg_for_invalid_disk_config()
self.assertEqual(expected_msg.format(API_DISK_CONFIG, 'server_test'),
res.body)
def _test_rebuild_server_disk_config(self, uuid, disk_config):
req = fakes.HTTPRequest.blank(
'/fake/servers/%s/action' % uuid)
req.method = 'POST'
req.content_type = 'application/json'
body = {"rebuild": {
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
API_DISK_CONFIG: disk_config
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, disk_config)
def test_rebuild_server_override_auto(self):
self._test_rebuild_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
def test_rebuild_server_override_manual(self):
self._test_rebuild_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
def test_create_server_with_auto_disk_config(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
API_DISK_CONFIG: 'AUTO'
}}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIn('auto_disk_config', kwargs)
self.assertEqual(True, kwargs['auto_disk_config'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'AUTO')
def test_rebuild_server_with_auto_disk_config(self):
req = fakes.HTTPRequest.blank(
'/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
req.method = 'POST'
req.content_type = 'application/json'
body = {"rebuild": {
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
API_DISK_CONFIG: 'AUTO'
}}
def rebuild(*args, **kwargs):
self.assertIn('auto_disk_config', kwargs)
self.assertEqual(True, kwargs['auto_disk_config'])
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
server_dict = jsonutils.loads(res.body)['server']
self.assertDiskConfig(server_dict, 'AUTO')
def test_resize_server_with_auto_disk_config(self):
req = fakes.HTTPRequest.blank(
'/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
req.method = 'POST'
req.content_type = 'application/json'
body = {"resize": {
"flavorRef": "3",
API_DISK_CONFIG: 'AUTO'
}}
def resize(*args, **kwargs):
self.assertIn('auto_disk_config', kwargs)
self.assertEqual(True, kwargs['auto_disk_config'])
self.stubs.Set(compute_api.API, 'resize', resize)
req.body = jsonutils.dumps(body)
req.get_response(self.app)
class DiskConfigTestCaseV2(DiskConfigTestCaseV21):
def _set_up_app(self):
self.flags(verbose=True,
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Disk_config'])
self.app = compute.APIRouter(init_only=('servers', 'images'))
def _get_expected_msg_for_invalid_disk_config(self):
return ('{{"badRequest": {{"message": "{0} must be either'
' \'MANUAL\' or \'AUTO\'.", "code": 400}}}}')
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.nn.parameter import Parameter
from bigg.common.pytorch_util import glorot_uniform, MLP, BinaryTreeLSTMCell
from tqdm import tqdm
from bigg.model.util import AdjNode, ColAutomata, AdjRow
from bigg.model.tree_clib.tree_lib import TreeLib
from bigg.torch_ops import multi_index_select, PosEncoding
def hc_multi_select(ids_from, ids_to, h_froms, c_froms):
h_vecs = multi_index_select(ids_from,
ids_to,
*h_froms)
c_vecs = multi_index_select(ids_from,
ids_to,
*c_froms)
return h_vecs, c_vecs
def tree_state_select(h_bot, c_bot, h_buf, c_buf, fn_all_ids):
bot_froms, bot_tos, prev_froms, prev_tos = fn_all_ids()
if h_buf is None or prev_tos is None:
h_vecs = multi_index_select([bot_froms], [bot_tos], h_bot)
c_vecs = multi_index_select([bot_froms], [bot_tos], c_bot)
elif h_bot is None or bot_tos is None:
h_vecs = multi_index_select([prev_froms], [prev_tos], h_buf)
c_vecs = multi_index_select([prev_froms], [prev_tos], c_buf)
else:
h_vecs, c_vecs = hc_multi_select([bot_froms, prev_froms],
[bot_tos, prev_tos],
[h_bot, h_buf], [c_bot, c_buf])
return h_vecs, c_vecs
def batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, fn_all_ids, cell):
h_list = []
c_list = []
for i in range(2):
h_vecs, c_vecs = tree_state_select(h_bot, c_bot, h_buf, c_buf, lambda : fn_all_ids(i))
h_list.append(h_vecs)
c_list.append(c_vecs)
return cell((h_list[0], c_list[0]), (h_list[1], c_list[1]))
def batch_tree_lstm3(h_bot, c_bot, h_buf, c_buf, h_past, c_past, fn_all_ids, cell):
if h_past is None:
return batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, lambda i: fn_all_ids(i)[:-2], cell)
elif h_bot is None:
return batch_tree_lstm2(h_buf, c_buf, h_past, c_past, lambda i: fn_all_ids(i)[2:], cell)
elif h_buf is None:
# pick the (bot, past) id pairs; the original tuple index [0, 1, 4, 5] would raise TypeError
return batch_tree_lstm2(h_bot, c_bot, h_past, c_past, lambda i: [fn_all_ids(i)[j] for j in (0, 1, 4, 5)], cell)
else:
h_list = []
c_list = []
for i in range(2):
bot_froms, bot_tos, prev_froms, prev_tos, past_froms, past_tos = fn_all_ids(i)
h_vecs, c_vecs = hc_multi_select([bot_froms, prev_froms, past_froms],
[bot_tos, prev_tos, past_tos],
[h_bot, h_buf, h_past],
[c_bot, c_buf, c_past])
h_list.append(h_vecs)
c_list.append(c_vecs)
return cell((h_list[0], c_list[0]), (h_list[1], c_list[1]))
class FenwickTree(nn.Module):
def __init__(self, args):
super(FenwickTree, self).__init__()
self.init_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.init_c0 = Parameter(torch.Tensor(1, args.embed_dim))
glorot_uniform(self)
self.merge_cell = BinaryTreeLSTMCell(args.embed_dim)
self.summary_cell = BinaryTreeLSTMCell(args.embed_dim)
if args.pos_enc:
self.pos_enc = PosEncoding(args.embed_dim, args.device, args.pos_base)
else:
self.pos_enc = lambda x: 0
def reset(self, list_states=[]):
self.list_states = []
for l in list_states:
t = []
for e in l:
t.append(e)
self.list_states.append(t)
def append_state(self, state, level):
if level >= len(self.list_states):
num_aug = level - len(self.list_states) + 1
for i in range(num_aug):
self.list_states.append([])
self.list_states[level].append(state)
def forward(self, new_state=None):
if new_state is None:
if len(self.list_states) == 0:
return (self.init_h0, self.init_c0)
else:
self.append_state(new_state, 0)
pos = 0
while pos < len(self.list_states):
if len(self.list_states[pos]) >= 2:
lch_state, rch_state = self.list_states[pos] # assert the length is 2
new_state = self.merge_cell(lch_state, rch_state)
self.list_states[pos] = []
self.append_state(new_state, pos + 1)
pos += 1
state = None
for pos in range(len(self.list_states)):
if len(self.list_states[pos]) == 0:
continue
cur_state = self.list_states[pos][0]
if state is None:
state = cur_state
else:
state = self.summary_cell(state, cur_state)
return state
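# The merging above behaves like incrementing a binary counter (Fenwick-tree
# style): each new row state lands in level 0, and whenever a level holds two
# states they are merged and carried to the next level. A rough trace
# (illustration only, writing merge_cell(a, b) as m(a, b)):
#
#     append s1 -> levels: [[s1]]
#     append s2 -> levels: [[], [m(s1, s2)]]
#     append s3 -> levels: [[s3], [m(s1, s2)]]
#     append s4 -> levels: [[], [], [m(m(s1, s2), m(s3, s4))]]
#
# so summarizing the history of n rows only ever touches O(log n) stored states.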
def forward_train(self, h_bot, c_bot, h_buf0, c_buf0, prev_rowsum_h, prev_rowsum_c):
# embed row tree
tree_agg_ids = TreeLib.PrepareRowEmbed()
row_embeds = [(self.init_h0, self.init_c0)]
if h_bot is not None:
row_embeds.append((h_bot, c_bot))
if prev_rowsum_h is not None:
row_embeds.append((prev_rowsum_h, prev_rowsum_c))
if h_buf0 is not None:
row_embeds.append((h_buf0, c_buf0))
th_bot = h_bot
tc_bot = c_bot
for i, all_ids in enumerate(tree_agg_ids):
fn_ids = lambda x: all_ids[x]
if i:
th_bot = tc_bot = None
new_states = batch_tree_lstm3(th_bot, tc_bot,
row_embeds[-1][0], row_embeds[-1][1],
prev_rowsum_h, prev_rowsum_c,
fn_ids, self.merge_cell)
row_embeds.append(new_states)
h_list, c_list = zip(*row_embeds)
joint_h = torch.cat(h_list, dim=0)
joint_c = torch.cat(c_list, dim=0)
# get history representation
init_select, all_ids, last_tos, next_ids, pos_info = TreeLib.PrepareRowSummary()
cur_state = (joint_h[init_select], joint_c[init_select])
ret_state = (joint_h[next_ids], joint_c[next_ids])
hist_rnn_states = []
hist_froms = []
hist_tos = []
for i, (done_from, done_to, proceed_from, proceed_input) in enumerate(all_ids):
hist_froms.append(done_from)
hist_tos.append(done_to)
hist_rnn_states.append(cur_state)
next_input = joint_h[proceed_input], joint_c[proceed_input]
sub_state = cur_state[0][proceed_from], cur_state[1][proceed_from]
cur_state = self.summary_cell(sub_state, next_input)
hist_rnn_states.append(cur_state)
hist_froms.append(None)
hist_tos.append(last_tos)
hist_h_list, hist_c_list = zip(*hist_rnn_states)
pos_embed = self.pos_enc(pos_info)
row_h = multi_index_select(hist_froms, hist_tos, *hist_h_list) + pos_embed
row_c = multi_index_select(hist_froms, hist_tos, *hist_c_list) + pos_embed
return (row_h, row_c), ret_state
class BitsRepNet(nn.Module):
def __init__(self, args):
super(BitsRepNet, self).__init__()
self.bits_compress = args.bits_compress
self.out_dim = args.embed_dim
assert self.out_dim >= self.bits_compress
self.device = args.device
def forward(self, on_bits, n_cols):
h = torch.zeros(1, self.out_dim).to(self.device)
h[0, :n_cols] = -1.0
h[0, on_bits] = 1.0
return h, h
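# Shape illustration (assumption about sizes only): with embed_dim=8,
# on_bits=[0, 2] and n_cols=4, forward returns (h, h) with
#     h = [[1., -1., 1., -1., 0., 0., 0., 0.]]
# i.e. the first n_cols entries encode the row bitmap (+1 edge, -1 no edge)
# and the remaining embed_dim - n_cols entries stay zero.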
class RecurTreeGen(nn.Module):
def __init__(self, args):
super(RecurTreeGen, self).__init__()
self.directed = args.directed
self.self_loop = args.self_loop
self.bits_compress = args.bits_compress
self.greedy_frac = args.greedy_frac
self.share_param = args.share_param
if not self.bits_compress:
self.leaf_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.leaf_c0 = Parameter(torch.Tensor(1, args.embed_dim))
self.empty_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.empty_c0 = Parameter(torch.Tensor(1, args.embed_dim))
self.topdown_left_embed = Parameter(torch.Tensor(2, args.embed_dim))
self.topdown_right_embed = Parameter(torch.Tensor(2, args.embed_dim))
glorot_uniform(self)
if self.bits_compress > 0:
self.bit_rep_net = BitsRepNet(args)
if self.share_param:
self.m_l2r_cell = BinaryTreeLSTMCell(args.embed_dim)
self.lr2p_cell = BinaryTreeLSTMCell(args.embed_dim)
self.pred_has_ch = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_pred_has_left = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_pred_has_right = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_cell_topdown = nn.LSTMCell(args.embed_dim, args.embed_dim)
self.m_cell_topright = nn.LSTMCell(args.embed_dim, args.embed_dim)
else:
fn_pred = lambda: MLP(args.embed_dim, [2 * args.embed_dim, 1])
fn_tree_cell = lambda: BinaryTreeLSTMCell(args.embed_dim)
fn_lstm_cell = lambda: nn.LSTMCell(args.embed_dim, args.embed_dim)
num_params = int(np.ceil(np.log2(args.max_num_nodes))) + 1
self.pred_has_ch = fn_pred()
pred_modules = [[] for _ in range(2)]
tree_cell_modules = []
lstm_cell_modules = [[] for _ in range(2)]
for _ in range(num_params):
for i in range(2):
pred_modules[i].append(fn_pred())
lstm_cell_modules[i].append(fn_lstm_cell())
tree_cell_modules.append(fn_tree_cell())
self.has_left_modules, self.has_right_modules = [nn.ModuleList(l) for l in pred_modules]
self.l2r_modules= nn.ModuleList(tree_cell_modules)
self.cell_topdown_modules, self.cell_topright_modules = [nn.ModuleList(l) for l in lstm_cell_modules]
self.lr2p_cell = fn_tree_cell()
self.row_tree = FenwickTree(args)
if args.tree_pos_enc:
self.tree_pos_enc = PosEncoding(args.embed_dim, args.device, args.pos_base, bias=np.pi / 4)
else:
self.tree_pos_enc = lambda x: 0
def cell_topdown(self, x, y, lv):
cell = self.m_cell_topdown if self.share_param else self.cell_topdown_modules[lv]
return cell(x, y)
def cell_topright(self, x, y, lv):
cell = self.m_cell_topright if self.share_param else self.cell_topright_modules[lv]
return cell(x, y)
def l2r_cell(self, x, y, lv):
cell = self.m_l2r_cell if self.share_param else self.l2r_modules[lv]
return cell(x, y)
def pred_has_left(self, x, lv):
mlp = self.m_pred_has_left if self.share_param else self.has_left_modules[lv]
return mlp(x)
def pred_has_right(self, x, lv):
mlp = self.m_pred_has_right if self.share_param else self.has_right_modules[lv]
return mlp(x)
def get_empty_state(self):
if self.bits_compress:
return self.bit_rep_net([], 1)
else:
return (self.empty_h0, self.empty_c0)
def get_prob_fix(self, prob):
p = prob * (1 - self.greedy_frac)
if prob >= 0.5:
p += self.greedy_frac
return p
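# Worked example of the greedy interpolation above (illustration only): with
# greedy_frac=0.5 a predicted probability of 0.8 becomes 0.8 * 0.5 + 0.5 = 0.9,
# while 0.3 becomes 0.3 * 0.5 = 0.15, pushing sampling towards the likelier branch.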
def gen_row(self, ll, state, tree_node, col_sm, lb, ub):
assert lb <= ub
if tree_node.is_root:
prob_has_edge = torch.sigmoid(self.pred_has_ch(state[0]))
if col_sm.supervised:
has_edge = len(col_sm.indices) > 0
else:
has_edge = np.random.rand() < self.get_prob_fix(prob_has_edge.item())
if ub == 0:
has_edge = False
if tree_node.n_cols <= 0:
has_edge = False
if lb:
has_edge = True
if has_edge:
ll = ll + torch.log(prob_has_edge)
else:
ll = ll + torch.log(1 - prob_has_edge)
tree_node.has_edge = has_edge
else:
assert ub > 0
tree_node.has_edge = True
if not tree_node.has_edge: # an empty tree
return ll, self.get_empty_state(), 0
if tree_node.is_leaf:
tree_node.bits_rep = [0]
col_sm.add_edge(tree_node.col_range[0])
if self.bits_compress:
return ll, self.bit_rep_net(tree_node.bits_rep, tree_node.n_cols), 1
else:
return ll, (self.leaf_h0, self.leaf_c0), 1
else:
tree_node.split()
mid = (tree_node.col_range[0] + tree_node.col_range[1]) // 2
left_prob = torch.sigmoid(self.pred_has_left(state[0], tree_node.depth))
if col_sm.supervised:
has_left = col_sm.next_edge < mid
else:
has_left = np.random.rand() < self.get_prob_fix(left_prob.item())
if ub == 0:
has_left = False
if lb > tree_node.rch.n_cols:
has_left = True
ll = ll + (torch.log(left_prob) if has_left else torch.log(1 - left_prob))
left_pos = self.tree_pos_enc([tree_node.lch.n_cols])
state = self.cell_topdown(self.topdown_left_embed[[int(has_left)]] + left_pos, state, tree_node.depth)
if has_left:
lub = min(tree_node.lch.n_cols, ub)
llb = max(0, lb - tree_node.rch.n_cols)
ll, left_state, num_left = self.gen_row(ll, state, tree_node.lch, col_sm, llb, lub)
else:
left_state = self.get_empty_state()
num_left = 0
right_pos = self.tree_pos_enc([tree_node.rch.n_cols])
topdown_state = self.l2r_cell(state, (left_state[0] + right_pos, left_state[1] + right_pos), tree_node.depth)
rlb = max(0, lb - num_left)
rub = min(tree_node.rch.n_cols, ub - num_left)
if not has_left:
has_right = True
else:
right_prob = torch.sigmoid(self.pred_has_right(topdown_state[0], tree_node.depth))
if col_sm.supervised:
has_right = col_sm.has_edge(mid, tree_node.col_range[1])
else:
has_right = np.random.rand() < self.get_prob_fix(right_prob.item())
if rub == 0:
has_right = False
if rlb:
has_right = True
ll = ll + (torch.log(right_prob) if has_right else torch.log(1 - right_prob))
topdown_state = self.cell_topright(self.topdown_right_embed[[int(has_right)]], topdown_state, tree_node.depth)
if has_right: # has edge in right child
ll, right_state, num_right = self.gen_row(ll, topdown_state, tree_node.rch, col_sm, rlb, rub)
else:
right_state = self.get_empty_state()
num_right = 0
if tree_node.col_range[1] - tree_node.col_range[0] <= self.bits_compress:
summary_state = self.bit_rep_net(tree_node.bits_rep, tree_node.n_cols)
else:
summary_state = self.lr2p_cell(left_state, right_state)
return ll, summary_state, num_left + num_right
def forward(self, node_end, edge_list=None, node_start=0, list_states=[], lb_list=None, ub_list=None, col_range=None, num_nodes=None, display=False):
pos = 0
total_ll = 0.0
edges = []
self.row_tree.reset(list_states)
controller_state = self.row_tree()
if num_nodes is None:
num_nodes = node_end
pbar = range(node_start, node_end)
if display:
pbar = tqdm(pbar)
for i in pbar:
if edge_list is None:
col_sm = ColAutomata(supervised=False)
else:
indices = []
while pos < len(edge_list) and i == edge_list[pos][0]:
indices.append(edge_list[pos][1])
pos += 1
indices.sort()
col_sm = ColAutomata(supervised=True, indices=indices)
cur_row = AdjRow(i, self.directed, self.self_loop, col_range=col_range)
lb = 0 if lb_list is None else lb_list[i]
ub = cur_row.root.n_cols if ub_list is None else ub_list[i]
cur_pos_embed = self.row_tree.pos_enc([num_nodes - i])
controller_state = [x + cur_pos_embed for x in controller_state]
ll, cur_state, _ = self.gen_row(0, controller_state, cur_row.root, col_sm, lb, ub)
assert lb <= len(col_sm.indices) <= ub
controller_state = self.row_tree(cur_state)
edges += [(i, x) for x in col_sm.indices]
total_ll = total_ll + ll
return total_ll, edges, self.row_tree.list_states
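# Minimal sketch of how this entry point might be driven (an assumption for
# illustration; arguments follow the signature above):
#
#     model = RecurTreeGen(args)
#     # free sampling of a graph with 100 rows:
#     ll, edges, states = model(node_end=100)
#     # teacher forcing against a known edge list, sorted by source node:
#     ll, edges, states = model(node_end=100, edge_list=sorted_edges)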
def binary_ll(self, pred_logits, np_label, need_label=False, reduction='sum'):
pred_logits = pred_logits.view(-1, 1)
label = torch.tensor(np_label, dtype=torch.float32).to(pred_logits.device).view(-1, 1)
loss = F.binary_cross_entropy_with_logits(pred_logits, label, reduction=reduction)
if need_label:
return -loss, label
return -loss
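# Note: binary_ll returns the (summed) log-likelihood, i.e. the negated binary
# cross-entropy of pred_logits against np_label, so callers add it directly to
# the running log-likelihood.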
def forward_row_trees(self, graph_ids, list_node_starts=None, num_nodes=-1, list_col_ranges=None):
TreeLib.PrepareMiniBatch(graph_ids, list_node_starts, num_nodes, list_col_ranges)
# embed trees
all_ids = TreeLib.PrepareTreeEmbed()
if not self.bits_compress:
h_bot = torch.cat([self.empty_h0, self.leaf_h0], dim=0)
c_bot = torch.cat([self.empty_c0, self.leaf_c0], dim=0)
fn_hc_bot = lambda d: (h_bot, c_bot)
else:
binary_embeds, base_feat = TreeLib.PrepareBinary()
fn_hc_bot = lambda d: (binary_embeds[d], binary_embeds[d]) if d < len(binary_embeds) else base_feat
max_level = len(all_ids) - 1
h_buf_list = [None] * (len(all_ids) + 1)
c_buf_list = [None] * (len(all_ids) + 1)
for d in range(len(all_ids) - 1, -1, -1):
fn_ids = lambda i: all_ids[d][i]
if d == max_level:
h_buf = c_buf = None
else:
h_buf = h_buf_list[d + 1]
c_buf = c_buf_list[d + 1]
h_bot, c_bot = fn_hc_bot(d + 1)
new_h, new_c = batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, fn_ids, self.lr2p_cell)
h_buf_list[d] = new_h
c_buf_list[d] = new_c
return fn_hc_bot, h_buf_list, c_buf_list
def forward_row_summaries(self, graph_ids, list_node_starts=None, num_nodes=-1, prev_rowsum_states=[None, None], list_col_ranges=None):
fn_hc_bot, h_buf_list, c_buf_list = self.forward_row_trees(graph_ids, list_node_starts, num_nodes, list_col_ranges)
row_states, next_states = self.row_tree.forward_train(*(fn_hc_bot(0)), h_buf_list[0], c_buf_list[0], *prev_rowsum_states)
return row_states, next_states
def forward_train(self, graph_ids, list_node_starts=None, num_nodes=-1, prev_rowsum_states=[None, None], list_col_ranges=None):
fn_hc_bot, h_buf_list, c_buf_list = self.forward_row_trees(graph_ids, list_node_starts, num_nodes, list_col_ranges)
row_states, next_states = self.row_tree.forward_train(*(fn_hc_bot(0)), h_buf_list[0], c_buf_list[0], *prev_rowsum_states)
# make prediction
logit_has_edge = self.pred_has_ch(row_states[0])
has_ch, _ = TreeLib.GetChLabel(0, dtype=np.bool_)  # np.bool alias was removed in newer NumPy
ll = self.binary_ll(logit_has_edge, has_ch)
# has_ch_idx
cur_states = (row_states[0][has_ch], row_states[1][has_ch])
lv = 0
while True:
is_nonleaf = TreeLib.QueryNonLeaf(lv)
if is_nonleaf is None or np.sum(is_nonleaf) == 0:
break
cur_states = (cur_states[0][is_nonleaf], cur_states[1][is_nonleaf])
left_logits = self.pred_has_left(cur_states[0], lv)
has_left, num_left = TreeLib.GetChLabel(-1, lv)
left_update = self.topdown_left_embed[has_left] + self.tree_pos_enc(num_left)
left_ll, float_has_left = self.binary_ll(left_logits, has_left, need_label=True, reduction='sum')
ll = ll + left_ll
cur_states = self.cell_topdown(left_update, cur_states, lv)
left_ids = TreeLib.GetLeftRootStates(lv)
h_bot, c_bot = fn_hc_bot(lv + 1)
if lv + 1 < len(h_buf_list):
h_next_buf, c_next_buf = h_buf_list[lv + 1], c_buf_list[lv + 1]
else:
h_next_buf = c_next_buf = None
left_subtree_states = tree_state_select(h_bot, c_bot,
h_next_buf, c_next_buf,
lambda: left_ids)
has_right, num_right = TreeLib.GetChLabel(1, lv)
right_pos = self.tree_pos_enc(num_right)
left_subtree_states = [x + right_pos for x in left_subtree_states]
topdown_state = self.l2r_cell(cur_states, left_subtree_states, lv)
right_logits = self.pred_has_right(topdown_state[0], lv)
right_update = self.topdown_right_embed[has_right]
topdown_state = self.cell_topright(right_update, topdown_state, lv)
right_ll = self.binary_ll(right_logits, has_right, reduction='none') * float_has_left
ll = ll + torch.sum(right_ll)
lr_ids = TreeLib.GetLeftRightSelect(lv, np.sum(has_left), np.sum(has_right))
new_states = []
for i in range(2):
new_s = multi_index_select([lr_ids[0], lr_ids[2]], [lr_ids[1], lr_ids[3]],
cur_states[i], topdown_state[i])
new_states.append(new_s)
cur_states = tuple(new_states)
lv += 1
return ll, next_states
|
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from elasticsearch import ElasticSearchCollector
##########################################################################
class TestElasticSearchCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ElasticSearchCollector', {})
self.collector = ElasticSearchCollector(config, None)
def test_import(self):
self.assertTrue(ElasticSearchCollector)
def test_new__instances_default(self):
config = get_collector_config('ElasticSearchCollector', {})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(self.collector.instances, {'': ('127.0.0.1', 9200)})
def test_new__instances_single(self):
config = get_collector_config('ElasticSearchCollector', {
'instances': 'bla'})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(self.collector.instances, {'default': ('bla', 9200)})
def test_new__instances_multi(self):
config = get_collector_config('ElasticSearchCollector', {
'instances': [
'something',
'foo@1234',
'bar@bla:1234',
]})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(self.collector.instances, {
'default': ('something', 9200),
'foo': ('1234', 9200),
'bar': ('bla', 1234),
})
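# The assertions above pin down the assumed instance-string convention
# "[alias@]host[:port]": a bare host gets alias 'default' and port 9200,
# 'foo@1234' parses as alias 'foo' with host '1234' (still port 9200), and
# 'bar@bla:1234' carries an explicit port.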
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
returns = [
self.getFixture('stats'),
self.getFixture('cluster_stats'),
self.getFixture('indices_stats'),
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
self.collector.config['cluster'] = True
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 3)
metrics = {
'http.current': 1,
'indices.docs.count': 11968062,
'indices.docs.deleted': 2692068,
'indices.datastore.size': 22724243633,
'indices._all.docs.count': 4,
'indices._all.docs.deleted': 0,
'indices._all.datastore.size': 2674,
'indices.test.docs.count': 4,
'indices.test.docs.deleted': 0,
'indices.test.datastore.size': 2674,
'process.cpu.percent': 58,
'process.mem.resident': 5192126464,
'process.mem.share': 11075584,
'process.mem.virtual': 7109668864,
'disk.reads.count': 55996,
'disk.reads.size': 1235387392,
'disk.writes.count': 5808198,
'disk.writes.size': 23287275520,
'thread_pool.generic.threads': 1,
'network.tcp.active_opens': 2299,
'jvm.mem.pools.CMS_Old_Gen.used': 530915016,
'cluster_health.nodes.total': 3,
'cluster_health.nodes.data': 3,
'cluster_health.shards.active_primary': 5,
'cluster_health.shards.active': 10,
'cluster_health.shards.relocating': 0,
'cluster_health.shards.unassigned': 0,
'cluster_health.shards.initializing': 0,
'cluster_health.status': 2,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_and_basic_auth(self, publish_mock):
self.collector.config["user"] = "user"
self.collector.config["password"] = "password"
self.test_should_work_with_real_data()
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_logstash_mode(self, publish_mock):
returns = [
self.getFixture('stats'),
self.getFixture('logstash_indices_stats'),
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
self.collector.config['logstash_mode'] = True
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 2)
# Omit all non-indices metrics, since those were already
# checked in previous test.
metrics = {
'indices.docs.count': 11968062,
'indices.docs.deleted': 2692068,
'indices.datastore.size': 22724243633,
'indices._all.docs.count': 35856619,
'indices._all.docs.deleted': 0,
'indices._all.datastore.size': 21903813340,
'indices._all.get.exists_time_in_millis': 0,
'indices._all.get.exists_total': 0,
'indices._all.get.missing_time_in_millis': 0,
'indices._all.get.missing_total': 0,
'indices._all.get.time_in_millis': 0,
'indices._all.get.total': 0,
'indices._all.indexing.delete_time_in_millis': 0,
'indices._all.indexing.delete_total': 0,
'indices._all.indexing.index_time_in_millis': 29251475,
'indices._all.indexing.index_total': 35189321,
'indices._all.search.fetch_time_in_millis': 6962,
'indices._all.search.fetch_total': 4084,
'indices._all.search.query_time_in_millis': 41211,
'indices._all.search.query_total': 4266,
'indices._all.store.throttle_time_in_millis': 0,
'indices.logstash-adm-syslog.indexes_in_group': 3,
'indices.logstash-adm-syslog.datastore.size': 21903813340,
'indices.logstash-adm-syslog.docs.count': 35856619,
'indices.logstash-adm-syslog.docs.deleted': 0,
'indices.logstash-adm-syslog.get.exists_time_in_millis': 0,
'indices.logstash-adm-syslog.get.exists_total': 0,
'indices.logstash-adm-syslog.get.missing_time_in_millis': 0,
'indices.logstash-adm-syslog.get.missing_total': 0,
'indices.logstash-adm-syslog.get.time_in_millis': 0,
'indices.logstash-adm-syslog.get.total': 0,
'indices.logstash-adm-syslog.indexing.delete_time_in_millis': 0,
'indices.logstash-adm-syslog.indexing.delete_total': 0,
'indices.logstash-adm-syslog.indexing.index_time_in_millis': 29251475, # NOQA
'indices.logstash-adm-syslog.indexing.index_total': 35189321,
'indices.logstash-adm-syslog.search.fetch_time_in_millis': 6962,
'indices.logstash-adm-syslog.search.fetch_total': 4084,
'indices.logstash-adm-syslog.search.query_time_in_millis': 41211,
'indices.logstash-adm-syslog.search.query_total': 4266,
'indices.logstash-adm-syslog.store.throttle_time_in_millis': 0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_0_90_data(self, publish_mock):
returns = [
self.getFixture('stats0.90'),
self.getFixture('indices_stats'),
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 2)
# test some 0.90 specific stats
metrics = {
'cache.filter.size': 1700,
'cache.filter.evictions': 9,
'cache.id.size': 98,
'fielddata.size': 1448,
'fielddata.evictions': 12,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
urlopen_mock = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats_blank')))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
self.assertPublishedMany(publish_mock, {})
@patch.object(Collector, 'publish')
def test_multi_instances_with_real_data(self, publish_mock):
config = get_collector_config('ElasticSearchCollector', {
'instances': [
'esprodata01@10.10.10.201:9200',
'esprodata02@10.10.10.202:9200',
]})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(len(self.collector.instances), 2)
returns = [
self.getFixture('stats'),
self.getFixture('indices_stats'),
self.getFixture('stats2'),
self.getFixture('indices_stats2'),
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 4)
metrics = {
'esprodata01.http.current': 1,
'esprodata02.http.current': 2,
'esprodata01.indices.docs.count': 11968062,
'esprodata02.indices.docs.count': 11968000,
'esprodata01.thread_pool.generic.threads': 1,
'esprodata02.thread_pool.generic.threads': 2,
'esprodata01.jvm.mem.pools.Par_Survivor_Space.max': 8716288,
'esprodata02.jvm.mem.pools.Par_Survivor_Space.max': 8710000,
'esprodata01.indices._all.docs.count': 4,
'esprodata02.indices._all.docs.count': 8,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_real_1_7_data(self, publish_mock):
returns = [
self.getFixture('stats1.7'),
self.getFixture('indices_stats'),
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 2)
# test some 1.7 specific stats
metrics = {
'segments.count': 7,
'segments.mem.size': 75726,
'segments.index_writer.mem.size': 0,
'segments.index_writer.mem.max_size': 469762048,
'segments.version_map.mem.size': 0,
'segments.fixed_bit_set.mem.size': 0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.servicebus.v2021_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
namespace_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
"""Gets the available PrivateEndpointConnections within a namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2021_01_01_preview.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Creates or updates PrivateEndpointConnections of service namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param private_endpoint_connection_name: The PrivateEndpointConnection name.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to update Status of PrivateEndPoint Connection to
namespace resource.
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Private Endpoint Connection.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param private_endpoint_connection_name: The PrivateEndpointConnection name.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
namespace_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Gets a description for the specified Private Endpoint Connection.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param private_endpoint_connection_name: The PrivateEndpointConnection name.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
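# A minimal usage sketch (assumption for illustration; the management client
# name and credential wiring come from the enclosing azure-mgmt-servicebus
# package, not from this module):
#
#     async with ServiceBusManagementClient(credential, subscription_id) as client:
#         async for conn in client.private_endpoint_connections.list(
#                 resource_group_name, namespace_name):
#             print(conn.name)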
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.route53 import Route53DNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_ROUTE53
class Route53Tests(unittest.TestCase):
def setUp(self):
Route53DNSDriver.connectionCls.conn_class = Route53MockHttp
Route53MockHttp.type = None
self.driver = Route53DNSDriver(*DNS_PARAMS_ROUTE53)
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 10)
self.assertTrue(RecordType.A in record_types)
def test_list_zones(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 5)
zone = zones[0]
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 't.com')
def test_list_records(self):
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 10)
record = records[1]
self.assertEqual(record.name, 'www')
self.assertEqual(record.id, 'A:www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '208.111.35.173')
self.assertEqual(record.extra['ttl'], 86400)
record = records[3]
self.assertEqual(record.type, RecordType.MX)
self.assertEqual(record.data, 'ASPMX.L.GOOGLE.COM.')
self.assertEqual(record.extra['priority'], 1)
record = records[4]
self.assertEqual(record.type, RecordType.MX)
self.assertEqual(record.data, 'ALT1.ASPMX.L.GOOGLE.COM.')
self.assertEqual(record.extra['priority'], 5)
record = records[8]
self.assertEqual(record.type, RecordType.SRV)
self.assertEqual(record.data, 'xmpp-server.example.com.')
self.assertEqual(record.extra['priority'], 1)
self.assertEqual(record.extra['weight'], 10)
self.assertEqual(record.extra['port'], 5269)
def test_get_zone(self):
zone = self.driver.get_zone(zone_id='47234')
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 't.com')
def test_get_record(self):
record = self.driver.get_record(zone_id='47234',
record_id='CNAME:wibble')
self.assertEqual(record.name, 'wibble')
self.assertEqual(record.type, RecordType.CNAME)
self.assertEqual(record.data, 't.com')
def test_list_records_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.list_records(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_get_zone_does_not_exist(self):
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='47234')
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, '47234')
else:
self.fail('Exception was not thrown')
def test_get_record_zone_does_not_exist(self):
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='4444', record_id='28536')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_record_does_not_exist(self):
Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST'
rid = 'CNAME:doesnotexist.t.com'
try:
self.driver.get_record(zone_id='47234',
record_id=rid)
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone(self):
zone = self.driver.create_zone(domain='t.com', type='master',
ttl=None, extra=None)
self.assertEqual(zone.id, '47234')
self.assertEqual(zone.domain, 't.com')
def test_create_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='www', zone=zone,
type=RecordType.A, data='127.0.0.1',
extra={'ttl': 0}
)
self.assertEqual(record.id, 'A:www')
self.assertEqual(record.name, 'www')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_create_record_zone_name(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(
name='', zone=zone,
type=RecordType.A, data='127.0.0.1',
extra={'ttl': 0}
)
self.assertEqual(record.id, 'A:')
self.assertEqual(record.name, '')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_create_multi_value_record(self):
zone = self.driver.list_zones()[0]
records = self.driver.ex_create_multi_value_record(
name='balancer', zone=zone,
type=RecordType.A, data='127.0.0.1\n127.0.0.2',
extra={'ttl': 0}
)
self.assertEqual(len(records), 2)
self.assertEqual(records[0].id, 'A:balancer')
self.assertEqual(records[1].id, 'A:balancer')
self.assertEqual(records[0].name, 'balancer')
self.assertEqual(records[1].name, 'balancer')
self.assertEqual(records[0].zone, zone)
self.assertEqual(records[1].zone, zone)
self.assertEqual(records[0].type, RecordType.A)
self.assertEqual(records[1].type, RecordType.A)
self.assertEqual(records[0].data, '127.0.0.1')
self.assertEqual(records[1].data, '127.0.0.2')
def test_update_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[1]
params = {
'record': record,
'name': 'www',
'type': RecordType.A,
'data': '::1',
'extra': {'ttl': 0}}
updated_record = self.driver.update_record(**params)
self.assertEqual(record.data, '208.111.35.173')
self.assertEqual(updated_record.id, 'A:www')
self.assertEqual(updated_record.name, 'www')
self.assertEqual(updated_record.zone, record.zone)
self.assertEqual(updated_record.type, RecordType.A)
self.assertEqual(updated_record.data, '::1')
def test_delete_zone(self):
zone = self.driver.list_zones()[0]
status = self.driver.delete_zone(zone=zone)
self.assertTrue(status)
def test_delete_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
Route53MockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_delete_record(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
status = self.driver.delete_record(record=record)
self.assertTrue(status)
def test_delete_record_does_not_exist(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
Route53MockHttp.type = 'RECORD_DOES_NOT_EXIST'
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.record_id, record.id)
else:
self.fail('Exception was not thrown')
class Route53MockHttp(MockHttp):
fixtures = DNSFileFixtures('route53')
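# MockHttp routes each request to a handler whose name is derived from the URL
# path (separators become underscores), with the active `type` attribute
# appended as a suffix, e.g. GET /2012-02-29/hostedzone/47234 is served by
# _2012_02_29_hostedzone_47234 below.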
def _2012_02_29_hostedzone_47234(self, method, url, body, headers):
body = self.fixtures.load('get_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone(self, method, url, body, headers):
# print method, url, body, headers
if method == "POST":
body = self.fixtures.load("create_zone.xml")
return (httplib.CREATED, body, {}, httplib.responses[httplib.OK])
body = self.fixtures.load('list_zones.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone_47234_rrset(self, method, url, body, headers):
body = self.fixtures.load('list_records.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone_47234_rrset_ZONE_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('zone_does_not_exist.xml')
return (httplib.NOT_FOUND, body,
{}, httplib.responses[httplib.NOT_FOUND])
def _2012_02_29_hostedzone_4444_ZONE_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('zone_does_not_exist.xml')
return (httplib.NOT_FOUND, body,
{}, httplib.responses[httplib.NOT_FOUND])
def _2012_02_29_hostedzone_47234_ZONE_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('zone_does_not_exist.xml')
return (httplib.NOT_FOUND, body,
{}, httplib.responses[httplib.NOT_FOUND])
def _2012_02_29_hostedzone_47234_rrset_RECORD_DOES_NOT_EXIST(self, method,
url, body, headers):
if method == "POST":
body = self.fixtures.load('invalid_change_batch.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST])
body = self.fixtures.load('record_does_not_exist.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _2012_02_29_hostedzone_47234_RECORD_DOES_NOT_EXIST(self, method,
url, body, headers):
body = self.fixtures.load('get_zone.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
from mobilebert import lamb_optimizer
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu,
optimizer="adamw", weight_decay_rate=0.01,
end_lr_rate=0.0001, use_layer_wise_warmup=False,
total_warmup_phases=0):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=learning_rate * end_lr_rate,
power=1.0,
cycle=False)
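# With power=1.0 this is a straight line from init_lr down to
# init_lr * end_lr_rate over num_train_steps steps; e.g. (hypothetical
# numbers) init_lr=1e-4 with end_lr_rate=0.0001 decays toward 1e-8.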
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
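# Worked example (hypothetical values, not from this code): with
# init_lr=1e-4 and num_warmup_steps=1000, step 100 trains with
# 100/1000 * 1e-4 = 1e-5; once global_step >= 1000, is_warmup is 0 and
# the decayed learning_rate computed above is used unchanged.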
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
if optimizer == "adamw":
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=weight_decay_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=[
"teacher", "LayerNorm", "layer_norm", "bias", "FakeLayerNorm"],
use_layer_wise_warmup=use_layer_wise_warmup,
total_warmup_phases=total_warmup_phases,
num_train_steps=num_train_steps)
elif optimizer == "lamb":
optimizer = lamb_optimizer.LAMBOptimizer(
learning_rate=learning_rate,
weight_decay_rate=weight_decay_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=[
"teacher", "LayerNorm", "layer_norm", "bias", "FakeLayerNorm"],
use_layer_wise_warmup=use_layer_wise_warmup,
total_warmup_phases=total_warmup_phases,
num_train_steps=num_train_steps)
else:
raise ValueError("Not supported optimizer: ", optimizer)
if use_tpu:
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
# Exclude the frozen teacher variables; their (None) gradients are added
# back below so apply_gradients still sees every trainable variable.
tvars = [var for var in tf.trainable_variables()
if not var.name.startswith("teacher")]
grads = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
ntvars = [var for var in tf.trainable_variables()
if var.name.startswith("teacher")]
ngrads = [None for var in ntvars]
train_op = optimizer.apply_gradients(
zip(grads + ngrads, tvars + ntvars), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
# a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer",
use_layer_wise_warmup=False,
total_warmup_phases=0,
num_train_steps=0):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
del use_layer_wise_warmup
del total_warmup_phases
del num_train_steps
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
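# For parameters subject to decay, the resulting rule is effectively:
#   update    = m_next / (sqrt(v_next) + epsilon) + weight_decay_rate * param
#   param_new = param - learning_rate * update
# (note that no Adam bias correction is applied in this implementation).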
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
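# Minimal usage sketch (hypothetical hyperparameters; `loss` is assumed to be
# a scalar tensor produced by the surrounding model graph, not defined here):
#
#   train_op = create_optimizer(
#       loss,
#       init_lr=1e-4,
#       num_train_steps=10000,
#       num_warmup_steps=1000,
#       use_tpu=False,
#       optimizer="adamw")
#
# The returned op performs one AdamW step, including the manual global_step
# increment noted above.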
|
|
#!/usr/bin/env python
from __future__ import unicode_literals
import datetime
import json
import unittest
import webapp2
import main
from models.score import ScoreModel
from lib.constants import DATA_BLOB as d
from lib.constants import HTTP_CODE as http_code
from lib.constants import NFL as nfl
from lib.constants import SCOREBOARD as sb
from google.appengine.api import memcache
from google.appengine.ext import testbed
from test_lib.mock_service import UrlFetchMock
class TestApiScores(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_urlfetch_stub()
# Create the mocked service & inject it into the testbed
self.fetch_mock = UrlFetchMock()
self.testbed._register_stub(testbed.URLFETCH_SERVICE_NAME, self.fetch_mock)
self.app = main.get_app()
# Define the endpoint for all our requests
self.endpoint = "/scores"
# Define the base request
self.request = webapp2.Request.blank(self.endpoint)
# Timestamp in seconds
self.timestamp = int(datetime.datetime.now().strftime('%s'))
# An arbitrary and unrealistic week number
self.week = (self.timestamp % 1000) + 500
# Content for GET requests
self.content_str = (
'{"ss":[["Sat","4:30","final overtime",0,"BAL",'
'"38","DEN","35",0,0,"55829",0,"REG11","2012"'
']]}').encode("UTF-8")
# Data for GET requests
self.data = {
"content": self.content_str,
"final_url": (sb.URL_REG).encode("UTF-8"),
"status_code": 200
}
self.fetch_mock.set_return_values(**self.data)
def tearDown(self):
self.testbed.deactivate()
def test_get_basic(self):
"""
Call GET without any arguments to test for default behavior
"""
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.OK,
"Status code 200 OK")
self.assertEqual(
response.headers['Content-Type'],
"application/json",
"Content-Type is \"application/json\"")
def test_get_detect_week_parameter(self):
"""
Call GET with the week parameter to specify the week to get.
The Datastore will be prepopulated with data to validate against.
"""
self.request.query_string = "week=" + str(self.week)
self.request = webapp2.Request.blank(
self.endpoint + "?week=" + str(self.week)
)
response = None
result = []
self.assertTrue(
ScoreModel(
week = self.week,
game_id = self.timestamp
).put(),
"Saving to datastore")
self.assertEqual(
len(ScoreModel().all().fetch(2)),
1,
"Datastore has exactly 1 entry")
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.OK,
"Status code 200 OK")
self.assertNotEqual(
len(response.body),
0,
"Response came back non-empty")
result = json.loads(response.body)
self.assertIsNotNone(
result,
"JSON loaded properly")
self.assertEqual(
len(result),
1,
"Response came back with exactly 1 entry")
self.assertEqual(
result[0]['week'],
self.week,
"Season week number is correct")
self.assertEqual(
result[0][d.NFL_GAME_ID],
self.timestamp,
"NFL game ID matches")
@unittest.skip("Defaults are dynamic. Need to learn how to test dynamic elements")
def test_get_catch_non_integer_for_week_param(self):
"""
Handle the case where the parameter for 'week' isn't an expected
integer.
We expect behavior to default the week number and continue as
normal.
"""
default_week = 0
self.request = webapp2.Request.blank(self.endpoint + "?week=" + "MaunaLoa")
response = None
result = []
self.assertTrue(
ScoreModel(
week = default_week,
game_id = self.timestamp
).put(),
"Saving to datastore")
self.assertEqual(
len(ScoreModel().all().fetch(2)),
1,
"Datastore has exactly 1 entry")
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.OK,
"Status code 200 OK")
self.assertNotEqual(
len(response.body),
0,
"Response came back non-empty")
result = json.loads(response.body)
self.assertEqual(
len(result),
1,
"Response came back with exactly 1 entry")
# This assert needs to be updated
self.assertEqual(
result[0]['week'],
default_week,
"Season week number is correct")
self.assertEqual(
result[0][d.NFL_GAME_ID],
self.timestamp,
"NFL game ID matches")
def test_post_basic(self):
"""
Call POST with no parameters to test for default behavior
"""
self.request.method = "POST"
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.CREATED,
"Status code 201 Created")
self.assertEqual(
response.headers['Content-Type'],
"application/json",
"Content-Type is \"application/json\"")
def test_post_single_game(self):
"""
Call POST with enough parameters for a single game to test for
result.
Need to validate the datastore & memcache layers
"""
self.request.method = "POST"
self.request.POST[d.GAME_WEEK] = self.week
self.request.POST[d.NFL_GAME_ID] = self.timestamp
response = None
query = None
tag = "SCORES_S" + unicode(nfl.YEAR) + "W" + unicode(self.week)
query = ScoreModel().all().fetch(2)
self.assertEqual(
len(query),
0,
"Datastore is empty")
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.CREATED,
"Status code 201 Created")
# Validate datastore
query = ScoreModel().all().fetch(2)
self.assertEqual(
len(query),
1,
"Datastore has exactly 1 entry")
self.assertEqual(
query[0].week,
self.week,
"Season week number is correct")
self.assertEqual(
query[0].game_id,
self.timestamp,
"NFL game ID matches")
# Validate memcache
query = memcache.get(tag)
self.assertIsNotNone(
query,
"Memcache hit")
query = json.loads(query)
self.assertEqual(
query['data'][0]['week'],
self.week,
"Season week number is correct")
self.assertEqual(
query['data'][0][d.NFL_GAME_ID],
self.timestamp,
"NFL game ID matches")
def test_post_with_incorrect_parameter(self):
"""
Call POST with an incorrect parameter.
"""
self.request.method = "POST"
self.request.POST[d.GAME_WEEK] = self.week
self.request.POST[d.NFL_GAME_ID] = self.timestamp
self.request.POST["gameid"] = self.timestamp + 1
response = None
query = None
tag = "SCORES_S" + unicode(nfl.YEAR) + "W" + unicode(self.week)
query = ScoreModel().all().fetch(2)
self.assertEqual(
len(query),
0,
"Datastore is empty")
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.CREATED,
"Status code 201 Created")
# Validate datastore
query = ScoreModel().all().fetch(2)
self.assertEqual(
len(query),
1,
"Datastore has exactly 1 entry")
self.assertEqual(
query[0].week,
self.week,
"Season week number is correct")
self.assertEqual(
query[0].game_id,
self.timestamp,
"NFL game ID matches")
# Validate memcache
query = memcache.get(tag)
self.assertIsNotNone(
query,
"Memcache hit")
query = json.loads(query)
self.assertEqual(
query['data'][0]['week'],
self.week,
"Season week number is correct")
self.assertFalse(
"gameid" in query['data'][0],
"Fake NFL game ID is missing")
def test_all_data_propagates_from_source_to_memcache(self):
"""
Handle the case where the score source doesn't have data but
the datastore does (i.e., spread data), and have it propagate
to memcache.
"""
self.request = webapp2.Request.blank(self.endpoint)
response = None
result = []
# 55829 is from test content_str
expected_game_id = 55829
# 11 is from test content_str, 200 is regular season prefix
expected_week = 211
additional_data = {
"week": expected_week,
"game_id": expected_game_id,
"spread_odds": -7.5,
"spread_margin": 48.5
}
self.assertTrue(
ScoreModel(**additional_data).put(),
"Saving to datastore")
self.assertEqual(
len(ScoreModel().all().fetch(2)),
1,
"Datastore has exactly 1 entry")
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.OK,
"Status code 200 OK")
self.assertNotEqual(
len(response.body),
0,
"Response came back non-empty")
result = json.loads(response.body)
self.assertEqual(
len(result),
1,
"Response came back with exactly 1 entry")
for key in additional_data:
target = result[0]
self.assertTrue(
key in target,
'Data has key for ' + key)
self.assertEqual(
additional_data[key],
target[key],
'Data for key "' + key + '" is correct')
def test_options_basic(self):
origin = 'http://spread.hellaballer.com'
request_method = 'POST'
request_header = 'Content-Type'
self.request.method = "OPTIONS"
self.request.headers['Origin'] = origin
self.request.headers['Access-Control-Request-Method'] = request_method
self.request.headers['Access-Control-Request-Headers'] = request_header
response = None
response = self.request.get_response(self.app)
self.assertEqual(
origin,
response.headers['Access-Control-Allow-Origin'])
self.assertTrue(
request_method in response.headers['Access-Control-Allow-Methods'])
self.assertEqual(
request_header,
response.headers['Access-Control-Allow-Headers'])
self.assertEqual(
'application/json; charset=utf-8',
response.headers['Content-Type'])
def test_week_id_slug(self):
self.request = webapp2.Request.blank(
self.endpoint + '/' + str(self.week)
)
response = None
result = []
self.assertTrue(
ScoreModel(
week = self.week,
game_id = self.timestamp
).put(),
"Saving to datastore")
self.assertEqual(
len(ScoreModel().all().fetch(2)),
1,
"Datastore has exactly 1 entry")
response = self.request.get_response(self.app)
self.assertEqual(
response.status_int,
http_code.OK,
"Status code 200 OK")
self.assertNotEqual(
len(response.body),
0,
"Response came back non-empty")
result = json.loads(response.body)
self.assertIsNotNone(
result,
"JSON loaded properly")
self.assertEqual(
len(result),
1,
"Response came back with exactly 1 entry")
self.assertEqual(
result[0]['week'],
self.week,
"Season week number is correct")
self.assertEqual(
result[0][d.NFL_GAME_ID],
self.timestamp,
"NFL game ID matches")
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron import subnet
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_template = '''
heat_template_version: 2015-04-30
description: Template to test subnet Neutron resource
resources:
net:
type: OS::Neutron::Net
properties:
name: the_net
tenant_id: c1210485b2424d48804aad5d39c61b8f
shared: true
dhcp_agent_ids:
- 28c25a04-3f73-45a7-a2b4-59e183943ddc
sub_net:
type: OS::Neutron::Subnet
properties:
network: { get_resource : net}
tenant_id: c1210485b2424d48804aad5d39c61b8f
ip_version: 4
cidr: 10.0.3.0/24
allocation_pools:
- start: 10.0.3.20
end: 10.0.3.150
host_routes:
- destination: 10.0.4.0/24
nexthop: 10.0.3.20
dns_nameservers:
- 8.8.8.8
port:
type: OS::Neutron::Port
properties:
device_id: d6b4d3a5-c700-476f-b609-1493dd9dadc0
name: port1
network: { get_resource : net}
fixed_ips:
- subnet: { get_resource : sub_net }
ip_address: 10.0.3.21
port2:
type: OS::Neutron::Port
properties:
name: port2
network: { get_resource : net}
router:
type: OS::Neutron::Router
properties:
l3_agent_id: 792ff887-6c85-4a56-b518-23f24fa65581
router_interface:
type: OS::Neutron::RouterInterface
properties:
router_id: { get_resource : router }
subnet: { get_resource : sub_net }
gateway:
type: OS::Neutron::RouterGateway
properties:
router_id: { get_resource : router }
network: { get_resource : net}
'''
neutron_template_deprecated = neutron_template.replace(
'network', 'network_id').replace('subnet', 'subnet_id')
class NeutronSubnetTest(common.HeatTestCase):
def setUp(self):
super(NeutronSubnetTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'delete_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'update_subnet')
self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
def create_subnet(self, t, stack, resource_name):
resource_defns = stack.t.resource_definitions(stack)
rsrc = subnet.Subnet('test_subnet', resource_defns[resource_name],
stack)
return rsrc
def test_subnet(self):
update_props = {'subnet': {
'dns_nameservers': ['8.8.8.8', '192.168.1.254'],
'name': 'mysubnet',
'enable_dhcp': True,
'host_routes': [{'destination': '192.168.1.0/24',
'nexthop': '194.168.1.2'}],
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.100"},
{"start": "10.0.3.110", "end": "10.0.3.200"}]}}
t = self._test_subnet(u_props=update_props)
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None',
cmd_resource=None,
).AndReturn('None')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'router',
'None',
cmd_resource=None,
).AndReturn('None')
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
self.assertIsNone(rsrc.FnGetAtt('network_id'))
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
rsrc.FnGetAtt('network_id'))
self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])
# assert the dependency (implicit or explicit) between the ports
# and the subnet
self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])
props = {
"name": 'mysubnet',
"network_id": cfn_funcs.ResourceRef(stack, "get_resource", "net"),
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"ip_version": 4,
"cidr": "10.0.3.0/24",
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.100"},
{"start": "10.0.3.110", "end": "10.0.3.200"}],
"dns_nameservers": ["8.8.8.8", "192.168.1.254"],
"host_routes": [
{"destination": "192.168.1.0/24", "nexthop": "194.168.1.2"}
]
}
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
# rsrc.handle_update(update_snippet, {}, {})
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.m.VerifyAll()
def test_subnet_with_subnetpool(self):
subnet_dict = {
"subnet": {
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"host_routes": [
{"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
"subnetpool_id": "None",
"prefixlen": 24,
"dns_nameservers": ["8.8.8.8"],
"enable_dhcp": True,
"gateway_ip": "10.0.3.1",
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 4,
"name": "name",
"network_id": "None",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f"
}
}
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnetpool',
'None',
cmd_resource=None,
).AndReturn('None')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None',
cmd_resource=None,
).AndReturn('None')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnetpool',
'None',
cmd_resource=None,
).AndReturn('None')
neutronclient.Client.create_subnet({
'subnet': {
'network_id': u'None',
'name': 'mysubnet',
'subnetpool_id': 'None',
'prefixlen': 24,
'dns_nameservers': [u'8.8.8.8'],
'allocation_pools': [
{'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
'ip_version': 4,
'enable_dhcp': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f'
}
}).AndReturn(subnet_dict)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(subnet_dict)
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndReturn(None)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
self.m.ReplayAll()
t = template_format.parse(neutron_template)
del t['resources']['sub_net']['properties']['cidr']
t['resources']['sub_net']['properties'][
'subnetpool'] = 'None'
t['resources']['sub_net']['properties'][
'prefixlen'] = 24
t['resources']['sub_net']['properties'][
'name'] = 'mysubnet'
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_subnet_deprecated(self):
t = self._test_subnet(resolve_neutron=False)
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None',
cmd_resource=None,
).AndReturn('None')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'router',
'None',
cmd_resource=None,
).AndReturn('None')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
self.assertIsNone(rsrc.FnGetAtt('network_id'))
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
rsrc.FnGetAtt('network_id'))
self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])
# assert the dependency (implicit or explicit) between the ports
# and the subnet
self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.m.VerifyAll()
def _test_subnet(self, resolve_neutron=True, u_props=None):
default_update_props = {'subnet': {
'dns_nameservers': ['8.8.8.8', '192.168.1.254'],
'name': 'mysubnet',
'enable_dhcp': True,
'host_routes': [{'destination': '192.168.1.0/24',
'nexthop': '194.168.1.2'}]}}
update_props = u_props if u_props else default_update_props
neutronclient.Client.create_subnet({
'subnet': {
'name': utils.PhysName('test_stack', 'test_subnet'),
'network_id': u'None',
'dns_nameservers': [u'8.8.8.8'],
'allocation_pools': [
{'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
'ip_version': 4,
'cidr': u'10.0.3.0/24',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'enable_dhcp': True
}
}).AndReturn({
"subnet": {
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"enable_dhcp": True,
"gateway_ip": "10.0.3.1",
"host_routes": [
{"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 4,
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f"
}
})
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndRaise(
qe.NeutronClientException(status_code=404))
sn = {
"subnet": {
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"gateway_ip": "10.0.3.1",
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
"ip_version": 4,
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"enable_dhcp": True,
}
}
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
# Delete script
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndReturn(None)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
if resolve_neutron:
t = template_format.parse(neutron_template)
# Update script
neutronclient.Client.update_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1', update_props)
else:
t = template_format.parse(neutron_template_deprecated)
return t
def test_subnet_disable_dhcp(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None',
cmd_resource=None,
).AndReturn('None')
neutronclient.Client.create_subnet({
'subnet': {
'name': utils.PhysName('test_stack', 'test_subnet'),
'network_id': u'None',
'dns_nameservers': [u'8.8.8.8'],
'allocation_pools': [
{'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
'ip_version': 4,
'enable_dhcp': False,
'cidr': u'10.0.3.0/24',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f'
}
}).AndReturn({
"subnet": {
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"host_routes": [
{"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"enable_dhcp": False,
"gateway_ip": "10.0.3.1",
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 4,
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f"
}
})
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn({
"subnet": {
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"host_routes": [
{"destination": "10.0.4.0/24",
"nexthop": "10.0.3.20"}],
"gateway_ip": "10.0.3.1",
"ip_version": 4,
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"enable_dhcp": False,
}
})
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndReturn(None)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
self.m.ReplayAll()
t = template_format.parse(neutron_template)
t['resources']['sub_net']['properties']['enable_dhcp'] = 'False'
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
self.assertIs(False, rsrc.FnGetAtt('enable_dhcp'))
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_null_gateway_ip(self):
p = {}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({}, p)
p = {'foo': 'bar'}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({'foo': 'bar'}, p)
p = {
'foo': 'bar',
'gateway_ip': '198.51.100.0'
}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({
'foo': 'bar',
'gateway_ip': '198.51.100.0'
}, p)
p = {
'foo': 'bar',
'gateway_ip': ''
}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({
'foo': 'bar',
'gateway_ip': None
}, p)
# This should not happen as prepare_properties
# strips out None values, but testing anyway
p = {
'foo': 'bar',
'gateway_ip': None
}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({
'foo': 'bar',
'gateway_ip': None
}, p)
def test_ipv6_subnet(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None',
cmd_resource=None,
).AndReturn('None')
neutronclient.Client.create_subnet({
'subnet': {
'name': utils.PhysName('test_stack', 'test_subnet'),
'network_id': u'None',
'dns_nameservers': [u'2001:4860:4860::8844'],
'ip_version': 6,
'enable_dhcp': True,
'cidr': u'fdfa:6a50:d22b::/64',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'ipv6_address_mode': 'slaac',
'ipv6_ra_mode': 'slaac'
}
}).AndReturn({
"subnet": {
"allocation_pools": [
{"start": "fdfa:6a50:d22b::2",
"end": "fdfa:6a50:d22b:0:ffff:ffff:ffff:fffe"}],
"cidr": "fd00:1::/64",
"enable_dhcp": True,
"gateway_ip": "fdfa:6a50:d22b::1",
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 6,
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
'ipv6_address_mode': 'slaac',
'ipv6_ra_mode': 'slaac'
}
})
self.m.ReplayAll()
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props.pop('allocation_pools')
props.pop('host_routes')
props['ip_version'] = 6
props['ipv6_address_mode'] = 'slaac'
props['ipv6_ra_mode'] = 'slaac'
props['cidr'] = 'fdfa:6a50:d22b::/64'
props['dns_nameservers'] = ['2001:4860:4860::8844']
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
self.m.VerifyAll()
def test_host_routes_validate_destination(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['host_routes'] = [{'destination': 'invalid_cidr',
'nexthop': '10.0.3.20'}]
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
msg = ("Property error: "
"resources.sub_net.properties.host_routes[0].destination: "
"Error validating value 'invalid_cidr': Invalid net cidr "
"invalid IPNetwork invalid_cidr ")
self.assertEqual(msg, six.text_type(ex))
def test_ipv6_validate_ra_mode(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['ipv6_address_mode'] = 'dhcpv6-stateful'
props['ipv6_ra_mode'] = 'slaac'
props['ip_version'] = 6
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual("When both ipv6_ra_mode and ipv6_address_mode are "
"set, they must be equal.", six.text_type(ex))
def test_ipv6_validate_ip_version(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['ipv6_address_mode'] = 'slaac'
props['ipv6_ra_mode'] = 'slaac'
props['ip_version'] = 4
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual("ipv6_ra_mode and ipv6_address_mode are not "
"supported for ipv4.", six.text_type(ex))
def test_validate_both_subnetpool_cidr(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['subnetpool'] = 'new_pool'
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.ResourcePropertyConflict,
rsrc.validate)
msg = ("Cannot define the following properties at the same time: "
"subnetpool, cidr.")
self.assertEqual(msg, six.text_type(ex))
def test_validate_none_subnetpool_cidr(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
del props['cidr']
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.PropertyUnspecifiedError,
rsrc.validate)
msg = ("At least one of the following properties must be specified: "
"subnetpool, cidr")
self.assertEqual(msg, six.text_type(ex))
def test_validate_both_prefixlen_cidr(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['prefixlen'] = '24'
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.ResourcePropertyConflict,
rsrc.validate)
msg = ("Cannot define the following properties at the same time: "
"prefixlen, cidr.")
self.assertEqual(msg, six.text_type(ex))
def test_deprecated_network_id(self):
template = """
heat_template_version: 2015-04-30
resources:
net:
type: OS::Neutron::Net
properties:
name: test
subnet:
type: OS::Neutron::Subnet
properties:
network_id: { get_resource: net }
cidr: 10.0.0.0/24
"""
t = template_format.parse(template)
stack = utils.parse_stack(t)
rsrc = stack['subnet']
stack.create()
self.assertEqual(cfn_funcs.ResourceRef(stack, 'get_resource', 'net'),
rsrc.properties.get('network'))
self.assertIsNone(rsrc.properties.get('network_id'))
|
|
# Generated by Django 2.1.4 on 2018-12-27 08:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Dbnfsp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chr', models.TextField(blank=True, null=True)),
('pos_1_based', models.TextField(blank=True, null=True)),
('ref', models.TextField(blank=True, null=True)),
('alt', models.TextField(blank=True, null=True)),
('aaref', models.TextField(blank=True, null=True)),
('aaalt', models.TextField(blank=True, null=True)),
('rs_dbSNP150', models.TextField(blank=True, null=True)),
('hg19_chr', models.TextField(blank=True, null=True)),
('hg19_pos_1_based', models.TextField(blank=True, null=True)),
('hg18_chr', models.TextField(blank=True, null=True)),
('hg18_pos_1_based', models.TextField(blank=True, null=True)),
('genename', models.TextField(blank=True, null=True)),
('cds_strand', models.TextField(blank=True, null=True)),
('refcodon', models.TextField(blank=True, null=True)),
('codonpos', models.TextField(blank=True, null=True)),
('codon_degeneracy', models.TextField(blank=True, null=True)),
('Ancestral_allele', models.TextField(blank=True, null=True)),
('AltaiNeandertal', models.TextField(blank=True, null=True)),
('Denisova', models.TextField(blank=True, null=True)),
('Ensembl_geneid', models.TextField(blank=True, null=True)),
('Ensembl_transcriptid', models.TextField(blank=True, null=True)),
('Ensembl_proteinid', models.TextField(blank=True, null=True)),
('aapos', models.TextField(blank=True, null=True)),
('SIFT_score', models.TextField(blank=True, null=True)),
('SIFT_converted_rankscore', models.TextField(blank=True, null=True)),
('SIFT_pred', models.TextField(blank=True, null=True)),
('Uniprot_acc_Polyphen2', models.TextField(blank=True, null=True)),
('Uniprot_id_Polyphen2', models.TextField(blank=True, null=True)),
('Uniprot_aapos_Polyphen2', models.TextField(blank=True, null=True)),
('Polyphen2_HDIV_score', models.TextField(blank=True, null=True)),
('Polyphen2_HDIV_rankscore', models.TextField(blank=True, null=True)),
('Polyphen2_HDIV_pred', models.TextField(blank=True, null=True)),
('Polyphen2_HVAR_score', models.TextField(blank=True, null=True)),
('Polyphen2_HVAR_rankscore', models.TextField(blank=True, null=True)),
('Polyphen2_HVAR_pred', models.TextField(blank=True, null=True)),
('LRT_score', models.TextField(blank=True, null=True)),
('LRT_converted_rankscore', models.TextField(blank=True, null=True)),
('LRT_pred', models.TextField(blank=True, null=True)),
('LRT_Omega', models.TextField(blank=True, null=True)),
('MutationTaster_score', models.TextField(blank=True, null=True)),
('MutationTaster_converted_rankscore', models.TextField(blank=True, null=True)),
('MutationTaster_pred', models.TextField(blank=True, null=True)),
('MutationTaster_model', models.TextField(blank=True, null=True)),
('MutationTaster_AAE', models.TextField(blank=True, null=True)),
('MutationAssessor_UniprotID', models.TextField(blank=True, null=True)),
('MutationAssessor_variant', models.TextField(blank=True, null=True)),
('MutationAssessor_score', models.TextField(blank=True, null=True)),
('MutationAssessor_score_rankscore', models.TextField(blank=True, null=True)),
('MutationAssessor_pred', models.TextField(blank=True, null=True)),
('FATHMM_score', models.TextField(blank=True, null=True)),
('FATHMM_converted_rankscore', models.TextField(blank=True, null=True)),
('FATHMM_pred', models.TextField(blank=True, null=True)),
('PROVEAN_score', models.TextField(blank=True, null=True)),
('PROVEAN_converted_rankscore', models.TextField(blank=True, null=True)),
('PROVEAN_pred', models.TextField(blank=True, null=True)),
('Transcript_id_VEST3', models.TextField(blank=True, null=True)),
('Transcript_var_VEST3', models.TextField(blank=True, null=True)),
('VEST3_score', models.TextField(blank=True, null=True)),
('VEST3_rankscore', models.TextField(blank=True, null=True)),
('MetaSVM_score', models.TextField(blank=True, null=True)),
('MetaSVM_rankscore', models.TextField(blank=True, null=True)),
('MetaSVM_pred', models.TextField(blank=True, null=True)),
('MetaLR_score', models.TextField(blank=True, null=True)),
('MetaLR_rankscore', models.TextField(blank=True, null=True)),
('MetaLR_pred', models.TextField(blank=True, null=True)),
('Reliability_index', models.TextField(blank=True, null=True)),
('M_CAP_score', models.TextField(blank=True, null=True)),
('M_CAP_rankscore', models.TextField(blank=True, null=True)),
('M_CAP_pred', models.TextField(blank=True, null=True)),
('REVEL_score', models.TextField(blank=True, null=True)),
('REVEL_rankscore', models.TextField(blank=True, null=True)),
('MutPred_score', models.TextField(blank=True, null=True)),
('MutPred_rankscore', models.TextField(blank=True, null=True)),
('MutPred_protID', models.TextField(blank=True, null=True)),
('MutPred_AAchange', models.TextField(blank=True, null=True)),
('MutPred_Top5features', models.TextField(blank=True, null=True)),
('CADD_raw', models.TextField(blank=True, null=True)),
('CADD_raw_rankscore', models.TextField(blank=True, null=True)),
('CADD_phred', models.TextField(blank=True, null=True)),
('DANN_score', models.TextField(blank=True, null=True)),
('DANN_rankscore', models.TextField(blank=True, null=True)),
('fathmm_MKL_coding_score', models.TextField(blank=True, null=True)),
('fathmm_MKL_coding_rankscore', models.TextField(blank=True, null=True)),
('fathmm_MKL_coding_pred', models.TextField(blank=True, null=True)),
('fathmm_MKL_coding_group', models.TextField(blank=True, null=True)),
('Eigen_coding_or_noncoding', models.TextField(blank=True, null=True)),
('Eigen_raw', models.TextField(blank=True, null=True)),
('Eigen_phred', models.TextField(blank=True, null=True)),
('Eigen_PC_raw', models.TextField(blank=True, null=True)),
('Eigen_PC_phred', models.TextField(blank=True, null=True)),
('Eigen_PC_raw_rankscore', models.TextField(blank=True, null=True)),
('GenoCanyon_score', models.TextField(blank=True, null=True)),
('GenoCanyon_score_rankscore', models.TextField(blank=True, null=True)),
('integrated_fitCons_score', models.TextField(blank=True, null=True)),
('integrated_fitCons_score_rankscore', models.TextField(blank=True, null=True)),
('integrated_confidence_value', models.TextField(blank=True, null=True)),
('GM12878_fitCons_score', models.TextField(blank=True, null=True)),
('GM12878_fitCons_score_rankscore', models.TextField(blank=True, null=True)),
('GM12878_confidence_value', models.TextField(blank=True, null=True)),
('H1_hESC_fitCons_score', models.TextField(blank=True, null=True)),
('H1_hESC_fitCons_score_rankscore', models.TextField(blank=True, null=True)),
('H1_hESC_confidence_value', models.TextField(blank=True, null=True)),
('HUVEC_fitCons_score', models.TextField(blank=True, null=True)),
('HUVEC_fitCons_score_rankscore', models.TextField(blank=True, null=True)),
('HUVEC_confidence_value', models.TextField(blank=True, null=True)),
('GERP_NR', models.TextField(blank=True, null=True)),
('GERP_RS', models.TextField(blank=True, null=True)),
('GERP_RS_rankscore', models.TextField(blank=True, null=True)),
('phyloP100way_vertebrate', models.TextField(blank=True, null=True)),
('phyloP100way_vertebrate_rankscore', models.TextField(blank=True, null=True)),
('phyloP20way_mammalian', models.TextField(blank=True, null=True)),
('phyloP20way_mammalian_rankscore', models.TextField(blank=True, null=True)),
('phastCons100way_vertebrate', models.TextField(blank=True, null=True)),
('phastCons100way_vertebrate_rankscore', models.TextField(blank=True, null=True)),
('phastCons20way_mammalian', models.TextField(blank=True, null=True)),
('phastCons20way_mammalian_rankscore', models.TextField(blank=True, null=True)),
('SiPhy_29way_pi', models.TextField(blank=True, null=True)),
('SiPhy_29way_logOdds', models.TextField(blank=True, null=True)),
('SiPhy_29way_logOdds_rankscore', models.TextField(blank=True, null=True)),
('Gp3_AC_1k', models.TextField(blank=True, null=True)),
('Gp3_AF_1k', models.TextField(blank=True, null=True)),
('Gp3_AFR_AC_1k', models.TextField(blank=True, null=True)),
('Gp3_AFR_AF_1k', models.TextField(blank=True, null=True)),
('Gp3_EUR_AC_1k', models.TextField(blank=True, null=True)),
('Gp3_EUR_AF_1k', models.TextField(blank=True, null=True)),
('Gp3_AMR_AC_1k', models.TextField(blank=True, null=True)),
('Gp3_AMR_AF_1k', models.TextField(blank=True, null=True)),
('Gp3_EAS_AC_1k', models.TextField(blank=True, null=True)),
('Gp3_EAS_AF_1k', models.TextField(blank=True, null=True)),
('Gp3_SAS_AC_1k', models.TextField(blank=True, null=True)),
('Gp3_SAS_AF_1k', models.TextField(blank=True, null=True)),
('TWINSUK_AC', models.TextField(blank=True, null=True)),
('TWINSUK_AF', models.TextField(blank=True, null=True)),
('ALSPAC_AC', models.TextField(blank=True, null=True)),
('ALSPAC_AF', models.TextField(blank=True, null=True)),
('ESP6500_AA_AC', models.TextField(blank=True, null=True)),
('ESP6500_AA_AF', models.TextField(blank=True, null=True)),
('ESP6500_EA_AC', models.TextField(blank=True, null=True)),
('ESP6500_EA_AF', models.TextField(blank=True, null=True)),
('ExAC_AC', models.TextField(blank=True, null=True)),
('ExAC_AF', models.TextField(blank=True, null=True)),
('ExAC_Adj_AC', models.TextField(blank=True, null=True)),
('ExAC_Adj_AF', models.TextField(blank=True, null=True)),
('ExAC_AFR_AC', models.TextField(blank=True, null=True)),
('ExAC_AFR_AF', models.TextField(blank=True, null=True)),
('ExAC_AMR_AC', models.TextField(blank=True, null=True)),
('ExAC_AMR_AF', models.TextField(blank=True, null=True)),
('ExAC_EAS_AC', models.TextField(blank=True, null=True)),
('ExAC_EAS_AF', models.TextField(blank=True, null=True)),
('ExAC_FIN_AC', models.TextField(blank=True, null=True)),
('ExAC_FIN_AF', models.TextField(blank=True, null=True)),
('ExAC_NFE_AC', models.TextField(blank=True, null=True)),
('ExAC_NFE_AF', models.TextField(blank=True, null=True)),
('ExAC_SAS_AC', models.TextField(blank=True, null=True)),
('ExAC_SAS_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_Adj_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_Adj_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_AFR_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_AFR_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_AMR_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_AMR_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_EAS_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_EAS_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_FIN_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_FIN_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_NFE_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_NFE_AF', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_SAS_AC', models.TextField(blank=True, null=True)),
('ExAC_nonTCGA_SAS_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_Adj_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_Adj_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_AFR_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_AFR_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_AMR_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_AMR_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_EAS_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_EAS_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_FIN_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_FIN_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_NFE_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_NFE_AF', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_SAS_AC', models.TextField(blank=True, null=True)),
('ExAC_nonpsych_SAS_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AFR_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AFR_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AFR_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AMR_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AMR_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_AMR_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_ASJ_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_ASJ_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_ASJ_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_EAS_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_EAS_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_EAS_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_FIN_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_FIN_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_FIN_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_NFE_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_NFE_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_NFE_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_SAS_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_SAS_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_SAS_AF', models.TextField(blank=True, null=True)),
('gnomAD_exomes_OTH_AC', models.TextField(blank=True, null=True)),
('gnomAD_exomes_OTH_AN', models.TextField(blank=True, null=True)),
('gnomAD_exomes_OTH_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AFR_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AFR_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AFR_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AMR_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AMR_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_AMR_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_ASJ_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_ASJ_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_ASJ_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_EAS_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_EAS_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_EAS_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_FIN_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_FIN_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_FIN_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_NFE_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_NFE_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_NFE_AF', models.TextField(blank=True, null=True)),
('gnomAD_genomes_OTH_AC', models.TextField(blank=True, null=True)),
('gnomAD_genomes_OTH_AN', models.TextField(blank=True, null=True)),
('gnomAD_genomes_OTH_AF', models.TextField(blank=True, null=True)),
('clinvar_rs', models.TextField(blank=True, null=True)),
('clinvar_clnsig', models.TextField(blank=True, null=True)),
('clinvar_trait', models.TextField(blank=True, null=True)),
('clinvar_golden_stars', models.TextField(blank=True, null=True)),
('Interpro_domain', models.TextField(blank=True, null=True)),
('GTEx_V6p_gene', models.TextField(blank=True, null=True)),
('GTEx_V6p_tissue', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Genome1kGenotype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('genotype', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Genome1kSample',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Genome1kSampleVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('genotype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='databases.Genome1kGenotype')),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='databases.Genome1kSample')),
],
),
migrations.CreateModel(
name='Genome1kVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pos_index', models.TextField(db_index=True)),
('chrom', models.TextField(blank=True, db_index=True, null=True)),
('pos', models.TextField(blank=True, db_index=True, null=True)),
('rsid', models.TextField(blank=True, null=True)),
('ref', models.TextField(blank=True, db_index=True, null=True)),
('alt', models.TextField(blank=True, db_index=True, null=True)),
('qual', models.TextField(blank=True, null=True)),
('filter', models.TextField(blank=True, null=True)),
('info', models.TextField(blank=True, null=True)),
('format', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Genome1kVariantIndex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.TextField()),
('variant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='databases.Genome1kVariant')),
],
),
migrations.CreateModel(
name='HGMD',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.TextField(db_index=True)),
('chrom', models.TextField(blank=True, db_index=True, null=True)),
('pos', models.TextField(blank=True, db_index=True, null=True)),
('rsid', models.TextField(blank=True, null=True)),
('ref', models.TextField(blank=True, db_index=True, null=True)),
('alt', models.TextField(blank=True, db_index=True, null=True)),
('qual', models.TextField(blank=True, null=True)),
('filter', models.TextField(blank=True, null=True)),
('mutclass', models.TextField(blank=True, null=True)),
('mut', models.TextField(blank=True, null=True)),
('gene', models.TextField(blank=True, null=True)),
('strand', models.TextField(blank=True, null=True)),
('dna', models.TextField(blank=True, null=True)),
('prot', models.TextField(blank=True, null=True)),
('db', models.TextField(blank=True, null=True)),
('phen', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='VariSNP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dbsnp_id', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='genome1ksamplevariant',
name='variant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='databases.Genome1kVariant'),
),
]
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rmake.build import buildjob
from rmake.worker import chroot
from rmake.worker import node
from rmake.lib.apiutils import thaw, freeze
def toBuildFlavors(frz):
if '\\000' in frz or '\0' in frz:
# Looks like the old marshal-based format, just ignore it
return []
else:
return [ thaw('flavor', x) for x in frz.splitlines() ]
def fromBuildFlavors(flavorList):
return '\n'.join([freeze('flavor', x) for x in flavorList])
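# Round-trip sketch (assumes conary flavor objects as frozen/thawed by
# rmake.lib.apiutils; `flv1` and `flv2` are hypothetical):
#
#   frz = fromBuildFlavors([flv1, flv2])   # newline-joined freeze('flavor', x)
#   assert toBuildFlavors(frz) == [flv1, flv2]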
class NodeStore(object):
def __init__(self, db):
self.db = db
def addNode(self, name, host, slots, flavors):
cu = self.db.cursor()
cu.execute("""DELETE FROM Nodes where nodeName=?""", name)
cu.execute("""INSERT INTO Nodes (nodeName, host, slots, buildFlavors,
active)
VALUES (?, ?, ?, ?, 1)""",
name, host, slots, fromBuildFlavors(flavors))
def removeNode(self, name):
cu = self.db.cursor()
cu.execute("""UPDATE Nodes SET active=0 WHERE nodeName=?""", name)
def deactivateAllNodes(self):
cu = self.db.cursor()
cu.execute("""UPDATE Nodes SET active=0""")
cu.execute("""UPDATE Chroots SET active=0""")
def setChrootsForNode(self, nodeName, chrootPaths):
cu = self.db.cursor()
cu.execute("SELECT chrootId, troveId, path"
" FROM Chroots WHERE nodeName=?", nodeName)
chrootList = cu.fetchall()
currentPaths = set(chrootPaths)
knownPaths = set(x[2] for x in chrootList)
extraPaths = knownPaths - currentPaths
newPaths = currentPaths - knownPaths
extraIds = [ (x[0],x[1]) for x in chrootList if x[2] in extraPaths ]
for chrootId, troveId in extraIds:
cu.execute("""DELETE FROM Chroots WHERE chrootId=?""",
chrootId)
# use troveId too so we don't have to add an index on chrootId
cu.execute("""UPDATE BuildTroves set chrootId=0 WHERE troveId=?
AND chrootId=?""", troveId, chrootId)
for path in newPaths:
self._createChrootId(cu, nodeName, path, None)
def getNodes(self, names):
cu = self.db.cursor()
nodes = []
for name in names:
cu.execute('''SELECT nodeName, host, slots, buildFlavors, active
FROM Nodes WHERE nodeName=?''', name)
results = self._fetchOne(cu, name)
name, host, slots, buildFlavors, active = results
buildFlavors = toBuildFlavors(buildFlavors)
chroots = self.getChrootsForHost(name)
nodes.append(node.Node(name, host, slots, buildFlavors, chroots,
active))
return nodes
def listNodes(self):
nodes = []
cu = self.db.cursor()
cu.execute('''SELECT nodeName, host, slots, buildFlavors, active
FROM Nodes where active=1''')
for name, host, slots, buildFlavors, active in cu.fetchall():
buildFlavors = toBuildFlavors(buildFlavors)
chroots = self.getChrootsForHost(name)
nodes.append(node.Node(name, host, slots, buildFlavors, active,
chroots))
return nodes
def getEmptySlots(self):
"""
Return the number of slots that are currently not in use.
This should be the number of jobs that can be added at the moment
w/o using up all of the available slots.
"""
totalSlots = self.getSlotCount()
cu = self.db.cursor()
cu.execute("SELECT COUNT(*) FROM Jobs WHERE state in (?, ?)",
buildjob.JOB_STATE_BUILD, buildjob.JOB_STATE_STARTED)
currentJobs = cu.fetchone()[0]
if totalSlots < currentJobs:
return 0
cu.execute("""SELECT jobId,COUNT(jobId) FROM Chroots
JOIN Nodes USING(nodeName)
LEFT JOIN BuildTroves
ON (Chroots.troveId=BuildTroves.troveId)
LEFT JOIN Jobs USING(jobId)
WHERE Chroots.active=1
AND Nodes.active=1
AND jobId IS NOT NULL
GROUP BY jobId""")
jobsSeen = 0
slots = 0
for jobId, count in cu:
jobsSeen += 1
slots += count
totalUsed = slots + (currentJobs - jobsSeen)
return max(totalSlots - totalUsed, 0)
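    # Worked example of the accounting above (illustrative numbers only):
    # with totalSlots=4 and currentJobs=2, where one building job holds two
    # active chroots and the other has none yet, the query yields jobsSeen=1
    # and slots=2, so totalUsed = 2 + (2 - 1) = 3 and getEmptySlots() returns 1.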
def getSlotCount(self):
cu = self.db.cursor()
totalSlots = cu.execute(
"""SELECT SUM(slots)
FROM Nodes WHERE active=1""").fetchone()[0]
return max(totalSlots, 1)
def getOrCreateChrootId(self, trove):
cu = self.db.cursor()
chrootId = cu.execute("""SELECT chrootId from Chroots
WHERE nodeName=? and path=?""",
trove.chrootHost,
trove.chrootPath).fetchall()
if not chrootId:
return self.createChrootId(trove)
return chrootId[0][0]
def createChrootId(self, trove):
cu = self.db.cursor()
host = trove.getChrootHost()
path = trove.getChrootPath()
troveId = self.db.jobStore._getTroveId(cu, trove.jobId,
*trove.getNameVersionFlavor(True))
return self._createChrootId(cu, host, path, troveId)
def _createChrootId(self, cu, nodeName, path, troveId):
cu.execute("""INSERT INTO Chroots (nodeName, path, troveId, active)
VALUES (?,?,?,0)""", nodeName, path, troveId)
chrootId = cu.lastrowid
return chrootId
def moveChroot(self, nodeName, path, newPath):
cu = self.db.cursor()
cu.execute("""UPDATE Chroots SET path=? WHERE nodeName=? AND path=?""",
newPath, nodeName, path)
def removeChroot(self, nodeName, path):
cu = self.db.cursor()
cu.execute("""SELECT chrootId From Chroots
WHERE nodeName=? AND path=?""", nodeName, path)
chrootId = self.db._getOne(cu, (nodeName, path))[0]
cu.execute("""DELETE FROM Chroots WHERE chrootId=?""", chrootId)
cu.execute("""UPDATE BuildTroves set chrootId=0 WHERE chrootId=?""",
chrootId)
def chrootIsActive(self, nodeName, path):
cu = self.db.cursor()
cu.execute('SELECT active From Chroots WHERE nodeName=? AND path=?',
nodeName, path)
active = self.db._getOne(cu, (nodeName, path))[0]
return bool(active)
def setChrootActive(self, trove, active=True):
cu = self.db.cursor()
host = trove.getChrootHost()
path = trove.getChrootPath()
if not (host and path):
return
cu.execute("""UPDATE Chroots SET active=? WHERE nodeName=? and path=?""",
int(active), host, path)
return
def getChrootsForHost(self, nodeName):
cu = self.db.cursor()
cu.execute("""SELECT Chroots.nodeName, path,
jobId, troveName, version, flavor, Chroots.active
FROM Chroots
LEFT JOIN BuildTroves USING(troveId)
LEFT JOIN Nodes ON(Chroots.nodeName = Nodes.nodeName)
WHERE Nodes.active=1 and Nodes.nodeName=?""", nodeName)
return self._getChroots(cu)
def getAllChroots(self):
cu = self.db.cursor()
cu.execute("""SELECT Chroots.nodeName, path,
jobId, troveName, version, flavor, Chroots.active
FROM Chroots
LEFT JOIN BuildTroves USING(troveId)
LEFT JOIN Nodes ON(Chroots.nodeName = Nodes.nodeName)
WHERE Nodes.active=1""")
return self._getChroots(cu)
def _getChroots(self, cu):
chroots = []
for nodeName, path, jobId, name, version, flavor, active in cu:
if jobId:
version = thaw('version', version)
flavor = thaw('flavor', flavor)
troveTuple = (name, version, flavor)
else:
troveTuple = None
c = chroot.Chroot(nodeName, path, jobId, troveTuple, active)
chroots.append(c)
return chroots
|
|
#!/usr/bin/python
from gevent import monkey
monkey.patch_all()
import logging
import gevent
from gevent.coros import BoundedSemaphore
from kafka import KafkaClient, KeyedProducer, SimpleConsumer, common
from uveserver import UVEServer
import os
import json
import copy
import traceback
import uuid
import struct
import socket
import discoveryclient.client as client
from sandesh_common.vns.constants import ALARM_PARTITION_SERVICE_NAME
from pysandesh.util import UTCTimestampUsec
import select
import redis
from collections import namedtuple
PartInfo = namedtuple("PartInfo",["ip_address","instance_id","acq_time","port"])
def sse_pack(d):
"""Pack data in SSE format"""
buffer = ''
for k in ['event','data']:
if k in d.keys():
buffer += '%s: %s\n' % (k, d[k])
return buffer + '\n'
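# Illustrative example (not from the original source):
#   sse_pack({'event': 'update', 'data': '{"x": 1}'}) produces
#       event: update
#       data: {"x": 1}
#   followed by a blank line, which terminates the event in the SSE wire format.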
class UveStreamPart(gevent.Greenlet):
def __init__(self, partno, logger, q, pi, rpass):
gevent.Greenlet.__init__(self)
self._logger = logger
self._q = q
self._pi = pi
self._partno = partno
self._rpass = rpass
def syncpart(self, redish):
inst = self._pi.instance_id
part = self._partno
keys = list(redish.smembers("AGPARTKEYS:%s:%d" % (inst, part)))
ppe = redish.pipeline()
for key in keys:
ppe.hgetall("AGPARTVALUES:%s:%d:%s" % (inst, part, key))
pperes = ppe.execute()
idx=0
for res in pperes:
for tk,tv in res.iteritems():
msg = {'event': 'sync', 'data':\
json.dumps({'partition':self._partno,
'key':keys[idx], 'type':tk, 'value':tv})}
self._q.put(sse_pack(msg))
idx += 1
def _run(self):
lredis = None
pb = None
while True:
try:
lredis = redis.StrictRedis(
host=self._pi.ip_address,
port=self._pi.port,
password=self._rpass,
db=2)
pb = lredis.pubsub()
inst = self._pi.instance_id
part = self._partno
pb.subscribe('AGPARTPUB:%s:%d' % (inst, part))
self.syncpart(lredis)
for message in pb.listen():
if message["type"] != "message":
continue
dataline = message["data"]
try:
elems = json.loads(dataline)
                    except Exception:
self._logger.error("AggUVE Parsing failed: %s" % str(message))
continue
else:
self._logger.error("AggUVE loading: %s" % str(elems))
ppe = lredis.pipeline()
for elem in elems:
# This UVE was deleted
if elem["type"] is None:
ppe.exists("AGPARTVALUES:%s:%d:%s" % \
(inst, part, elem["key"]))
else:
ppe.hget("AGPARTVALUES:%s:%d:%s" % \
(inst, part, elem["key"]), elem["type"])
pperes = ppe.execute()
idx = 0
for elem in elems:
if elem["type"] is None:
msg = {'event': 'update', 'data':\
json.dumps({'partition':part,
'key':elem["key"], 'type':None})}
else:
vjson = pperes[idx]
if vjson is None:
vdata = None
else:
vdata = json.loads(vjson)
msg = {'event': 'update', 'data':\
json.dumps({'partition':part,
'key':elem["key"], 'type':elem["type"],
'value':vdata})}
self._q.put(sse_pack(msg))
idx += 1
except gevent.GreenletExit:
break
except Exception as ex:
template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
lredis = None
if pb is not None:
pb.close()
pb = None
gevent.sleep(2)
return None
class UveStreamer(gevent.Greenlet):
def __init__(self, logger, q, rfile, agp_cb, partitions, rpass):
gevent.Greenlet.__init__(self)
self._logger = logger
self._q = q
self._rfile = rfile
self._agp_cb = agp_cb
self._agp = {}
self._parts = {}
self._partitions = partitions
self._rpass = rpass
def _run(self):
inputs = [ self._rfile ]
outputs = [ ]
msg = {'event': 'init', 'data':\
json.dumps({'partitions':self._partitions})}
self._q.put(sse_pack(msg))
while True:
readable, writable, exceptional = select.select(inputs, outputs, inputs, 1)
if (readable or writable or exceptional):
break
newagp = self._agp_cb()
set_new, set_old = set(newagp.keys()), set(self._agp.keys())
intersect = set_new.intersection(set_old)
# deleted parts
for elem in set_old - intersect:
self.partition_stop(elem)
# new parts
for elem in set_new - intersect:
self.partition_start(elem, newagp[elem])
# changed parts
for elem in intersect:
if self._agp[elem] != newagp[elem]:
self.partition_stop(elem)
self.partition_start(elem, newagp[elem])
self._agp = newagp
for part, pi in self._agp.iteritems():
self.partition_stop(part)
def partition_start(self, partno, pi):
self._logger.error("Starting agguve part %d using %s" %( partno, pi))
msg = {'event': 'clear', 'data':\
json.dumps({'partition':partno, 'acq_time':pi.acq_time})}
self._q.put(sse_pack(msg))
self._parts[partno] = UveStreamPart(partno, self._logger,
self._q, pi, self._rpass)
self._parts[partno].start()
def partition_stop(self, partno):
self._logger.error("Stopping agguve part %d" % partno)
self._parts[partno].kill()
self._parts[partno].get()
del self._parts[partno]
class PartitionHandler(gevent.Greenlet):
def __init__(self, brokers, group, topic, logger, limit):
gevent.Greenlet.__init__(self)
self._brokers = brokers
self._group = group
self._topic = topic
self._logger = logger
self._limit = limit
self._uvedb = {}
self._partoffset = 0
self._kfk = None
def msg_handler(self, mlist):
self._logger.info("%s Reading %s" % (self._topic, str(mlist)))
return True
def _run(self):
pcount = 0
while True:
try:
self._logger.error("New KafkaClient %s" % self._topic)
self._kfk = KafkaClient(self._brokers , "kc-" + self._topic)
try:
consumer = SimpleConsumer(self._kfk, self._group, self._topic, buffer_size = 4096*4, max_buffer_size=4096*32)
#except:
except Exception as ex:
template = "Consumer Failure {0} occured. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.info("%s" % messag)
raise RuntimeError(messag)
self._logger.error("Starting %s" % self._topic)
# Find the offset of the last message that has been queued
consumer.seek(-1,2)
try:
mi = consumer.get_message(timeout=0.1)
consumer.commit()
except common.OffsetOutOfRangeError:
mi = None
#import pdb; pdb.set_trace()
self._logger.info("Last Queued for %s is %s" % \
(self._topic,str(mi)))
# start reading from last previously processed message
if mi != None:
consumer.seek(0,1)
else:
consumer.seek(0,0)
if self._limit:
raise gevent.GreenletExit
while True:
try:
mlist = consumer.get_messages(10,timeout=0.5)
if not self.msg_handler(mlist):
raise gevent.GreenletExit
consumer.commit()
pcount += len(mlist)
except TypeError as ex:
self._logger.error("Type Error: %s trace %s" % \
(str(ex.args), traceback.format_exc()))
gevent.sleep(0.1)
except common.FailedPayloadsError as ex:
self._logger.error("Payload Error: %s" % str(ex.args))
gevent.sleep(0.1)
except gevent.GreenletExit:
break
except AssertionError as ex:
self._partoffset = ex
break
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
self.stop_partition()
gevent.sleep(2)
self._logger.error("Stopping %s pcount %d" % (self._topic, pcount))
partdb = self.stop_partition()
return self._partoffset, partdb
class UveStreamProc(PartitionHandler):
# Arguments:
#
# brokers : broker list for kafka bootstrap
# partition : partition number
# uve_topic : Topic to consume
# logger : logging object to use
# callback : Callback function for reporting the set of the UVEs
# that may have changed for a given notification
# rsc : Callback function to check on collector status
# and get sync contents for new collectors
# aginst : instance_id of alarmgen
# rport : redis server port
# disc : discovery client to publish to
def __init__(self, brokers, partition, uve_topic, logger, callback,
host_ip, rsc, aginst, rport, disc = None):
super(UveStreamProc, self).__init__(brokers, "workers",
uve_topic, logger, False)
self._uvedb = {}
self._uvein = {}
self._uveout = {}
self._callback = callback
self._partno = partition
self._host_ip = host_ip
self._ip_code, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, host_ip))
self.disc_rset = set()
self._resource_cb = rsc
self._aginst = aginst
self._disc = disc
self._acq_time = UTCTimestampUsec()
self._rport = rport
def acq_time(self):
return self._acq_time
def resource_check(self, msgs):
'''
This function compares the known collectors with the
list from discovery, and syncs UVE keys accordingly
'''
newset , coll_delete, chg_res = self._resource_cb(self._partno, self.disc_rset, msgs)
for coll in coll_delete:
self._logger.error("Part %d lost collector %s" % (self._partno, coll))
self.stop_partition(coll)
if len(chg_res):
self.start_partition(chg_res)
self.disc_rset = newset
if self._disc:
data = { 'instance-id' : self._aginst,
'partition' : str(self._partno),
'ip-address': self._host_ip,
'acq-time': str(self._acq_time),
'port':str(self._rport)}
self._disc.publish(ALARM_PARTITION_SERVICE_NAME, data)
def stop_partition(self, kcoll=None):
clist = []
if not kcoll:
clist = self._uvedb.keys()
            # If all collectors are being cleared, clear resources too
self.disc_rset = set()
if self._disc:
# TODO: Unpublish instead of setting acq-time to 0
data = { 'instance-id' : self._aginst,
'partition' : str(self._partno),
'ip-address': self._host_ip,
'acq-time': "0",
'port':str(self._rport)}
self._disc.publish(ALARM_PARTITION_SERVICE_NAME, data)
else:
clist = [kcoll]
self._logger.error("Stopping part %d collectors %s" % \
(self._partno,clist))
partdb = {}
chg = {}
for coll in clist:
partdb[coll] = {}
for gen in self._uvedb[coll].keys():
partdb[coll][gen] = {}
for tab in self._uvedb[coll][gen].keys():
for rkey in self._uvedb[coll][gen][tab].keys():
uk = tab + ":" + rkey
chg[uk] = None
partdb[coll][gen][uk] = \
set(self._uvedb[coll][gen][tab][rkey].keys())
del self._uvedb[coll]
self._logger.error("Stopping part %d UVEs %s" % \
(self._partno,str(chg.keys())))
self._callback(self._partno, chg)
return partdb
def start_partition(self, cbdb):
''' This function loads the initial UVE database.
for the partition
'''
self._logger.error("Starting part %d collectors %s" % \
(self._partno, str(cbdb.keys())))
uves = {}
for kcoll,coll in cbdb.iteritems():
self._uvedb[kcoll] = {}
for kgen,gen in coll.iteritems():
self._uvedb[kcoll][kgen] = {}
for kk in gen.keys():
tabl = kk.split(":",1)
tab = tabl[0]
rkey = tabl[1]
if not tab in self._uvedb[kcoll][kgen]:
self._uvedb[kcoll][kgen][tab] = {}
self._uvedb[kcoll][kgen][tab][rkey] = {}
uves[kk] = {}
for typ, contents in gen[kk].iteritems():
self._uvedb[kcoll][kgen][tab][rkey][typ] = {}
self._uvedb[kcoll][kgen][tab][rkey][typ]["c"] = 0
self._uvedb[kcoll][kgen][tab][rkey][typ]["u"] = \
uuid.uuid1(self._ip_code)
uves[kk][typ] = contents
self._logger.error("Starting part %d UVEs %s" % \
(self._partno, str(uves.keys())))
self._callback(self._partno, uves)
def contents(self):
return self._uvedb
def stats(self):
''' Return the UVEKey-Count stats collected over
the last time period for this partition, and
the incoming UVE Notifs as well.
Also, the stats should be cleared to prepare
for the next period of collection.
'''
ret_out = copy.deepcopy(self._uveout)
ret_in = copy.deepcopy(self._uvein)
self._uveout = {}
self._uvein = {}
return ret_in, ret_out
def msg_handler(self, mlist):
self.resource_check(mlist)
for mm in mlist:
if mm is None:
continue
self._logger.debug("%s Reading offset %d" % \
(self._topic, mm.offset))
if not self.msg_handler_single(mm):
self._logger.info("%s could not handle %s" % \
(self._topic, str(mm)))
return False
return True
def msg_handler_single(self, om):
self._partoffset = om.offset
chg = {}
try:
uv = json.loads(om.message.value)
coll = uv["coll"]
gen = uv["gen"]
if not self._uvedb.has_key(coll):
# This partition is not synced yet.
# Ignore this message
self._logger.debug("%s Ignoring UVE %s" % (self._topic, str(om)))
return True
if not self._uvedb[coll].has_key(gen):
self._uvedb[coll][gen] = {}
if (uv["message"] == "UVEUpdate"):
tabl = uv["key"].split(":",1)
tab = tabl[0]
rkey = tabl[1]
if tab not in self._uvedb[coll][gen]:
self._uvedb[coll][gen][tab] = {}
if not rkey in self._uvedb[coll][gen][tab]:
self._uvedb[coll][gen][tab][rkey] = {}
removed = False
# uv["type"] and uv["value"] can be decoded as follows:
# uv["type"] can be one of the following:
# - None # All Types under this UVE are deleted
# uv["value"] will not be present
# (this option is only for agg UVE updates)
# - "<Struct>" # uv["value"] refers to this struct
# uv["value"] can be one of the following:
# - None # This Type has been deleted.
# - {} # The Type has a value, which is
# not available in this message.
# (this option is only for raw UVE updates)
# - {<Value>} # The Value of the Type
# (this option is only for agg UVE updates)
if uv["type"] is None:
# TODO: Handling of delete UVE case
return False
if uv["value"] is None:
if uv["type"] in self._uvedb[coll][gen][tab][rkey]:
del self._uvedb[coll][gen][tab][rkey][uv["type"]]
if not len(self._uvedb[coll][gen][tab][rkey]):
del self._uvedb[coll][gen][tab][rkey]
removed = True
if not removed:
if uv["type"] in self._uvedb[coll][gen][tab][rkey]:
self._uvedb[coll][gen][tab][rkey][uv["type"]]["c"] +=1
else:
self._uvedb[coll][gen][tab][rkey][uv["type"]] = {}
self._uvedb[coll][gen][tab][rkey][uv["type"]]["c"] = 1
self._uvedb[coll][gen][tab][rkey][uv["type"]]["u"] = \
uuid.uuid1(self._ip_code)
chg[uv["key"]] = { uv["type"] : uv["value"] }
# Record stats on UVE Keys being processed
if not self._uveout.has_key(tab):
self._uveout[tab] = {}
if self._uveout[tab].has_key(uv["key"]):
self._uveout[tab][uv["key"]] += 1
else:
self._uveout[tab][uv["key"]] = 1
# Record stats on the input UVE Notifications
if not self._uvein.has_key(tab):
self._uvein[tab] = {}
if not self._uvein[tab].has_key(coll):
self._uvein[tab][coll] = {}
if not self._uvein[tab][coll].has_key(gen):
self._uvein[tab][coll][gen] = {}
if not self._uvein[tab][coll][gen].has_key(uv["type"]):
self._uvein[tab][coll][gen][uv["type"]] = 1
else:
self._uvein[tab][coll][gen][uv["type"]] += 1
else:
# Record stats on UVE Keys being processed
for tab in self._uvedb[coll][gen].keys():
for rkey in self._uvedb[coll][gen][tab].keys():
uk = tab + ":" + rkey
if not self._uveout.has_key(tab):
self._uveout[tab] = {}
if self._uveout[tab].has_key(uk):
self._uveout[tab][uk] += 1
else:
self._uveout[tab][uk] = 1
                        # when a generator is deleted, we need to
                        # notify for *ALL* its UVEs
chg[uk] = None
del self._uvedb[coll][gen]
except Exception as ex:
template = "An exception of type {0} in uve proc . Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.info("%s" % messag)
return False
else:
self._callback(self._partno, chg)
return True
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
workers = {}
brokers = "localhost:9092,localhost:9093,localhost:9094"
group = "workers"
kafka = KafkaClient(brokers,str(os.getpid()))
cons = SimpleConsumer(kafka, group, "ctrl")
cons.provide_partition_info()
print "Starting control"
end_ready = False
while end_ready == False:
try:
while True:
part, mmm = cons.get_message(timeout=None)
mm = mmm.message
print "Consumed ctrl " + str(mm)
if mm.value == "start":
if workers.has_key(mm.key):
print "Dup partition %s" % mm.key
raise ValueError
else:
ph = UveStreamProc(brokers, int(mm.key), "uve-" + mm.key, "alarm-x" + mm.key, logging)
ph.start()
workers[int(mm.key)] = ph
elif mm.value == "stop":
#import pdb; pdb.set_trace()
if workers.has_key(int(mm.key)):
ph = workers[int(mm.key)]
gevent.kill(ph)
res,db = ph.get()
print "Returned " + str(res)
print "State :"
for k,v in db.iteritems():
print "%s -> %s" % (k,str(v))
del workers[int(mm.key)]
else:
end_ready = True
cons.commit()
gevent.sleep(2)
break
except TypeError:
gevent.sleep(0.1)
except common.FailedPayloadsError as ex:
print "Payload Error: " + str(ex.args)
gevent.sleep(0.1)
lw=[]
for key, value in workers.iteritems():
gevent.kill(value)
lw.append(value)
gevent.joinall(lw)
print "Ending Consumers"
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for dealing with HEALPix projections and mappings
"""
from __future__ import absolute_import, division, print_function
import re
import healpy as hp
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy.coordinates import Galactic, ICRS
from fermipy.wcs_utils import WCSProj
# This is an approximation of the size of HEALPix pixels (in degrees)
# for a particular order. It is used to convert from HEALPix to WCS-based
# projections
HPX_ORDER_TO_PIXSIZE = [32.0, 16.0, 8.0, 4.0, 2.0, 1.0,
0.50, 0.25, 0.1, 0.05, 0.025, 0.01,
0.005, 0.002]
class HPX_Conv(object):
""" Data structure to define how a HEALPix map is stored to FITS """
def __init__(self, convname, **kwargs):
"""
"""
self.convname = convname
self.colstring = kwargs.get('colstring', 'CHANNEL')
self.idxstring = kwargs.get('idxstring', 'PIX')
self.firstcol = kwargs.get('firstcol', 1)
self.extname = kwargs.get('extname', 'SKYMAP')
self.energy_hdu = kwargs.get('energy_hdu', 'EBOUNDS')
self.quantity_type = kwargs.get('quantity_type', 'integral')
self.coordsys = kwargs.get('coordsys', 'COORDSYS')
def colname(self, indx):
return "%s%i" % (self.colstring, indx)
# Various conventions for storing HEALPix maps in FITS files
HPX_FITS_CONVENTIONS = {'FGST_CCUBE': HPX_Conv('FGST_CCUBE'),
'FGST_LTCUBE': HPX_Conv('FGST_LTCUBE', colstring='COSBINS', extname='EXPOSURE', energy_hdu='CTHETABOUNDS'),
'FGST_BEXPCUBE': HPX_Conv('FGST_BEXPCUBE', colstring='ENERGY', extname='HPXEXPOSURES', energy_hdu='ENERGIES'),
'FGST_SRCMAP': HPX_Conv('FGST_SRCMAP', extname=None, quantity_type='differential'),
'FGST_TEMPLATE': HPX_Conv('FGST_TEMPLATE', colstring='ENERGY', energy_hdu='ENERGIES'),
'FGST_SRCMAP_SPARSE': HPX_Conv('FGST_SRCMAP_SPARSE', colstring=None, extname=None, quantity_type='differential'),
'GALPROP': HPX_Conv('GALPROP', colstring='Bin', idxstring='HPXINDEX', extname='SKYMAP2',
energy_hdu='ENERGIES', quantity_type='differential',
coordsys='COORDTYPE'),
'GALPROP2': HPX_Conv('GALPROP2', colstring='Bin', idxstring='HPXINDEX', extname='SKYMAP2',
energy_hdu='ENERGIES', quantity_type='differential')}
def coords_to_vec(lon, lat):
""" Converts longitute and latitude coordinates to a unit 3-vector
return array(3,n) with v_x[i],v_y[i],v_z[i] = directional cosines
"""
phi = np.radians(lon)
theta = (np.pi / 2) - np.radians(lat)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
xVals = sin_t * np.cos(phi)
yVals = sin_t * np.sin(phi)
zVals = cos_t
# Stack them into the output array
out = np.vstack((xVals, yVals, zVals)).swapaxes(0, 1)
return out
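# Quick sanity checks (illustrative, up to floating-point rounding):
#   coords_to_vec(0., 0.)  -> [[1., 0., 0.]]   (x axis)
#   coords_to_vec(90., 0.) -> [[0., 1., 0.]]   (y axis)
#   coords_to_vec(0., 90.) -> [[0., 0., 1.]]   (north pole)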
def get_pixel_size_from_nside(nside):
""" Returns an estimate of the pixel size from the HEALPix nside coordinate
This just uses a lookup table to provide a nice round number for each
HEALPix order.
"""
order = int(np.log2(nside))
if order < 0 or order > 13:
        raise ValueError('HEALPix order must be between 0 and 13, got %i' % order)
return HPX_ORDER_TO_PIXSIZE[order]
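# E.g. (illustrative): nside=8 -> order=3 -> 4.0 deg; nside=256 -> order=8 -> 0.1 deg.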
def hpx_to_axes(h, npix):
""" Generate a sequence of bin edge vectors corresponding to the
axes of a HPX object."""
x = h.ebins
z = np.arange(npix[-1] + 1)
return x, z
def hpx_to_coords(h, shape):
""" Generate an N x D list of pixel center coordinates where N is
the number of pixels and D is the dimensionality of the map."""
x, z = hpx_to_axes(h, shape)
x = np.sqrt(x[0:-1] * x[1:])
z = z[:-1] + 0.5
x = np.ravel(np.ones(shape) * x[:, np.newaxis])
z = np.ravel(np.ones(shape) * z[np.newaxis, :])
return np.vstack((x, z))
def get_map_skydir(filename):
hdulist = fits.open(filename)
coordsys = hdulist[1].header['COORDSYS']
if coordsys == 'GAL':
return SkyCoord(0., 0., unit='deg', frame='galactic').transform_to('icrs')
else:
return SkyCoord(0., 0., unit='deg', frame='icrs')
def make_hpx_to_wcs_mapping_centers(hpx, wcs):
""" Make the mapping data needed to from from HPX pixelization to a
WCS-based array
Parameters
----------
hpx : `~fermipy.hpx_utils.HPX`
The healpix mapping (an HPX object)
wcs : `~astropy.wcs.WCS`
The wcs mapping (a pywcs.wcs object)
Returns
-------
ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel
        -1 indicates the wcs pixel does not contain the center of a HEALPix pixel
mult_val : array(nx,ny) of 1.
npix : tuple(nx,ny) with the shape of the wcs grid
"""
npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))
mult_val = np.ones(npix).T.flatten()
sky_crds = hpx.get_sky_coords()
pix_crds = wcs.wcs_world2pix(sky_crds, 0).astype(int)
ipixs = -1 * np.ones(npix, int).T.flatten()
pix_index = npix[1] * pix_crds[0:, 0] + pix_crds[0:, 1]
if hpx._ipix is None:
for ipix, pix_crd in enumerate(pix_index):
ipixs[pix_crd] = ipix
else:
for pix_crd, ipix in zip(pix_index, hpx._ipix):
ipixs[pix_crd] = ipix
ipixs = ipixs.reshape(npix).T.flatten()
return ipixs, mult_val, npix
def make_hpx_to_wcs_mapping(hpx, wcs):
"""Make the mapping data needed to from from HPX pixelization to a
WCS-based array
Parameters
----------
hpx : `~fermipy.hpx_utils.HPX`
The healpix mapping (an HPX object)
wcs : `~astropy.wcs.WCS`
The wcs mapping (a pywcs.wcs object)
Returns
-------
ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel
mult_val : array(nx,ny) of 1./number of wcs pixels pointing at each HEALPix pixel
npix : tuple(nx,ny) with the shape of the wcs grid
"""
npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))
pix_crds = np.dstack(np.meshgrid(np.arange(npix[0]),
np.arange(npix[1]))).swapaxes(0, 1).reshape((npix[0] * npix[1], 2))
if wcs.wcs.naxis == 2:
sky_crds = wcs.wcs_pix2world(pix_crds, 0)
else:
use_wcs = wcs.dropaxis(2)
sky_crds = use_wcs.wcs_pix2world(pix_crds, 0)
sky_crds *= np.radians(1.)
sky_crds[0:, 1] = (np.pi / 2) - sky_crds[0:, 1]
fullmask = np.isnan(sky_crds)
mask = (fullmask[0:, 0] + fullmask[0:, 1]) == 0
ipixs = -1 * np.ones(npix, int).T.flatten()
ipixs[mask] = hp.pixelfunc.ang2pix(hpx.nside, sky_crds[0:, 1][mask],
sky_crds[0:, 0][mask], hpx.nest)
# Here we are counting the number of HEALPix pixels each WCS pixel points to;
# this could probably be vectorized by filling a histogram.
d_count = {}
for ipix in ipixs:
if ipix in d_count:
d_count[ipix] += 1
else:
d_count[ipix] = 1
    # Here we are getting a multiplicative factor that tells us how to split up
# the counts in each HEALPix pixel (by dividing the corresponding WCS pixels
# by the number of associated HEALPix pixels).
# This could also likely be vectorized.
mult_val = np.ones(ipixs.shape)
for i, ipix in enumerate(ipixs):
mult_val[i] /= d_count[ipix]
ipixs = ipixs.reshape(npix).flatten()
mult_val = mult_val.reshape(npix).flatten()
return ipixs, mult_val, npix
def match_hpx_pixel(nside, nest, nside_pix, ipix_ring):
"""
"""
ipix_in = np.arange(12 * nside * nside)
vecs = hp.pix2vec(nside, ipix_in, nest)
pix_match = hp.vec2pix(nside_pix, vecs[0], vecs[1], vecs[2]) == ipix_ring
return ipix_in[pix_match]
def parse_hpxregion(region):
"""Parse the HPX_REG header keyword into a list of tokens."""
m = re.match(r'([A-Za-z\_]*?)\((.*?)\)', region)
if m is None:
raise Exception('Failed to parse hpx region string.')
if not m.group(1):
return re.split(',', m.group(2))
else:
return [m.group(1)] + re.split(',', m.group(2))
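# Illustrative examples of the region grammar handled here:
#   parse_hpxregion('DISK(110.,75.,10.)')      -> ['DISK', '110.', '75.', '10.']
#   parse_hpxregion('HPX_PIXEL(NESTED,32,41)') -> ['HPX_PIXEL', 'NESTED', '32', '41']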
def is_power2(n):
"""Check if an integer is a power of 2."""
return bool(n and not n & (n - 1))
def upix_to_pix(upix):
"""Get the nside from a unique pixel number."""
nside = np.power(2, np.floor(np.log2(upix / 4)) / 2).astype(int)
pix = upix - 4 * np.power(nside, 2)
return pix, nside
def pix_to_upix(pix, nside):
"""Compute the unique pixel number from the pixel number and nside."""
return pix + 4 * np.power(nside, 2)
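# Unique-pixel scheme (illustrative): upix = pix + 4 * nside**2, so
#   pix_to_upix(10, nside=2) -> 26   and   upix_to_pix(26) -> (10, 2)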
class HPX(object):
""" Encapsulation of basic healpix map parameters """
def __init__(self, nside, nest, coordsys, order=-1, ebins=None, **kwargs):
"""C'tor
Parameters
----------
nside : `int`
HEALPix nside parameter, the total number of pixels is
12*nside*nside.
nest : `bool`
True -> 'NESTED', False -> 'RING' indexing scheme
coordsys : `str`
Coordinate system, 'CEL' | 'GAL'
order : `int`
HEALPix order, nside = 2^order
-1 -> non-standard map
ebins : `np.array` or `None`
Energy bins
Keyword arguments:
------------------
region : `str` or None
            String defining the HEALPix region.
conv : `HPX_Conv`
Object defining the convention for column names and the like
pixels : `np.array` or `None`
For use with 'EXPLICIT' region string
"""
conv = kwargs.get('conv', HPX_Conv('FGST_CCUBE'))
if nside >= 0:
if order >= 0:
                raise Exception('Specify either nside or order, not both.')
self._nside = nside
if is_power2(nside):
self._order = int(np.log2(self._nside))
else:
self._order = -1
else:
self._nside = 2**order
self._order = order
self._nest = nest
self._coordsys = coordsys
self._region = kwargs.get('region', None)
self._ipix = kwargs.get('pixels', None)
if self._region is not None:
if self._ipix is None:
self._ipix = self.get_index_list(self._nside, self._nest, self._region)
self._maxpix = 12 * self._nside * self._nside
if self._ipix is None:
self._rmap = None
self._npix = self._maxpix
else:
self._rmap = {}
for i, ipixel in enumerate(self._ipix.flat):
self._rmap[ipixel] = i
self._npix = len(self._ipix)
self._ebins = ebins
self._conv = conv
if self._ebins is not None:
self._evals = np.sqrt(self._ebins[0:-1] * self._ebins[1:])
else:
self._evals = None
def __getitem__(self, sliced):
"""This implements the global-to-local lookup. For all-sky maps it
        just returns the input array. For partial-sky maps it returns
the local indices corresponding to the indices in the input
array, and -1 for those pixels that are outside the selected
region.
Parameters
----------
sliced: `~numpy.ndarray`
An array of HEALPix pixel indices
"""
if self._rmap is not None:
retval = np.empty((sliced.size), 'i')
retval.fill(-1)
m = np.in1d(sliced.flat, self._ipix)
retval[m] = np.searchsorted(self._ipix, sliced.flat[m])
return retval.reshape(sliced.shape)
return sliced
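    # Illustrative example with hypothetical values: for a partial-sky map with
    # self._ipix == np.array([4, 7, 9]), hpx[np.array([7, 5, 9])] returns
    # np.array([1, -1, 2]) -- local indices inside the region, -1 outside.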
@property
def ordering(self):
if self._nest:
return "NESTED"
return "RING"
@property
def nside(self):
return self._nside
@property
def order(self):
return self._order
@property
def nest(self):
return self._nest
@property
def npix(self):
return self._npix
@property
def ebins(self):
return self._ebins
@property
def conv(self):
return self._conv
@property
def coordsys(self):
return self._coordsys
@property
def evals(self):
return self._evals
@property
def region(self):
return self._region
def ud_graded_hpx(self, order):
"""
"""
if self.order < 0:
raise RuntimeError(
"Upgrade and degrade only implemented for standard maps")
# FIXME, this doesn't deal with pixels lists, only regions
if self._region is None:
pixels = None
else:
pixels = HPX.get_index_list(self.nside, self.nest, self.region)
return HPX(-1, self.nest, self.coordsys, order, self.ebins,
region=self.region, conv=self.conv, pixels=pixels)
def make_swapped_hpx(self):
"""
"""
# FIXME, this doesn't deal with pixels lists, only regions
if self.region is None:
pixels = None
else:
pixels = HPX.get_index_list(self.nside, not self.nest, self.region)
return HPX(self.nside, not self.nest, self.coordsys, -1, self.ebins,
region=self.region, conv=self.conv, pixels=pixels)
def copy_and_drop_energy(self, pixels=None):
"""
"""
return HPX(self.nside, self.nest, self.coordsys, -1, None,
region=self.region, conv=self.conv, pixels=pixels)
@classmethod
def create_hpx(cls, nside, nest, coordsys='CEL', order=-1, ebins=None,
region=None, conv=HPX_Conv('FGST_CCUBE'), pixels=None):
"""Create a HPX object.
Parameters
----------
nside : int
            HEALPix nside parameter
nest : bool
True for HEALPix "NESTED" indexing scheme, False for "RING" scheme.
coordsys : str
"CEL" or "GAL"
order : int
nside = 2**order
ebins : `~numpy.ndarray`
Energy bin edges
region : str
Allows for partial-sky mappings
conv : `HPX_Conv`
Object defining the convention for column names and the like
pixels : `np.array` or `None`
For use with 'EXPLICIT' region string
"""
return cls(nside, nest, coordsys, order, ebins,
region=region, conv=conv, pixels=pixels)
@staticmethod
def identify_HPX_convention(header):
""" Identify the convention used to write this file """
# Hopefully the file contains the HPX_CONV keyword specifying
# the convention used
try:
return header['HPX_CONV']
except KeyError:
pass
indxschm = header.get('INDXSCHM', None)
# Try based on the EXTNAME keyword
extname = header.get('EXTNAME', None)
if extname == 'HPXEXPOSURES':
return 'FGST_BEXPCUBE'
elif extname == 'SKYMAP2':
if 'COORDTYPE' in header.keys():
return 'GALPROP'
else:
return 'GALPROP2'
# Check for the INDXSCHM keyword
if indxschm == 'SPARSE':
return 'FGST_SRCMAP_SPARSE'
# Check the name of the first column
colname = header['TTYPE1']
if colname == 'PIX':
colname = header['TTYPE2']
if colname == 'KEY':
return 'FGST_SRCMAP_SPARSE'
elif colname == 'ENERGY1':
return 'FGST_TEMPLATE'
elif colname == 'COSBINS':
return 'FGST_LTCUBE'
elif colname == 'Bin0':
return 'GALPROP'
elif colname in ['CHANNEL1', 'Bin 0']:
if extname == 'SKYMAP':
return 'FGST_CCUBE'
else:
return 'FGST_SRCMAP'
else:
raise ValueError("Could not identify HEALPix convention")
@classmethod
def create_from_header(cls, header, ebins=None, pixels=None):
""" Creates an HPX object from a FITS header.
header : The FITS header
ebins : Energy bin edges [optional]
"""
convname = HPX.identify_HPX_convention(header)
conv = HPX_FITS_CONVENTIONS[convname]
if conv.convname not in ['GALPROP', 'GALPROP2']:
if header["PIXTYPE"] != "HEALPIX":
raise Exception("PIXTYPE != HEALPIX")
if header["PIXTYPE"] != "HEALPIX":
raise Exception("PIXTYPE != HEALPIX")
if header["ORDERING"] == "RING":
nest = False
elif header["ORDERING"] == "NESTED":
nest = True
else:
raise Exception("ORDERING != RING | NESTED")
try:
order = header["ORDER"]
except KeyError:
order = -1
if order < 0:
nside = header["NSIDE"]
else:
nside = -1
try:
coordsys = header[conv.coordsys]
except KeyError:
coordsys = header['COORDSYS']
try:
region = header["HPX_REG"]
except KeyError:
try:
region = header["HPXREGION"]
except KeyError:
region = None
try:
if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']:
use_pixels = pixels
else:
use_pixels = None
except KeyError:
use_pixels = None
return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=use_pixels)
@classmethod
def create_from_hdu(cls, hdu, ebins=None):
""" Creates an HPX object from a FITS header.
hdu : The FITS hdu
ebins : Energy bin edges [optional]
"""
convname = HPX.identify_HPX_convention(hdu.header)
conv = HPX_FITS_CONVENTIONS[convname]
try:
pixels = hdu.data[conv.idxstring]
except KeyError:
pixels = None
return cls.create_from_header(hdu.header, ebins, pixels)
def make_header(self):
""" Builds and returns FITS header for this HEALPix map """
cards = [fits.Card("TELESCOP", "GLAST"),
fits.Card("INSTRUME", "LAT"),
fits.Card(self._conv.coordsys, self._coordsys),
fits.Card("PIXTYPE", "HEALPIX"),
fits.Card("ORDERING", self.ordering),
fits.Card("ORDER", self._order),
fits.Card("NSIDE", self._nside),
fits.Card("FIRSTPIX", 0),
fits.Card("LASTPIX", self._maxpix - 1),
fits.Card("HPX_CONV", self._conv.convname)]
if self._coordsys == "CEL":
cards.append(fits.Card("EQUINOX", 2000.0,
"Equinox of RA & DEC specifications"))
if self._region is not None:
cards.append(fits.Card("HPX_REG", self._region))
cards.append(fits.Card("INDXSCHM", "PARTIAL"))
elif self._ipix is not None:
cards.append(fits.Card("INDXSCHM", "EXPLICIT"))
else:
if self._conv.convname in ['FGST_SRCMAP_SPARSE']:
cards.append(fits.Card("INDXSCHM", "SPARSE"))
else:
cards.append(fits.Card("INDXSCHM", "IMPLICIT"))
header = fits.Header(cards)
return header
def make_hdu(self, data, **kwargs):
""" Builds and returns a FITs HDU with input data
data : The data begin stored
Keyword arguments
-------------------
extname : The HDU extension name
colbase : The prefix for column names
"""
shape = data.shape
extname = kwargs.get('extname', self.conv.extname)
if shape[-1] != self._npix:
raise Exception(
"Size of data array does not match number of pixels")
cols = []
if self._ipix is not None:
cols.append(fits.Column(self.conv.idxstring, "J", array=self._ipix))
if self.conv.convname == 'FGST_SRCMAP_SPARSE':
nonzero = data.nonzero()
nfilled = len(nonzero[0])
if len(shape) == 1:
cols.append(fits.Column("PIX", "J", array=nonzero[0].astype(int)))
cols.append(fits.Column("VALUE", "E", array=data.flat[nonzero].astype(float).reshape(nfilled)))
elif len(shape) == 2:
keys = self._npix * nonzero[0] + nonzero[1]
cols.append(fits.Column("PIX", "J", array=nonzero[1].reshape(nfilled)))
cols.append(fits.Column("CHANNEL", "I", array=nonzero[0].reshape(nfilled)))
cols.append(fits.Column("VALUE", "E",
array=data.flat[keys].astype(float).reshape(nfilled)))
else:
raise Exception("HPX.write_fits only handles 1D and 2D maps")
else:
if len(shape) == 1:
cols.append(fits.Column(self.conv.colname(
indx=self.conv.firstcol), "E", array=data.astype(float)))
elif len(shape) == 2:
for i in range(shape[0]):
cols.append(fits.Column(self.conv.colname(
indx=i + self.conv.firstcol), "E", array=data[i].astype(float)))
else:
raise Exception("HPX.write_fits only handles 1D and 2D maps")
header = self.make_header()
hdu = fits.BinTableHDU.from_columns(cols, header=header, name=extname)
return hdu
def make_energy_bounds_hdu(self, extname="EBOUNDS"):
""" Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
"""
if self._ebins is None:
return None
cols = [fits.Column("CHANNEL", "I", array=np.arange(1, len(self._ebins + 1))),
fits.Column("E_MIN", "1E", unit='keV',
array=1000 * self._ebins[0:-1]),
fits.Column("E_MAX", "1E", unit='keV', array=1000 * self._ebins[1:])]
hdu = fits.BinTableHDU.from_columns(
cols, self.make_header(), name=extname)
return hdu
def make_energies_hdu(self, extname="ENERGIES"):
""" Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
"""
if self._evals is None:
return None
cols = [fits.Column("ENERGY", "1E", unit='MeV',
array=self._evals)]
hdu = fits.BinTableHDU.from_columns(
cols, self.make_header(), name=extname)
return hdu
def write_fits(self, data, outfile, extname="SKYMAP", clobber=True):
""" Write input data to a FITS file
        data : The data being stored
outfile : The name of the output file
extname : The HDU extension name
clobber : True -> overwrite existing files
"""
hdu_prim = fits.PrimaryHDU()
hdu_hpx = self.make_hdu(data, extname=extname)
hl = [hdu_prim, hdu_hpx]
if self.conv.energy_hdu == 'EBOUNDS':
hdu_energy = self.make_energy_bounds_hdu()
elif self.conv.energy_hdu == 'ENERGIES':
hdu_energy = self.make_energies_hdu()
if hdu_energy is not None:
hl.append(hdu_energy)
hdulist = fits.HDUList(hl)
hdulist.writeto(outfile, overwrite=clobber)
@staticmethod
def get_index_list(nside, nest, region):
""" Returns the list of pixels indices for all the pixels in a region
nside : HEALPix nside parameter
nest : True for 'NESTED', False = 'RING'
region : HEALPix region string
"""
tokens = parse_hpxregion(region)
if tokens[0] == 'DISK':
vec = coords_to_vec(float(tokens[1]), float(tokens[2]))
ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])),
inclusive=False, nest=nest)
elif tokens[0] == 'DISK_INC':
vec = coords_to_vec(float(tokens[1]), float(tokens[2]))
ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])),
inclusive=True, fact=int(tokens[4]),
nest=nest)
elif tokens[0] == 'HPX_PIXEL':
nside_pix = int(tokens[2])
if tokens[1] == 'NESTED':
ipix_ring = hp.nest2ring(nside_pix, int(tokens[3]))
elif tokens[1] == 'RING':
ipix_ring = int(tokens[3])
else:
raise Exception(
"Did not recognize ordering scheme %s" % tokens[1])
ilist = match_hpx_pixel(nside, nest, nside_pix, ipix_ring)
else:
raise Exception(
"HPX.get_index_list did not recognize region type %s" % tokens[0])
return ilist
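    # Illustrative region strings accepted above (arguments are examples only):
    #   'DISK(lon,lat,radius_deg)'               e.g. 'DISK(110.,75.,2.)'
    #   'DISK_INC(lon,lat,radius_deg,fact)'      inclusive query_disc with oversampling factor
    #   'HPX_PIXEL(NESTED|RING,nside_pix,ipix)'  all pixels falling inside one coarse pixel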
@staticmethod
def get_ref_dir(region, coordsys):
""" Finds and returns the reference direction for a given
HEALPix region string.
region : a string describing a HEALPix region
coordsys : coordinate system, GAL | CEL
"""
if region is None:
if coordsys == "GAL":
c = SkyCoord(0., 0., frame=Galactic, unit="deg")
elif coordsys == "CEL":
c = SkyCoord(0., 0., frame=ICRS, unit="deg")
return c
tokens = parse_hpxregion(region)
if tokens[0] in ['DISK', 'DISK_INC']:
if coordsys == "GAL":
c = SkyCoord(float(tokens[1]), float(
tokens[2]), frame=Galactic, unit="deg")
elif coordsys == "CEL":
c = SkyCoord(float(tokens[1]), float(
tokens[2]), frame=ICRS, unit="deg")
return c
elif tokens[0] == 'HPX_PIXEL':
nside_pix = int(tokens[2])
ipix_pix = int(tokens[3])
if tokens[1] == 'NESTED':
nest_pix = True
elif tokens[1] == 'RING':
nest_pix = False
else:
raise Exception(
"Did not recognize ordering scheme %s" % tokens[1])
theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix)
lat = np.degrees((np.pi / 2) - theta)
lon = np.degrees(phi)
if coordsys == "GAL":
c = SkyCoord(lon, lat, frame=Galactic, unit="deg")
elif coordsys == "CEL":
c = SkyCoord(lon, lat, frame=ICRS, unit="deg")
return c
else:
raise Exception(
"HPX.get_ref_dir did not recognize region type %s" % tokens[0])
return None
@staticmethod
def get_region_size(region):
""" Finds and returns the approximate size of region (in degrees)
from a HEALPix region string.
"""
if region is None:
return 180.
tokens = parse_hpxregion(region)
if tokens[0] in ['DISK', 'DISK_INC']:
return float(tokens[3])
elif tokens[0] == 'HPX_PIXEL':
pixel_size = get_pixel_size_from_nside(int(tokens[2]))
return 2. * pixel_size
else:
raise Exception(
"HPX.get_region_size did not recognize region type %s" % tokens[0])
return None
def make_wcs(self, naxis=2, proj='CAR', energies=None, oversample=2):
""" Make a WCS projection appropirate for this HPX pixelization
"""
w = WCS(naxis=naxis)
skydir = self.get_ref_dir(self._region, self.coordsys)
if self.coordsys == 'CEL':
w.wcs.ctype[0] = 'RA---%s' % (proj)
w.wcs.ctype[1] = 'DEC--%s' % (proj)
w.wcs.crval[0] = skydir.ra.deg
w.wcs.crval[1] = skydir.dec.deg
elif self.coordsys == 'GAL':
w.wcs.ctype[0] = 'GLON-%s' % (proj)
w.wcs.ctype[1] = 'GLAT-%s' % (proj)
w.wcs.crval[0] = skydir.galactic.l.deg
w.wcs.crval[1] = skydir.galactic.b.deg
else:
raise Exception('Unrecognized coordinate system.')
pixsize = get_pixel_size_from_nside(self.nside)
roisize = self.get_region_size(self._region)
allsky = False
if roisize > 45:
roisize = 90
allsky = True
npixels = int(2. * roisize / pixsize) * oversample
crpix = npixels / 2.
if allsky:
w.wcs.crpix[0] = 2 * crpix
npix = (2 * npixels, npixels)
else:
w.wcs.crpix[0] = crpix
npix = (npixels, npixels)
w.wcs.crpix[1] = crpix
w.wcs.cdelt[0] = -pixsize / oversample
w.wcs.cdelt[1] = pixsize / oversample
if naxis == 3:
w.wcs.crpix[2] = 1
w.wcs.ctype[2] = 'Energy'
if energies is not None:
w.wcs.crval[2] = 10 ** energies[0]
w.wcs.cdelt[2] = 10 ** energies[1] - 10 ** energies[0]
w = WCS(w.to_header())
wcs_proj = WCSProj(w, npix)
return wcs_proj
def get_sky_coords(self):
""" Get the sky coordinates of all the pixels in this pixelization """
if self._ipix is None:
theta, phi = hp.pix2ang(
self._nside, list(range(self._npix)), self._nest)
else:
theta, phi = hp.pix2ang(self._nside, self._ipix, self._nest)
lat = np.degrees((np.pi / 2) - theta)
lon = np.degrees(phi)
return np.vstack([lon, lat]).T
def get_sky_dirs(self):
lonlat = self.get_sky_coords()
if self.coordsys == 'CEL':
return SkyCoord(ra=lonlat.T[0], dec=lonlat.T[1], unit='deg', frame='icrs')
else:
return SkyCoord(l=lonlat.T[0], b=lonlat.T[1], unit='deg', frame='galactic')
def get_pixel_indices(self, lats, lons):
""" "Return the indices in the flat array corresponding to a set of coordinates """
theta = np.radians(90. - lats)
phi = np.radians(lons)
return hp.ang2pix(self.nside, theta, phi, self.nest)
def skydir_to_pixel(self, skydir):
"""Return the pixel index of a SkyCoord object."""
if self.coordsys in ['CEL', 'EQU']:
skydir = skydir.transform_to('icrs')
lon = skydir.ra.deg
lat = skydir.dec.deg
else:
skydir = skydir.transform_to('galactic')
lon = skydir.l.deg
lat = skydir.b.deg
return self.get_pixel_indices(lat, lon)
class HpxToWcsMapping(object):
""" Stores the indices need to conver from HEALPix to WCS """
def __init__(self, hpx, wcs, mapping_data=None):
"""
"""
self._hpx = hpx
self._wcs = wcs
if mapping_data is None:
self._ipixs, self._mult_val, self._npix = make_hpx_to_wcs_mapping(
self.hpx, self.wcs.wcs)
else:
self._ipixs = mapping_data['ipixs']
self._mult_val = mapping_data['mult_val']
self._npix = mapping_data['npix']
self._lmap = self._hpx[self._ipixs]
self._valid = self._lmap >= 0
@property
def hpx(self):
""" The HEALPix projection """
return self._hpx
@property
def wcs(self):
""" The WCS projection """
return self._wcs
@property
def ipixs(self):
"""An array(nx,ny) of the global HEALPix pixel indices for each WCS
pixel"""
return self._ipixs
@property
def mult_val(self):
"""An array(nx,ny) of 1/number of WCS pixels pointing at each HEALPix
pixel"""
return self._mult_val
@property
def npix(self):
""" A tuple(nx,ny) of the shape of the WCS grid """
return self._npix
@property
def lmap(self):
"""An array(nx,ny) giving the mapping of the local HEALPix pixel
indices for each WCS pixel"""
return self._lmap
@property
def valid(self):
"""An array(nx,ny) of bools giving if each WCS pixel in inside the
HEALPix region"""
return self._valid
def write_to_fitsfile(self, fitsfile, clobber=True):
"""Write this mapping to a FITS file, to avoid having to recompute it
"""
from fermipy.skymap import Map
hpx_header = self._hpx.make_header()
index_map = Map(self.ipixs, self.wcs)
mult_map = Map(self.mult_val, self.wcs)
prim_hdu = index_map.create_primary_hdu()
        mult_hdu = mult_map.create_image_hdu()
for key in ['COORDSYS', 'ORDERING', 'PIXTYPE',
'ORDERING', 'ORDER', 'NSIDE',
'FIRSTPIX', 'LASTPIX']:
prim_hdu.header[key] = hpx_header[key]
mult_hdu.header[key] = hpx_header[key]
hdulist = fits.HDUList([prim_hdu, mult_hdu])
hdulist.writeto(fitsfile, overwrite=clobber)
@classmethod
def create_from_fitsfile(cls, fitsfile):
""" Read a fits file and use it to make a mapping
"""
from fermipy.skymap import Map
index_map = Map.create_from_fits(fitsfile)
mult_map = Map.create_from_fits(fitsfile, hdu=1)
ff = fits.open(fitsfile)
hpx = HPX.create_from_hdu(ff[0])
mapping_data = dict(ipixs=index_map.counts,
mult_val=mult_map.counts,
npix=mult_map.counts.shape)
return cls(hpx, index_map.wcs, mapping_data)
def fill_wcs_map_from_hpx_data(self, hpx_data, wcs_data, normalize=True):
"""Fills the wcs map from the hpx data using the pre-calculated
mappings
hpx_data : the input HEALPix data
wcs_data : the data array being filled
        normalize : True -> preserve integral by splitting HEALPix values between bins
"""
# FIXME, there really ought to be a better way to do this
hpx_naxis = len(hpx_data.shape)
wcs_naxis = len(wcs_data.shape)
if hpx_naxis + 1 != wcs_naxis:
raise ValueError("HPX.fill_wcs_map_from_hpx_data: HPX naxis should be 1 less that WCS naxis: %i, %i"%(hpx_naxis, wcs_naxis))
if hpx_naxis == 2:
if hpx_data.shape[1] != wcs_data.shape[2]:
raise ValueError("HPX.fill_wcs_map_from_hpx_data: size of energy axes don't match: %i, %i"%(hpx_naxis[1], wcs_naxis[2]))
lmap_valid = self._lmap[self._valid]
wcs_layer_shape = wcs_data.shape[0]*wcs_data.shape[1]
if hpx_naxis == 2:
for i in range(hpx_data.shape[1]):
wcs_data_layer = np.zeros(wcs_layer_shape)
wcs_data_layer[self._valid] = hpx_data[:,i][lmap_valid]
orig_value = wcs_data_layer.sum()
if normalize:
wcs_data_layer *= self._mult_val
wcs_data[:,:,i].flat = wcs_data_layer
else:
wcs_data_flat = np.zeros(wcs_layer_shape)
wcs_data_flat[self._valid] = hpx_data[lmap_valid]
if normalize:
wcs_data_flat *= self._mult_val
wcs_data.flat = wcs_data_flat
def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True):
""" Creates and fills a wcs map from the hpx data using the pre-calculated
mappings
hpx_data : the input HEALPix data
wcs : the WCS object
        normalize : True -> preserve integral by splitting HEALPix values between bins
"""
wcs_data = np.zeros(wcs.npix)
self.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize)
return wcs_data
|
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
import oslo_messaging
from oslo_utils import timeutils
from webob import exc
from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_agentschedulers_db
from neutron.extensions import agent
from neutron.extensions import dhcpagentscheduler
from neutron.extensions import l3agentscheduler
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.tests import fake_notifier
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_agent
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
from neutron import wsgi
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
class AgentSchedulerTestMixIn(object):
def _request_list(self, path, admin_context=True,
expected_code=exc.HTTPOk.code):
req = self._path_req(path, admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
return self.deserialize(self.fmt, res)
def _path_req(self, path, method='GET', data=None,
query_string=None,
admin_context=True):
content_type = 'application/%s' % self.fmt
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
if admin_context:
return testlib_api.create_request(
path, body, content_type, method, query_string=query_string)
else:
return testlib_api.create_request(
path, body, content_type, method, query_string=query_string,
context=context.Context('', 'tenant_id'))
def _path_create_request(self, path, data, admin_context=True):
return self._path_req(path, method='POST', data=data,
admin_context=admin_context)
def _path_show_request(self, path, admin_context=True):
return self._path_req(path, admin_context=admin_context)
def _path_delete_request(self, path, admin_context=True):
return self._path_req(path, method='DELETE',
admin_context=admin_context)
def _path_update_request(self, path, data, admin_context=True):
return self._path_req(path, method='PUT', data=data,
admin_context=admin_context)
def _list_routers_hosted_by_l3_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
l3agentscheduler.L3_ROUTERS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_networks_hosted_by_dhcp_agent(self, agent_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (agent_id,
dhcpagentscheduler.DHCP_NETS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_l3_agents_hosting_router(self, router_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/routers/%s/%s.%s" % (router_id,
l3agentscheduler.L3_AGENTS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _list_dhcp_agents_hosting_network(self, network_id,
expected_code=exc.HTTPOk.code,
admin_context=True):
path = "/networks/%s/%s.%s" % (network_id,
dhcpagentscheduler.DHCP_AGENTS,
self.fmt)
return self._request_list(path, expected_code=expected_code,
admin_context=admin_context)
def _add_router_to_l3_agent(self, id, router_id,
expected_code=exc.HTTPCreated.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (id,
l3agentscheduler.L3_ROUTERS,
self.fmt)
req = self._path_create_request(path,
{'router_id': router_id},
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _add_network_to_dhcp_agent(self, id, network_id,
expected_code=exc.HTTPCreated.code,
admin_context=True):
path = "/agents/%s/%s.%s" % (id,
dhcpagentscheduler.DHCP_NETS,
self.fmt)
req = self._path_create_request(path,
{'network_id': network_id},
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _remove_network_from_dhcp_agent(self, id, network_id,
expected_code=exc.HTTPNoContent.code,
admin_context=True):
path = "/agents/%s/%s/%s.%s" % (id,
dhcpagentscheduler.DHCP_NETS,
network_id,
self.fmt)
req = self._path_delete_request(path,
admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _remove_router_from_l3_agent(self, id, router_id,
expected_code=exc.HTTPNoContent.code,
admin_context=True):
path = "/agents/%s/%s/%s.%s" % (id,
l3agentscheduler.L3_ROUTERS,
router_id,
self.fmt)
req = self._path_delete_request(path, admin_context=admin_context)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, expected_code)
def _assert_notify(self, notifications, expected_event_type):
event_types = [event['event_type'] for event in notifications]
self.assertIn(expected_event_type, event_types)
def _register_one_agent_state(self, agent_state):
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': agent_state},
time=timeutils.strtime())
def test_agent_registration_bad_timestamp(self):
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'start_flag': True,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
callback = agents_db.AgentExtRpcCallback()
delta_time = datetime.datetime.now() - datetime.timedelta(days=1)
str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
callback.report_state(self.adminContext,
agent_state={'agent_state': dhcp_hosta},
time=str_time)
def test_agent_registration_invalid_timestamp_allowed(self):
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'start_flag': True,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
callback = agents_db.AgentExtRpcCallback()
utc_time = datetime.datetime.utcnow()
delta_time = utc_time - datetime.timedelta(seconds=10)
str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
callback.report_state(self.adminContext,
agent_state={'agent_state': dhcp_hosta},
time=str_time)
def _disable_agent(self, agent_id, admin_state_up=False):
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = admin_state_up
self._update('agents', agent_id, new_agent)
def _get_agent_id(self, agent_type, host):
agents = self._list_agents()
for agent_data in agents['agents']:
if (agent_data['agent_type'] == agent_type and
agent_data['host'] == host):
return agent_data['id']
class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin,
test_agent.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.NeutronDbPluginV2TestCase):
fmt = 'json'
plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatAgentSchedulingServicePlugin')
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
if self.l3_plugin:
service_plugins = {'l3_plugin_name': self.l3_plugin}
else:
service_plugins = None
super(OvsAgentSchedulerTestCaseBase, self).setUp(
self.plugin_str, service_plugins=service_plugins)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
self.addCleanup(self.restore_attribute_map)
self.l3agentscheduler_dbMinxin = (
manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT))
self.l3_notify_p = mock.patch(
'neutron.extensions.l3agentscheduler.notify')
self.patched_l3_notify = self.l3_notify_p.start()
self.l3_periodic_p = mock.patch('neutron.db.l3_agentschedulers_db.'
'L3AgentSchedulerDbMixin.'
'start_periodic_l3_agent_status_check')
self.patched_l3_periodic = self.l3_periodic_p.start()
self.dhcp_notify_p = mock.patch(
'neutron.extensions.dhcpagentscheduler.notify')
self.patched_dhcp_notify = self.dhcp_notify_p.start()
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
def test_report_states(self):
self._register_agent_states()
agents = self._list_agents()
self.assertEqual(4, len(agents['agents']))
def test_network_scheduling_on_network_creation(self):
self._register_agent_states()
with self.network() as net:
dhcp_agents = self._list_dhcp_agents_hosting_network(
net['network']['id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_auto_schedule_with_disabled(self):
cfg.CONF.set_override('allow_overlapping_ips', True)
with contextlib.nested(self.subnet(),
self.subnet()):
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
self._disable_agent(hosta_id)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
# second agent will host all the networks since first is disabled.
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(networks['networks'])
networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(networks['networks'])
self.assertEqual(0, num_hosta_nets)
self.assertEqual(2, num_hostc_nets)
def test_network_auto_schedule_with_no_dhcp(self):
cfg.CONF.set_override('allow_overlapping_ips', True)
with contextlib.nested(self.subnet(enable_dhcp=False),
self.subnet(enable_dhcp=False)):
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
self._disable_agent(hosta_id)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(networks['networks'])
networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(networks['networks'])
self.assertEqual(0, num_hosta_nets)
self.assertEqual(0, num_hostc_nets)
def test_network_auto_schedule_with_multiple_agents(self):
cfg.CONF.set_override('dhcp_agents_per_network', 2)
cfg.CONF.set_override('allow_overlapping_ips', True)
with contextlib.nested(self.subnet(),
self.subnet()):
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(networks['networks'])
networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(networks['networks'])
self.assertEqual(2, num_hosta_nets)
self.assertEqual(2, num_hostc_nets)
def test_network_auto_schedule_restart_dhcp_agent(self):
cfg.CONF.set_override('dhcp_agents_per_network', 2)
with self.subnet() as sub1:
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
self._register_agent_states()
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
dhcp_agents = self._list_dhcp_agents_hosting_network(
sub1['subnet']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
def test_network_auto_schedule_with_hosted(self):
# one agent hosts all the networks, other hosts none
cfg.CONF.set_override('allow_overlapping_ips', True)
with contextlib.nested(self.subnet(),
self.subnet()) as (sub1, sub2):
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
self._register_agent_states()
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
# second agent will not host the network since first has got it.
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
dhcp_agents = self._list_dhcp_agents_hosting_network(
sub1['subnet']['network_id'])
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(hosta_nets['networks'])
hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(hostc_nets['networks'])
self.assertEqual(2, num_hosta_nets)
self.assertEqual(0, num_hostc_nets)
self.assertEqual(1, len(dhcp_agents['agents']))
self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host'])
def test_network_auto_schedule_with_hosted_2(self):
# one agent hosts one network
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
dhcp_hostc = copy.deepcopy(dhcp_hosta)
dhcp_hostc['host'] = DHCP_HOSTC
cfg.CONF.set_override('allow_overlapping_ips', True)
with self.subnet() as sub1:
self._register_one_agent_state(dhcp_hosta)
dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
with self.subnet() as sub2:
self._register_one_agent_state(dhcp_hostc)
dhcp_rpc_cb.get_active_networks(self.adminContext,
host=DHCP_HOSTC)
dhcp_agents_1 = self._list_dhcp_agents_hosting_network(
sub1['subnet']['network_id'])
dhcp_agents_2 = self._list_dhcp_agents_hosting_network(
sub2['subnet']['network_id'])
hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
num_hosta_nets = len(hosta_nets['networks'])
hostc_id = self._get_agent_id(
constants.AGENT_TYPE_DHCP,
DHCP_HOSTC)
hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
num_hostc_nets = len(hostc_nets['networks'])
self.assertEqual(1, num_hosta_nets)
self.assertEqual(1, num_hostc_nets)
self.assertEqual(1, len(dhcp_agents_1['agents']))
self.assertEqual(1, len(dhcp_agents_2['agents']))
self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host'])
self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host'])
def test_network_scheduling_on_port_creation(self):
with self.subnet() as subnet:
dhcp_agents = self._list_dhcp_agents_hosting_network(
subnet['subnet']['network_id'])
result0 = len(dhcp_agents['agents'])
self._register_agent_states()
with self.port(subnet=subnet,
device_owner="compute:test:" + DHCP_HOSTA) as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
result1 = len(dhcp_agents['agents'])
self.assertEqual(0, result0)
self.assertEqual(1, result1)
def test_network_ha_scheduling_on_port_creation(self):
cfg.CONF.set_override('dhcp_agents_per_network', 2)
with self.subnet() as subnet:
dhcp_agents = self._list_dhcp_agents_hosting_network(
subnet['subnet']['network_id'])
result0 = len(dhcp_agents['agents'])
self._register_agent_states()
with self.port(subnet=subnet,
device_owner="compute:test:" + DHCP_HOSTA) as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
result1 = len(dhcp_agents['agents'])
self.assertEqual(0, result0)
self.assertEqual(2, result1)
def test_network_ha_scheduling_on_port_creation_with_new_agent(self):
cfg.CONF.set_override('dhcp_agents_per_network', 3)
with self.subnet() as subnet:
dhcp_agents = self._list_dhcp_agents_hosting_network(
subnet['subnet']['network_id'])
result0 = len(dhcp_agents['agents'])
self._register_agent_states()
with self.port(subnet=subnet,
device_owner="compute:test:" + DHCP_HOSTA) as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
result1 = len(dhcp_agents['agents'])
self._register_one_dhcp_agent()
with self.port(subnet=subnet,
device_owner="compute:test:" + DHCP_HOSTA) as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
result2 = len(dhcp_agents['agents'])
self.assertEqual(0, result0)
self.assertEqual(2, result1)
self.assertEqual(3, result2)
def test_network_scheduler_with_disabled_agent(self):
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
with self.port() as port1:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port1['port']['network_id'])
self._delete('ports', port1['port']['id'])
self._delete('networks', port1['port']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
agents = self._list_agents()
self._disable_agent(agents['agents'][0]['id'])
with self.port() as port2:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port2['port']['network_id'])
self._delete('ports', port2['port']['id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_is_eligible_agent(self):
agent_startup = ('neutron.db.agentschedulers_db.'
'DhcpAgentSchedulerDbMixin.agent_starting_up')
is_eligible_agent = ('neutron.db.agentschedulers_db.'
'AgentSchedulerDbMixin.is_eligible_agent')
dhcp_mixin = agentschedulers_db.DhcpAgentSchedulerDbMixin()
with contextlib.nested(
mock.patch(agent_startup),
mock.patch(is_eligible_agent)
) as (startup, elig):
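            # Eligibility is the OR of "the agent is still starting up" and
            # the generic is_eligible_agent() check, so exercise every
            # combination of the two mocked return values.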
tests = [(True, True),
(True, False),
(False, True),
(False, False)]
for rv1, rv2 in tests:
startup.return_value = rv1
elig.return_value = rv2
self.assertEqual(rv1 or rv2,
dhcp_mixin.is_eligible_agent(None,
None, None))
def test_network_scheduler_with_down_agent(self):
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
eligible_agent_str = ('neutron.db.agentschedulers_db.'
'DhcpAgentSchedulerDbMixin.is_eligible_agent')
with mock.patch(eligible_agent_str) as eligible_agent:
eligible_agent.return_value = True
with self.port() as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
self._delete('ports', port['port']['id'])
self._delete('networks', port['port']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
with mock.patch(eligible_agent_str) as eligible_agent:
eligible_agent.return_value = False
with self.port() as port:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port['port']['network_id'])
self._delete('ports', port['port']['id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_scheduler_with_hosted_network(self):
plugin = manager.NeutronManager.get_plugin()
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
with self.port() as port1:
dhcp_agents = self._list_dhcp_agents_hosting_network(
port1['port']['network_id'])
self.assertEqual(1, len(dhcp_agents['agents']))
with mock.patch.object(plugin,
'get_dhcp_agents_hosting_networks',
autospec=True) as mock_hosting_agents:
mock_hosting_agents.return_value = plugin.get_agents_db(
self.adminContext)
with self.network('test') as net1:
pass
with self.subnet(network=net1,
cidr='10.0.1.0/24') as subnet1:
pass
with self.port(subnet=subnet1) as port2:
pass
dhcp_agents = self._list_dhcp_agents_hosting_network(
port2['port']['network_id'])
self.assertEqual(0, len(dhcp_agents['agents']))
def test_network_policy(self):
with self.network() as net1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._list_networks_hosted_by_dhcp_agent(
hosta_id, expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_network_to_dhcp_agent(
hosta_id, net1['network']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_network_to_dhcp_agent(hosta_id,
net1['network']['id'])
self._remove_network_from_dhcp_agent(
hosta_id, net1['network']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._list_dhcp_agents_hosting_network(
net1['network']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
def _test_network_add_to_dhcp_agent(self, admin_state_up=True):
with self.network() as net1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
if not admin_state_up:
self._set_agent_admin_state_up(DHCP_HOSTA, False)
num_before_add = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self._add_network_to_dhcp_agent(hosta_id,
net1['network']['id'])
num_after_add = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self.assertEqual(0, num_before_add)
self.assertEqual(1, num_after_add)
def test_network_add_to_dhcp_agent(self):
self._test_network_add_to_dhcp_agent()
def test_network_add_to_dhcp_agent_with_admin_state_down(self):
cfg.CONF.set_override(
'enable_services_on_agents_with_admin_state_down', True)
self._test_network_add_to_dhcp_agent(admin_state_up=False)
def test_network_remove_from_dhcp_agent(self):
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
with self.port() as port1:
num_before_remove = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self._remove_network_from_dhcp_agent(hosta_id,
port1['port']['network_id'])
num_after_remove = len(
self._list_networks_hosted_by_dhcp_agent(
hosta_id)['networks'])
self.assertEqual(1, num_before_remove)
self.assertEqual(0, num_after_remove)
def test_list_active_networks_on_not_registered_yet_dhcp_agent(self):
plugin = manager.NeutronManager.get_plugin()
nets = plugin.list_active_networks_on_active_dhcp_agent(
self.adminContext, host=DHCP_HOSTA)
self.assertEqual([], nets)
def test_reserved_port_after_network_remove_from_dhcp_agent(self):
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
with self.port(device_owner=constants.DEVICE_OWNER_DHCP,
host=DHCP_HOSTA) as port1:
self._remove_network_from_dhcp_agent(hosta_id,
port1['port']['network_id'])
port_res = self._list_ports(
'json',
200,
network_id=port1['port']['network_id'])
port_list = self.deserialize('json', port_res)
self.assertEqual(port_list['ports'][0]['device_id'],
constants.DEVICE_ID_RESERVED_DHCP_PORT)
def _test_get_active_networks_from_admin_state_down_agent(self,
keep_services):
if keep_services:
cfg.CONF.set_override(
'enable_services_on_agents_with_admin_state_down', True)
dhcp_hosta = {
'binary': 'neutron-dhcp-agent',
'host': DHCP_HOSTA,
'topic': 'DHCP_AGENT',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True,
},
'agent_type': constants.AGENT_TYPE_DHCP}
self._register_one_agent_state(dhcp_hosta)
dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
with self.port():
nets = dhcp_rpc_cb.get_active_networks(self.adminContext,
host=DHCP_HOSTA)
self.assertEqual(1, len(nets))
self._set_agent_admin_state_up(DHCP_HOSTA, False)
nets = dhcp_rpc_cb.get_active_networks(self.adminContext,
host=DHCP_HOSTA)
if keep_services:
self.assertEqual(1, len(nets))
else:
self.assertEqual(0, len(nets))
def test_dhcp_agent_keep_services_off(self):
self._test_get_active_networks_from_admin_state_down_agent(False)
def test_dhcp_agent_keep_services_on(self):
self._test_get_active_networks_from_admin_state_down_agent(True)
def _take_down_agent_and_run_reschedule(self, host):
        # make the agent on the given host look dead by aging its heartbeat,
        # then trigger the L3 scheduler's reschedule check
self.adminContext.session.begin(subtransactions=True)
query = self.adminContext.session.query(agents_db.Agent)
agt = query.filter_by(host=host).first()
agt.heartbeat_timestamp = (
agt.heartbeat_timestamp - datetime.timedelta(hours=1))
self.adminContext.session.commit()
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
plugin.reschedule_routers_from_down_agents()
def _set_agent_admin_state_up(self, host, state):
self.adminContext.session.begin(subtransactions=True)
query = self.adminContext.session.query(agents_db.Agent)
agt_db = query.filter_by(host=host).first()
agt_db.admin_state_up = state
self.adminContext.session.commit()
def test_router_rescheduler_catches_rpc_db_and_reschedule_exceptions(self):
with self.router():
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
# schedule the router to host A
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
mock.patch.object(
plugin, 'reschedule_router',
side_effect=[
db_exc.DBError(), oslo_messaging.RemoteError(),
l3agentscheduler.RouterReschedulingFailed(router_id='f',
agent_id='f'),
ValueError('this raises')
]).start()
# these first three should not raise any errors
self._take_down_agent_and_run_reschedule(L3_HOSTA) # DBError
self._take_down_agent_and_run_reschedule(L3_HOSTA) # RemoteError
self._take_down_agent_and_run_reschedule(L3_HOSTA) # schedule err
# ValueError is not caught so it should raise
self.assertRaises(ValueError,
self._take_down_agent_and_run_reschedule,
L3_HOSTA)
def test_router_rescheduler_iterates_after_reschedule_failure(self):
plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
with contextlib.nested(self.router(), self.router()) as (r1, r2):
# schedule the routers to host A
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
rs_mock = mock.patch.object(
plugin, 'reschedule_router',
side_effect=l3agentscheduler.RouterReschedulingFailed(
router_id='f', agent_id='f'),
).start()
self._take_down_agent_and_run_reschedule(L3_HOSTA)
# make sure both had a reschedule attempt even though first failed
rs_mock.assert_has_calls([mock.call(mock.ANY, r1['router']['id']),
mock.call(mock.ANY, r2['router']['id'])],
any_order=True)
def test_router_is_not_rescheduled_from_alive_agent(self):
with self.router():
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
# schedule the router to host A
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
with mock.patch('neutron.db.l3_agentschedulers_db.'
'L3AgentSchedulerDbMixin.reschedule_router') as rr:
# take down some unrelated agent and run reschedule check
self._take_down_agent_and_run_reschedule(DHCP_HOSTC)
self.assertFalse(rr.called)
def test_router_reschedule_from_dead_agent(self):
with self.router():
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
# schedule the router to host A
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
self._take_down_agent_and_run_reschedule(L3_HOSTA)
# B should now pick up the router
ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
self.assertEqual(ret_b, ret_a)
def test_router_no_reschedule_from_dead_admin_down_agent(self):
with self.router() as r:
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
# schedule the router to host A
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
self._set_agent_admin_state_up(L3_HOSTA, False)
self._take_down_agent_and_run_reschedule(L3_HOSTA)
# A should still have it even though it was inactive due to the
# admin_state being down
rab = l3_agentschedulers_db.RouterL3AgentBinding
binding = (self.adminContext.session.query(rab).
filter(rab.router_id == r['router']['id']).first())
self.assertEqual(binding.l3_agent.host, L3_HOSTA)
# B should not pick up the router
ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
self.assertFalse(ret_b)
def test_router_auto_schedule_with_invalid_router(self):
with self.router() as router:
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
self._delete('routers', router['router']['id'])
# deleted router
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=[router['router']['id']])
self.assertFalse(ret_a)
# non-existent router
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=[uuidutils.generate_uuid()])
self.assertFalse(ret_a)
def test_router_auto_schedule_with_hosted(self):
with self.router() as router:
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
l3_agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(ret_a))
self.assertIn(router['router']['id'], [r['id'] for r in ret_a])
self.assertFalse(len(ret_b))
self.assertEqual(1, len(l3_agents['agents']))
self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host'])
def test_router_auto_schedule_restart_l3_agent(self):
with self.router():
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
def test_router_auto_schedule_with_hosted_2(self):
# one agent hosts one router
l3_rpc_cb = l3_rpc.L3RpcCallback()
l3_hosta = {
'binary': 'neutron-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': True,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
l3_hostb = copy.deepcopy(l3_hosta)
l3_hostb['host'] = L3_HOSTB
with self.router() as router1:
self._register_one_agent_state(l3_hosta)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
with self.router() as router2:
self._register_one_agent_state(l3_hostb)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
hostb_id = self._get_agent_id(
constants.AGENT_TYPE_L3,
L3_HOSTB)
hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
num_hostb_routers = len(hostb_routers['routers'])
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, num_hostb_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(1, len(l3_agents_2['agents']))
self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host'])
self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host'])
def test_router_auto_schedule_with_disabled(self):
with contextlib.nested(self.router(),
self.router()):
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTB)
self._disable_agent(hosta_id)
# first agent will not host router since it is disabled
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
# second agent will host all the routers since first is disabled.
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
num_hostb_routers = len(hostb_routers['routers'])
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
self.assertEqual(2, num_hostb_routers)
self.assertEqual(0, num_hosta_routers)
def test_router_auto_schedule_with_candidates(self):
l3_hosta = {
'binary': 'neutron-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': False,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
with contextlib.nested(self.router(),
self.router()) as (router1, router2):
l3_rpc_cb = l3_rpc.L3RpcCallback()
l3_hosta['configurations']['router_id'] = router1['router']['id']
self._register_one_agent_state(l3_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
# L3 agent will host only the compatible router.
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(0, len(l3_agents_2['agents']))
def test_rpc_sync_routers(self):
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
# No routers
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
self.assertEqual(0, len(ret_a))
with contextlib.nested(self.router(),
self.router(),
self.router()) as routers:
router_ids = [r['router']['id'] for r in routers]
# Get all routers
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
self.assertEqual(3, len(ret_a))
self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))
# Get all routers (router_ids=None)
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=None)
self.assertEqual(3, len(ret_a))
self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))
# Get router2 only
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=[router_ids[1]])
self.assertEqual(1, len(ret_a))
self.assertIn(router_ids[1], [r['id'] for r in ret_a])
# Get router1 and router3
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=[router_ids[0],
router_ids[2]])
self.assertEqual(2, len(ret_a))
self.assertIn(router_ids[0], [r['id'] for r in ret_a])
self.assertIn(router_ids[2], [r['id'] for r in ret_a])
def test_router_auto_schedule_for_specified_routers(self):
def _sync_router_with_ids(router_ids, exp_synced, exp_hosted, host_id):
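            # router_ids: routers passed to sync_routers; exp_synced: expected
            # number of routers returned by the sync; exp_hosted: expected
            # total number of routers hosted by the agent afterwards.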
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=router_ids)
self.assertEqual(exp_synced, len(ret_a))
for r in router_ids:
                self.assertIn(r, [router['id'] for router in ret_a])
host_routers = self._list_routers_hosted_by_l3_agent(host_id)
num_host_routers = len(host_routers['routers'])
self.assertEqual(exp_hosted, num_host_routers)
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
with contextlib.nested(self.router(), self.router(),
self.router(), self.router()) as routers:
router_ids = [r['router']['id'] for r in routers]
# Sync router1 (router1 is scheduled)
_sync_router_with_ids([router_ids[0]], 1, 1, hosta_id)
# Sync router1 only (no router is scheduled)
_sync_router_with_ids([router_ids[0]], 1, 1, hosta_id)
# Schedule router2
_sync_router_with_ids([router_ids[1]], 1, 2, hosta_id)
# Sync router2 and router4 (router4 is scheduled)
_sync_router_with_ids([router_ids[1], router_ids[3]],
2, 3, hosta_id)
# Sync all routers (router3 is scheduled)
_sync_router_with_ids(router_ids, 4, 4, hosta_id)
def test_router_schedule_with_candidates(self):
l3_hosta = {
'binary': 'neutron-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': False,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
with contextlib.nested(self.router(),
self.router(),
self.subnet(),
self.subnet(cidr='10.0.3.0/24')) as (router1,
router2,
subnet1,
subnet2):
l3_hosta['configurations']['router_id'] = router1['router']['id']
self._register_one_agent_state(l3_hosta)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._router_interface_action('add',
router1['router']['id'],
subnet1['subnet']['id'],
None)
self._router_interface_action('add',
router2['router']['id'],
subnet2['subnet']['id'],
None)
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
# safe cleanup
self._router_interface_action('remove',
router1['router']['id'],
subnet1['subnet']['id'],
None)
self._router_interface_action('remove',
router2['router']['id'],
subnet2['subnet']['id'],
None)
# L3 agent will host only the compatible router.
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(0, len(l3_agents_2['agents']))
def test_router_without_l3_agents(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
l3agents = (
                self.l3agentscheduler_dbMixin.get_l3_agents_hosting_routers(
self.adminContext, [router['router']['id']]))
self._delete('routers', router['router']['id'])
self.assertEqual(0, len(l3agents))
def test_router_sync_data(self):
with contextlib.nested(
self.subnet(),
self.subnet(cidr='10.0.2.0/24'),
self.subnet(cidr='10.0.3.0/24')
) as (s1, s2, s3):
self._register_agent_states()
self._set_net_external(s1['subnet']['network_id'])
data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s1['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
s2['subnet']['id'],
None)
self._router_interface_action('add',
router['router']['id'],
s3['subnet']['id'],
None)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(l3agents['agents']))
agents = self._list_agents()
another_l3_agent_id = None
another_l3_agent_host = None
default = l3agents['agents'][0]['id']
for com in agents['agents']:
if (com['id'] != default and
com['agent_type'] == constants.AGENT_TYPE_L3):
another_l3_agent_id = com['id']
another_l3_agent_host = com['host']
break
self.assertIsNotNone(another_l3_agent_id)
self._add_router_to_l3_agent(another_l3_agent_id,
router['router']['id'],
expected_code=exc.HTTPConflict.code)
self._remove_router_from_l3_agent(default,
router['router']['id'])
self._add_router_to_l3_agent(another_l3_agent_id,
router['router']['id'])
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(another_l3_agent_host,
l3agents['agents'][0]['host'])
self._remove_router_from_l3_agent(another_l3_agent_id,
router['router']['id'])
self._router_interface_action('remove',
router['router']['id'],
s2['subnet']['id'],
None)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1,
len(l3agents['agents']))
self._router_interface_action('remove',
router['router']['id'],
s3['subnet']['id'],
None)
self._delete('routers', router['router']['id'])
def _test_router_add_to_l3_agent(self, admin_state_up=True):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
if not admin_state_up:
self._set_agent_admin_state_up(L3_HOSTA, False)
num_before_add = len(
self._list_routers_hosted_by_l3_agent(
hosta_id)['routers'])
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTB)
self._add_router_to_l3_agent(hostb_id,
router1['router']['id'],
expected_code=exc.HTTPConflict.code)
num_after_add = len(
self._list_routers_hosted_by_l3_agent(
hosta_id)['routers'])
self.assertEqual(0, num_before_add)
self.assertEqual(1, num_after_add)
def test_router_add_to_l3_agent(self):
self._test_router_add_to_l3_agent()
def test_router_add_to_l3_agent_with_admin_state_down(self):
cfg.CONF.set_override(
'enable_services_on_agents_with_admin_state_down', True)
self._test_router_add_to_l3_agent(admin_state_up=False)
def test_router_add_to_l3_agent_two_times(self):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
# scheduling twice on the same agent is fine
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
def test_router_add_to_two_l3_agents(self):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTB)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
self._add_router_to_l3_agent(hostb_id,
router1['router']['id'],
expected_code=exc.HTTPConflict.code)
def test_router_policy(self):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._list_routers_hosted_by_l3_agent(
hosta_id, expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_router_to_l3_agent(
hosta_id, router1['router']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._add_router_to_l3_agent(
hosta_id, router1['router']['id'])
self._remove_router_from_l3_agent(
hosta_id, router1['router']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
self._list_l3_agents_hosting_router(
router1['router']['id'],
expected_code=exc.HTTPForbidden.code,
admin_context=False)
def _test_sync_routers_from_admin_state_down_agent(self, keep_services):
if keep_services:
cfg.CONF.set_override(
'enable_services_on_agents_with_admin_state_down', True)
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
with self.router() as router:
self._add_router_to_l3_agent(hosta_id,
router['router']['id'])
routers = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
self.assertEqual(1, len(routers))
self._set_agent_admin_state_up(L3_HOSTA, False)
routers = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
if keep_services:
self.assertEqual(1, len(routers))
else:
self.assertEqual(0, len(routers))
def test_l3_agent_keep_services_off(self):
self._test_sync_routers_from_admin_state_down_agent(False)
def test_l3_agent_keep_services_on(self):
self._test_sync_routers_from_admin_state_down_agent(True)
def test_list_routers_hosted_by_l3_agent_with_invalid_agent(self):
invalid_agentid = 'non_existing_agent'
self._list_routers_hosted_by_l3_agent(invalid_agentid,
exc.HTTPNotFound.code)
def test_list_networks_hosted_by_dhcp_agent_with_invalid_agent(self):
invalid_agentid = 'non_existing_agent'
self._list_networks_hosted_by_dhcp_agent(invalid_agentid,
exc.HTTPNotFound.code)
class OvsDhcpAgentNotifierTestCase(test_l3.L3NatTestCaseMixin,
test_agent.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.NeutronDbPluginV2TestCase):
plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
def setUp(self):
# Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str)
self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.dhcp_notifier_cast = mock.patch(
'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
'DhcpAgentNotifyAPI._cast_message').start()
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
self.addCleanup(self.restore_attribute_map)
fake_notifier.reset()
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_network_add_to_dhcp_agent_notification(self):
with self.network() as net1:
network_id = net1['network']['id']
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._add_network_to_dhcp_agent(hosta_id,
network_id)
self.dhcp_notifier_cast.assert_called_with(
mock.ANY, 'network_create_end',
{'network': {'id': network_id}}, DHCP_HOSTA)
notifications = fake_notifier.NOTIFICATIONS
expected_event_type = 'dhcp_agent.network.add'
self._assert_notify(notifications, expected_event_type)
def test_network_remove_from_dhcp_agent_notification(self):
with self.network() as net1:
network_id = net1['network']['id']
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._add_network_to_dhcp_agent(hosta_id,
network_id)
self._remove_network_from_dhcp_agent(hosta_id,
network_id)
self.dhcp_notifier_cast.assert_called_with(
mock.ANY, 'network_delete_end',
{'network_id': network_id}, DHCP_HOSTA)
notifications = fake_notifier.NOTIFICATIONS
expected_event_type = 'dhcp_agent.network.remove'
self._assert_notify(notifications, expected_event_type)
def test_agent_updated_dhcp_agent_notification(self):
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
DHCP_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
self.dhcp_notifier_cast.assert_called_with(
mock.ANY, 'agent_updated',
{'admin_state_up': False}, DHCP_HOSTA)
def _network_port_create(
self, hosts, gateway=attributes.ATTR_NOT_SPECIFIED, owner=None):
for host in hosts:
self._register_one_agent_state(
{'binary': 'neutron-dhcp-agent',
'host': host,
'topic': 'dhcp_agent',
'configurations': {'dhcp_driver': 'dhcp_driver',
'use_namespaces': True, },
'agent_type': constants.AGENT_TYPE_DHCP})
with self.network() as net1:
with self.subnet(network=net1,
gateway_ip=gateway) as subnet1:
if owner:
with self.port(subnet=subnet1,
device_owner=owner) as port:
return [net1, subnet1, port]
else:
with self.port(subnet=subnet1) as port:
return [net1, subnet1, port]
def _notification_mocks(self, hosts, net, subnet, port):
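        # Build, per host, the ordered RPC casts the DHCP notifier is expected
        # to emit for the network/subnet/port created by _network_port_create.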
host_calls = {}
for host in hosts:
expected_calls = [
mock.call(
mock.ANY,
'network_create_end',
{'network': {'id': net['network']['id']}},
host),
mock.call(
mock.ANY,
'subnet_create_end',
subnet,
host, 'dhcp_agent'),
mock.call(
mock.ANY,
'port_create_end',
{'port': port['port']},
host, 'dhcp_agent')]
host_calls[host] = expected_calls
return host_calls
def test_network_port_create_notification(self):
hosts = [DHCP_HOSTA]
net, subnet, port = self._network_port_create(hosts)
expected_calls = self._notification_mocks(hosts, net, subnet, port)
self.assertEqual(
expected_calls[DHCP_HOSTA], self.dhcp_notifier_cast.call_args_list)
def test_network_ha_port_create_notification(self):
cfg.CONF.set_override('dhcp_agents_per_network', 2)
hosts = [DHCP_HOSTA, DHCP_HOSTC]
net, subnet, port = self._network_port_create(hosts)
expected_calls = self._notification_mocks(hosts, net, subnet, port)
for expected in expected_calls[DHCP_HOSTA]:
self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)
for expected in expected_calls[DHCP_HOSTC]:
self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)
def _is_schedule_network_called(self, device_id):
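        # Create a port with the given device_id while pretending no DHCP
        # agent hosts the network, and report whether the notifier tried to
        # schedule the network.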
plugin = manager.NeutronManager.get_plugin()
notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP]
with contextlib.nested(
self.subnet(),
mock.patch.object(plugin,
'get_dhcp_agents_hosting_networks',
return_value=[]),
mock.patch.object(notifier,
'_schedule_network',
return_value=[])
) as (subnet, _, mock_sched):
with self.port(subnet=subnet, device_id=device_id):
return mock_sched.called
def test_reserved_dhcp_port_creation(self):
device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT
self.assertFalse(self._is_schedule_network_called(device_id))
def test_unreserved_dhcp_port_creation(self):
device_id = 'not_reserved'
self.assertTrue(self._is_schedule_network_called(device_id))
class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin,
test_agent.AgentDBTestMixIn,
AgentSchedulerTestMixIn,
test_plugin.NeutronDbPluginV2TestCase):
plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin'
l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatAgentSchedulingServicePlugin')
def setUp(self):
self.dhcp_notifier_cls_p = mock.patch(
'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
'DhcpAgentNotifyAPI')
self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
self.dhcp_notifier_cls.return_value = self.dhcp_notifier
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
if self.l3_plugin:
service_plugins = {'l3_plugin_name': self.l3_plugin}
else:
service_plugins = None
super(OvsL3AgentNotifierTestCase, self).setUp(
self.plugin_str, service_plugins=service_plugins)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.adminContext = context.get_admin_context()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
self.addCleanup(self.restore_attribute_map)
fake_notifier.reset()
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_router_add_to_l3_agent_notification(self):
l3_plugin = (manager.NeutronManager.get_service_plugins()
[service_constants.L3_ROUTER_NAT])
l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
with contextlib.nested(
mock.patch.object(l3_notifier.client, 'prepare',
return_value=l3_notifier.client),
mock.patch.object(l3_notifier.client, 'cast'),
self.router(),
) as (
mock_prepare, mock_cast, router1
):
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
routers = [router1['router']['id']]
mock_prepare.assert_called_with(server='hosta')
mock_cast.assert_called_with(
mock.ANY, 'router_added_to_agent', payload=routers)
notifications = fake_notifier.NOTIFICATIONS
expected_event_type = 'l3_agent.router.add'
self._assert_notify(notifications, expected_event_type)
def test_router_remove_from_l3_agent_notification(self):
l3_plugin = (manager.NeutronManager.get_service_plugins()
[service_constants.L3_ROUTER_NAT])
l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
with contextlib.nested(
mock.patch.object(l3_notifier.client, 'prepare',
return_value=l3_notifier.client),
mock.patch.object(l3_notifier.client, 'cast'),
self.router(),
) as (
mock_prepare, mock_cast, router1
):
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
self._remove_router_from_l3_agent(hosta_id,
router1['router']['id'])
mock_prepare.assert_called_with(server='hosta')
mock_cast.assert_called_with(
mock.ANY, 'router_removed_from_agent',
payload={'router_id': router1['router']['id']})
notifications = fake_notifier.NOTIFICATIONS
expected_event_type = 'l3_agent.router.remove'
self._assert_notify(notifications, expected_event_type)
def test_agent_updated_l3_agent_notification(self):
l3_plugin = (manager.NeutronManager.get_service_plugins()
[service_constants.L3_ROUTER_NAT])
l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
with contextlib.nested(
mock.patch.object(l3_notifier.client, 'prepare',
return_value=l3_notifier.client),
mock.patch.object(l3_notifier.client, 'cast'),
) as (
mock_prepare, mock_cast
):
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
mock_prepare.assert_called_with(server='hosta')
mock_cast.assert_called_with(
mock.ANY, 'agent_updated', payload={'admin_state_up': False})
import os
import logging
import claripy
from cle import MetaELF
from cle.address_translator import AT
from archinfo import ArchX86, ArchAMD64, ArchARM, ArchAArch64, ArchMIPS32, ArchMIPS64, ArchPPC32, ArchPPC64
from ..tablespecs import StringTableSpec
from ..procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from ..state_plugins import SimFilesystem, SimHostFilesystem
from ..storage.file import SimFile, SimFileBase
from ..errors import AngrSyscallError
from .userland import SimUserland
_l = logging.getLogger('angr.simos.linux')
class SimLinux(SimUserland):
"""
OS-specific configuration for \\*nix-y OSes.
"""
def __init__(self, project, **kwargs):
super(SimLinux, self).__init__(project,
syscall_library=L['linux'],
syscall_addr_alignment=project.arch.instruction_alignment,
name="Linux",
**kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._error_catch_tsd_addr = None
self._vsyscall_addr = None
def configure_project(self): # pylint: disable=arguments-differ
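        # Reserve addresses in the extern object for the dynamic loader entry,
        # the rtld lock/unlock stubs, the TSD error catcher and the vsyscall
        # entry point; each one is hooked with a SimProcedure below.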
self._loader_addr = self.project.loader.extern_object.allocate()
self._loader_lock_addr = self.project.loader.extern_object.allocate()
self._loader_unlock_addr = self.project.loader.extern_object.allocate()
self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
self._vsyscall_addr = self.project.loader.extern_object.allocate()
self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
self.project.hook(self._error_catch_tsd_addr,
P['linux_loader']['_dl_initial_error_catch_tsd'](
static_addr=self.project.loader.extern_object.allocate()
)
)
self.project.hook(self._vsyscall_addr, P['linux_kernel']['_vsyscall']())
ld_obj = self.project.loader.linux_loader_object
if ld_obj is not None:
# there are some functions we MUST use the simprocedures for, regardless of what the user wants
self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch), ld_obj)
self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch), ld_obj)
# set up some static data in the loader object...
_rtld_global = ld_obj.get_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.project.arch, ArchAMD64):
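                    # 0xF08, 0xF10 and 0x990 are the (glibc-version dependent)
                    # offsets of the _dl_rtld_lock_recursive,
                    # _dl_rtld_unlock_recursive and _dl_error_catch_tsd slots
                    # inside glibc's _rtld_global struct on AMD64.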
self.project.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.project.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
self.project.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
# TODO: what the hell is this
_rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
libc_obj = self.project.loader.find_object('libc.so.6')
if libc_obj:
self._weak_hook_symbol('_dl_vdso_vsym', L['libc.so.6'].get('_dl_vdso_vsym', self.arch), libc_obj)
tls_obj = self.project.loader.tls_object
if tls_obj is not None:
if isinstance(self.project.arch, ArchAMD64):
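                # Fill the TCB's stack-guard (fs:0x28) and pointer-guard
                # (fs:0x30) slots; the constants spell "_CANARY_" and
                # "PTRGUARD" in ASCII.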
self.project.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x28, 0x5f43414e4152595f)
self.project.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.project.arch, ArchX86):
self.project.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x10, self._vsyscall_addr)
elif isinstance(self.project.arch, ArchARM):
self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
        # Only set up ifunc resolution if we are using the ELF backend on AMD64 or X86
if isinstance(self.project.loader.main_object, MetaELF):
if isinstance(self.project.arch, (ArchAMD64, ArchX86)):
for binary in self.project.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
try:
if reloc.resolvedby.elftype != 'STT_GNU_IFUNC':
continue
except AttributeError:
continue
gotaddr = reloc.rebased_addr
gotvalue = self.project.loader.memory.read_addr_at(gotaddr)
if self.project.is_hooked(gotvalue):
continue
                        # Replace it with an ifunc-resolve simprocedure!
kwargs = {
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
# TODO: should this be replaced with hook_symbol?
randaddr = self.project.loader.extern_object.allocate()
self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
self.project.loader.memory.write_addr_at(gotaddr, randaddr)
# maybe move this into archinfo?
if self.arch.name == 'X86':
syscall_abis = ['i386']
elif self.arch.name == 'AMD64':
syscall_abis = ['i386', 'amd64']
elif self.arch.name.startswith('ARM'):
syscall_abis = ['arm']
if self.arch.name == 'ARMHF':
syscall_abis.append('armhf')
elif self.arch.name == 'AARCH64':
syscall_abis = ['aarch64']
# https://www.linux-mips.org/wiki/WhatsWrongWithO32N32N64
elif self.arch.name == 'MIPS32':
syscall_abis = ['mips-o32']
elif self.arch.name == 'MIPS64':
syscall_abis = ['mips-n32', 'mips-n64']
elif self.arch.name == 'PPC32':
syscall_abis = ['ppc']
elif self.arch.name == 'PPC64':
syscall_abis = ['ppc64']
else:
syscall_abis = [] # ?
super(SimLinux, self).configure_project(syscall_abis)
def syscall_abi(self, state):
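        # On AMD64 a syscall is entered either through `int 0x80`
        # (Ijk_Sys_int128, the 32-bit i386 ABI) or through the `syscall`
        # instruction (Ijk_Sys_syscall, the 64-bit ABI); tell them apart by
        # the jumpkind recorded in the history.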
if state.arch.name != 'AMD64':
return None
if state.history.jumpkind == 'Ijk_Sys_int128':
return 'i386'
elif state.history.jumpkind == 'Ijk_Sys_syscall':
return 'amd64'
else:
raise AngrSyscallError("Unknown syscall jumpkind %s" % state.history.jumpkind)
# pylint: disable=arguments-differ
def state_blank(self, fs=None, concrete_fs=False, chroot=None,
cwd='/home/user', pathsep='/', **kwargs):
state = super(SimLinux, self).state_blank(**kwargs)
if self.project.loader.tls_object is not None:
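            # Point each architecture's thread-pointer register at the loaded
            # TLS block.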
if isinstance(state.arch, ArchAMD64):
state.regs.fs = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchX86):
state.regs.gs = self.project.loader.tls_object.user_thread_pointer >> 16
elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
state.regs.ulr = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC32):
state.regs.r2 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC64):
state.regs.r13 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchAArch64):
state.regs.tpidr_el0 = self.project.loader.tls_object.user_thread_pointer
        if fs is None:
            fs = {}
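        # Normalize user-supplied file contents: unicode -> bytes -> claripy
        # bitvector -> SimFile; anything else is rejected below.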
for name in fs:
if type(fs[name]) is unicode:
fs[name] = fs[name].encode('utf-8')
if type(fs[name]) is bytes:
fs[name] = claripy.BVV(fs[name])
if isinstance(fs[name], claripy.Bits):
fs[name] = SimFile(name, content=fs[name])
if not isinstance(fs[name], SimFileBase):
raise TypeError("Provided fs initializer with unusable type %r" % type(fs[name]))
mounts = {}
if concrete_fs:
mounts[pathsep] = SimHostFilesystem(chroot if chroot is not None else os.path.sep)
state.register_plugin('fs', SimFilesystem(files=fs, pathsep=pathsep, cwd=cwd, mountpoints=mounts))
if self.project.loader.main_object.is_ppc64_abiv1:
state.libc.ppc64_abiv = 'ppc64_1'
return state
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimLinux, self).state_entry(**kwargs)
# Handle default values
if args is None:
args = []
if env is None:
env = {}
# Prepare argc
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) in (int, long): # pylint: disable=unidiomatic-typecheck
argc = claripy.BVV(argc, state.arch.bits)
# Make string table for args/env/auxv
table = StringTableSpec()
# Add args to string table
table.append_args(args)
# Add environment to string table
table.append_env(env)
# Prepare the auxiliary vector and add it to the end of the string table
# TODO: Actually construct a real auxiliary vector
# current vector is an AT_RANDOM entry where the "random" value is 0xaec0aec0aec0...
aux = [(25, ("AEC0" * 8).decode('hex'))]
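        # 25 is AT_RANDOM. Each entry contributes a pointer-sized type word
        # plus either a string (relocated to a pointer when the table is
        # dumped) or a raw pointer value.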
for a, b in aux:
table.add_pointer(a)
if isinstance(b, str):
table.add_string(b)
else:
table.add_pointer(b)
table.add_null()
table.add_null()
# Dump the table onto the stack, calculate pointers to args, env, and auxv
state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8 * 16))
argv = table.dump(state, state.regs.sp - 16)
envp = argv + ((len(args) + 1) * state.arch.bytes)
auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
# Put argc on stack and fix the stack pointer
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
if state.arch.name in ('PPC32',):
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
# store argc argv envp auxv in the posix plugin
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.posix.auxv = auxv
self.set_entry_register_values(state)
return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.iteritems():
if isinstance(val, (int, long)):
state.registers.store(reg, val, size=state.arch.bytes)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# a pointer to the dynamic linker's destructor routine, to be called at exit
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls_object.user_thread_pointer)
else:
_l.warning('Unknown entry point register value indicator "%s"', val)
else:
_l.error('What the ass kind of default value is %s?', val)
def state_full_init(self, **kwargs):
kwargs['addr'] = self._loader_addr
return super(SimLinux, self).state_full_init(**kwargs)
def prepare_function_symbol(self, symbol_name, basic_addr=None):
"""
Prepare the address space with the data necessary to perform relocations pointing to the given symbol.
Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
relocation target.
"""
if self.project.loader.main_object.is_ppc64_abiv1:
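            # PPC64 ABIv1 calls functions through descriptors (entry address
            # plus TOC pointer), so fabricate a descriptor whose first slot
            # points at the hooked code address.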
if basic_addr is not None:
pointer = self.project.loader.memory.read_addr_at(basic_addr)
return pointer, basic_addr
pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
self.project.loader.extern_object.memory.write_addr_at(
AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
return pseudo_hookaddr, pseudo_toc
else:
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from StringIO import StringIO
import json
import multiprocessing
import textwrap
from optparse import IndentedHelpFormatter
from optparse import OptionGroup
from optparse import OptionParser
from optparse import OptionValueError
from anvil import actions
from anvil import env
from anvil import settings
from anvil import shell as sh
from anvil import utils
from anvil import version
OVERVIEW = """Overview: Anvil is a forging tool to help build OpenStack components
and their dependencies into a complete system. It checks the components out from
git and builds them and their dependencies into packages."""
STEPS = """Steps: For smooth experience please make sure you go through the
following steps when running."""
STEP_SECTIONS = {
'building': [
'./smithy -a prepare',
'./smithy -a build',
],
}
def _format_list(in_list):
sorted_list = sorted(in_list)
return "[" + ", ".join(sorted_list) + "]"
def _size_cb(option, opt_str, value, parser):
try:
parser.values.show_amount = utils.to_bytes(value)
except (TypeError, ValueError) as e:
raise OptionValueError("Invalid value for %s due to %s" % (opt_str, e))
class SmithyHelpFormatter(IndentedHelpFormatter):
def _wrap_it(self, text):
return textwrap.fill(text, width=self.width,
initial_indent="", subsequent_indent=" ")
def format_epilog(self, epilog):
buf = StringIO()
buf.write(IndentedHelpFormatter.format_epilog(self, epilog))
buf.write("\n")
buf.write(self._wrap_it('For further information check out: '
'http://anvil.readthedocs.org'))
buf.write("\n")
return buf.getvalue()
def format_usage(self, usage):
buf = StringIO()
buf.write(IndentedHelpFormatter.format_usage(self, usage))
buf.write("\n")
buf.write(self._wrap_it(OVERVIEW))
buf.write("\n\n")
buf.write(self._wrap_it(STEPS))
buf.write("\n\n")
for k in sorted(STEP_SECTIONS.keys()):
buf.write("%s:\n" % (k.title()))
for line in STEP_SECTIONS[k]:
buf.write(" %s\n" % (line))
return buf.getvalue()
def _get_default_dir():
root_dir = env.get_key('INSTALL_ROOT')
if root_dir:
return root_dir
return sh.joinpths(sh.gethomedir(), 'openstack')
def parse(previous_settings=None):
version_str = "%s v%s" % ('anvil', version.version_string())
help_formatter = SmithyHelpFormatter(width=120)
parser = OptionParser(version=version_str, formatter=help_formatter,
prog='smithy')
# Root options
parser.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="make the output logging verbose")
# Install/start/stop/uninstall specific options
base_group = OptionGroup(parser, "Action specific options")
base_group.add_option("-p", "--persona",
action="store",
type="string",
dest="persona_fn",
default=sh.joinpths(settings.PERSONA_DIR, 'in-a-box', 'basic.yaml'),
metavar="FILE",
help="persona yaml file to apply (default: %default)")
base_group.add_option("-a", "--action",
action="store",
type="string",
dest="action",
metavar="ACTION",
help="required action to perform: %s" % (_format_list(actions.names())))
base_group.add_option("-o", "--origins",
action="store",
type="string",
dest="origins_fn",
default=sh.joinpths(settings.ORIGINS_DIR, 'master.yaml'),
metavar="FILE",
help="yaml file describing where to get openstack sources "
"from (default: %default)")
base_group.add_option("--origins-patch",
action="store",
type="string",
dest="origins_patch_fn",
default=None,
metavar="FILE",
help="origins file patch, jsonpath format (rfc6902)")
base_group.add_option("--distros-patch",
action="store",
type="string",
dest="distros_patch_fn",
default=None,
metavar="FILE",
help="distros file patch, jsonpath format (rfc6902)")
base_group.add_option("-j", "--jobs",
action="store",
type="int",
dest="jobs",
default=multiprocessing.cpu_count() + 1,
metavar="JOBS",
help="number of building jobs to run simultaneously (default: %default)")
base_group.add_option("-d", "--directory",
action="store",
type="string",
dest="dir",
metavar="DIR",
default=_get_default_dir(),
help=("empty root DIR or DIR with existing components (default: %default)"))
base_group.add_option("--tee-file",
action="store",
type="string",
dest="tee_file",
metavar="FILE",
default='/var/log/anvil.log',
help=("location to store tee of output (default: %default)"))
parser.add_option_group(base_group)
build_group = OptionGroup(parser, "Build specific options")
build_group.add_option('-u', "--usr-only",
action="store_true",
dest="usr_only",
default=False,
help=("when packaging only store /usr directory"
" (default: %default)"))
build_group.add_option("--venv-deploy-dir",
action="store",
type="string",
dest="venv_deploy_dir",
default=None,
help=("for virtualenv builds, make the virtualenv "
"relocatable to a directory different from "
"build directory"))
build_group.add_option('-c', "--overwrite-configs",
action="store_true",
dest="overwrite_configs",
default=False,
help=("When packaging do you want rpm to mark config "
"files with %config or treat them as files and "
"overwrite them each time on rpm install"))
parser.add_option_group(build_group)
# Extract only what we care about, these will be passed
# to the constructor of actions as arguments
    # so don't adjust the naming willy-nilly...
if previous_settings:
parser.set_defaults(**previous_settings)
(options, _args) = parser.parse_args()
values = {}
values['dir'] = (options.dir or "")
values['action'] = (options.action or "")
values['jobs'] = options.jobs
values['persona_fn'] = options.persona_fn
values['origins_fn'] = options.origins_fn
values['verbose'] = options.verbose
values['usr_only'] = options.usr_only
values['tee_file'] = options.tee_file
values['overwrite_configs'] = options.overwrite_configs
if options.origins_patch_fn:
with open(options.origins_patch_fn) as fp:
values['origins_patch'] = json.load(fp)
if options.distros_patch_fn:
with open(options.distros_patch_fn) as fp:
values['distros_patch'] = json.load(fp)
values['venv_deploy_dir'] = options.venv_deploy_dir
return values
|
|
#
# PGDB x86 32bit architecture module
#
# implementation notes:
# - don't create class objects in this module that are not quickly
# released by pgdb.py. the x86 modules get switched out dynamically
# when qemu switches in and out of 64bit mode!
# - syntax errors in this module can be found by running $ python pgdb_x86.py
# - darn, with qemu 3.1.0 some awesome changes are in place to handle x86
# registers the right way. unfortunately when qemu starts (with -s -S)
#    the rdp protocol says we're in 64bit mode when it should be 32bit mode :(
#
# contributors:
# djv - Duane Voth
#
# history:
# 2015/10/12 - v0.01 - djv - released
# 2015/12/27 - v0.02 - djv - add cpu modes
# 2019/05/12 - v0.03 - djv - update for qemu 3.1.x (proper feature support)
version = "PGDB x86 v0.03 2019/05/12"
name = 'i386'
Log = None
DSfns = None
# PGDB can perform an alter ego switch mid-stream (thanks to module re-load).
# If this architecture knows about an alter ego with a different register
# dump length, return the name of that alter ego and PGDB will switch.
# x86's multiple cpu modes can however run in different cores simultaneously
# so this module supports all of them.
def alter_ego(n):
return None
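# Illustrative sketch (not part of the original module): an architecture that
# did want to hand off control would map an unexpected register-dump length to
# the name of the module that understands it.  The length and module name
# below are hypothetical.
# def alter_ego(n):
#     if n == 1234:          # a dump length only the other module understands
#         return 'pgdb_other'
#     return None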
# The 'g' command for gdb's remote debug protocol (rdp)
# returns a long string of hex digits, which are the cpu
# registers all packed together - we have to parse this.
# The following Gspec arrays determine register display
# order, as well as parsing specific register data from
# the gdb cmd response string.
# 16 bit mode is a total guess ... it has never been seen and thus not debugged
gspec16 = [
['ax', 0, 4], ['bx', 12, 16], ['cx', 4, 8], ['dx', 8, 12],
['di', 28, 32], ['si', 24, 28], ['bp', 20, 24], ['flags', 36, 40],
['cs', 40, 44], ['eip', 32, 36], ['ss', 44, 48], ['esp', 16, 20]
]
gspec32 = [
['eax', 0, 8], ['ebx', 24, 32], ['ecx', 8, 16], ['edx', 16, 24],
['edi', 56, 64], ['esi', 48, 56], ['ebp', 40, 48], ['eflags', 72, 80],
['cs', 80, 88], ['eip', 64, 72], ['ss', 88, 96], ['esp', 32, 40],
['ds', 96, 104], ['es', 104, 112], ['fs', 112, 120], ['gs', 120, 128],
['st0', 128, 148], ['st1', 148, 168], ['st2', 168, 188], ['st3', 188, 208],
['st4', 208, 228], ['st5', 228, 248], ['st6', 248, 268], ['st7', 268, 288],
['fctrl', 288, 296], ['fstat', 296, 304], ['ftag', 304, 312], ['fiseg', 312, 320],
['fioff', 320, 328], ['foseg', 328, 336], ['fooff', 336, 344], ['fop', 344, 352],
['xmm0', 352, 384], ['xmm1', 384, 416], ['xmm2', 416, 448], ['xmm3', 448, 480],
['xmm4', 480, 512], ['xmm5', 512, 544], ['xmm6', 544, 576], ['xmm7', 576, 608],
['mxcsr', 608, 616]
]
gspec64 = [
['rax', 0, 16], ['rbx', 16, 32], ['rcx', 32, 48], ['rdx', 48, 64],
['rsi', 64, 80], ['rdi', 80, 96], ['rbp', 96, 112], ['rsp', 112, 128],
['r8', 128, 144], ['r9', 144, 160], ['r10', 160, 176], ['r11', 176, 192],
['r12', 192, 208], ['r13', 208, 224], ['r14', 224, 240], ['r15', 240, 256],
['rip', 256, 272], ['eflags',272, 280], ['cs', 280, 288], ['ss', 288, 296],
['ds', 296, 304], ['es', 304, 312], ['fs', 312, 320], ['gs', 320, 328],
# ok I know I botched the floating point offsets ... and what is 328-392 ?
['st0', 392, 402], ['st1', 402, 412], ['st2', 412, 422], ['st3', 422, 432],
['st4', 432, 442], ['st5', 442, 452], ['st6', 452, 462], ['st7', 462, 472],
['st8', 472, 482], ['st9', 482, 492], ['st10', 492, 502], ['st11', 502, 512],
['st12', 512, 522], ['st13', 522, 532], ['st14', 532, 542], ['st15', 542, 552],
['xmm0', 552, 584], ['xmm1', 584, 616], ['xmm2', 616, 648], ['xmm3', 648, 680],
['xmm4', 680, 712], ['xmm5', 712, 744], ['xmm6', 744, 776], ['xmm7', 776, 808],
['xmm8', 808, 840], ['xmm9', 840, 872], ['xmm10', 872, 904], ['xmm11', 904, 936],
['xmm12', 936, 968], ['xmm13', 968,1000], ['xmm14',1000,1032], ['xmm15',1032,1064],
['mxcsr',1064,1072]
]
gspec = []
gspec_idx = 0
spec = { 111: { 'mode':'16b', 'maxy':7, 'maxx':50, 'gspec':gspec16 },
616: { 'mode':'32b', 'maxy':7, 'maxx':61, 'gspec':gspec32 },
1072: { 'mode':'64b', 'maxy':11, 'maxx':63, 'gspec':gspec64 },
# a qemu 2.9 reg description xml ??
552: { 'mode':'32b', 'maxy':11, 'maxx':63, 'gspec':gspec }
}
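# Illustrative sketch (not part of the original module): how a single gspec
# entry slices one register out of the 'g' reply.  Start/end are nibble
# indexes into the hex string, and the value is assumed to be little-endian
# (as it is for x86 targets); 'x' digits for unavailable registers are not
# handled here.
def _parse_reg_example(g_reply, entry):
    name, start, end = entry
    nibbles = g_reply[start:end]
    # reverse the byte order: '78563412' -> 0x12345678
    swapped = ''.join(nibbles[i:i+2] for i in range(len(nibbles) - 2, -1, -2))
    return name, int(swapped, 16)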
def generate_gspec(feat_name, xml_tree):
# may be called multiple times (some reg sets are broken out by processor model)
global gspec, gspec_idx
Log.write('## i386: ' + feat_name + '\n')
if feat_name == 'org.gnu.gdb.i386.64bit':
for r in xml_tree:
n = int(r['bitsize']) // 4 # 4 bits per nibble
gspec.append((r['name'], gspec_idx, gspec_idx+n))
gspec_idx += n
# erm... what's the difference between i386.64bit and i386.core ?
#elif feat_name == 'org.gnu.gdb.i386.core.x':
# for r in xml_tree:
# Log.write('## implement core: ' + r['name'] + ' ' + r['bitsize'] + ' later\n')
# n = int(r['bitsize']) // 4 # 4 bits per nibble
# gspec.append((r['name'], gspec_idx, gspec_idx+n))
# gspec_idx += n
#elif feat_name == 'org.gnu.gdb.i386.64bit.sse':
# for r in xml_tree:
# Log.write('## implement SSE: ' + r['name'] + ' ' + r['bitsize'] + ' later\n')
else:
Log.write('## deal with feature "' + feat_name + '" later ...\n')
#def compute_address(seg, off):
# # deal with protected mode vs. real mode:
# # assume seg values below 0x80 are descriptors
# if seg < 0x80:
# return off # FIXME need to lookup descriptor
# else:
# return seg * 16 + off
# Cpu methods
def cpu_reg_update(self, newregs, mode):
strs = []
def rdiff(y, x, fmt, rname, newr, oldr):
new = newr[rname]
old = oldr[rname] if rname in oldr else new
attr = '' if new == old else '\a'
strs.append((y, x, attr + fmt % new))
if self.mode == mode:
strs.append((0, 10, ' %s ' % spec[mode]['mode']))
else:
strs.append((0, 10, ' \a%s ' % spec[mode]['mode']))
self.mode = mode
if mode == 616:
# zero row is the title row
rdiff(0, 20, ' %04x:', 'cs', newregs, self.regs)
rdiff(0, 26, '%08x ', 'eip', newregs, self.regs)
rdiff(1, 2, 'eax %08x', 'eax', newregs, self.regs)
rdiff(1, 17, 'ebx %08x', 'ebx', newregs, self.regs)
rdiff(1, 32, 'ecx %08x', 'ecx', newregs, self.regs)
rdiff(1, 47, 'edx %08x', 'edx', newregs, self.regs)
rdiff(2, 2, 'edi %08x', 'edi', newregs, self.regs)
rdiff(2, 17, 'esi %08x', 'esi', newregs, self.regs)
rdiff(2, 32, 'ebp %08x', 'ebp', newregs, self.regs)
rdiff(2, 47, 'esp %08x', 'esp', newregs, self.regs)
rdiff(3, 3, 'ds %04x', 'ds', newregs, self.regs)
rdiff(3, 18, 'es %04x', 'es', newregs, self.regs)
rdiff(3, 30, 'mxcsr %08x', 'mxcsr', newregs, self.regs)
rdiff(3, 48, 'ss %04x', 'ss', newregs, self.regs)
rdiff(4, 3, 'fs %04x', 'fs', newregs, self.regs)
rdiff(4, 18, 'gs %04x', 'gs', newregs, self.regs)
rdiff(4, 30, 'fctrl %08x', 'fctrl', newregs, self.regs)
rdiff(4, 45, 'flags %08x', 'eflags', newregs, self.regs)
elif mode == 1072:
# I'm not completely happy with this layout ...
# zero row is the title row
rdiff(0, 20, ' %04x:', 'cs', newregs, self.regs)
rdiff(0, 26, '%016x ', 'rip', newregs, self.regs)
rdiff(1, 2, 'rax %016x', 'rax', newregs, self.regs)
rdiff(1, 24, 'rbx %016x', 'rbx', newregs, self.regs)
rdiff(2, 2, 'rcx %016x', 'rcx', newregs, self.regs)
rdiff(2, 24, 'rdx %016x', 'rdx', newregs, self.regs)
rdiff(2, 54, 'ds %04x', 'ds', newregs, self.regs)
rdiff(3, 2, 'rdi %016x', 'rdi', newregs, self.regs)
rdiff(3, 24, 'rsi %016x', 'rsi', newregs, self.regs)
rdiff(3, 54, 'es %04x', 'es', newregs, self.regs)
rdiff(4, 2, 'rbp %016x', 'rbp', newregs, self.regs)
rdiff(4, 24, 'rsp %016x', 'rsp', newregs, self.regs)
rdiff(4, 54, 'ss %04x', 'ss', newregs, self.regs)
rdiff(5, 2, 'r08 %016x', 'r8', newregs, self.regs)
rdiff(5, 24, 'r09 %016x', 'r9', newregs, self.regs)
rdiff(5, 54, 'fs %04x', 'fs', newregs, self.regs)
rdiff(6, 2, 'r10 %016x', 'r10', newregs, self.regs)
rdiff(6, 24, 'r11 %016x', 'r11', newregs, self.regs)
rdiff(6, 54, 'gs %04x', 'gs', newregs, self.regs)
rdiff(7, 2, 'r12 %016x', 'r12', newregs, self.regs)
rdiff(7, 24, 'r13 %016x', 'r13', newregs, self.regs)
rdiff(7, 47, 'mxcsr %08x', 'mxcsr', newregs, self.regs)
rdiff(8, 2, 'r14 %016x', 'r14', newregs, self.regs)
rdiff(8, 24, 'r15 %016x', 'r15', newregs, self.regs)
rdiff(8, 47, 'flags %08x', 'eflags', newregs, self.regs)
elif mode == 552:
# I'm not completely happy with this layout ...
# zero row is the title row
rdiff(0, 20, ' %04x:', 'cs', newregs, self.regs)
rdiff(0, 26, '%016x ', 'rip', newregs, self.regs)
rdiff(1, 2, 'rax %016x', 'rax', newregs, self.regs)
rdiff(1, 24, 'rbx %016x', 'rbx', newregs, self.regs)
rdiff(2, 2, 'rcx %016x', 'rcx', newregs, self.regs)
rdiff(2, 24, 'rdx %016x', 'rdx', newregs, self.regs)
rdiff(2, 54, 'ds %04x', 'ds', newregs, self.regs)
rdiff(3, 2, 'rdi %016x', 'rdi', newregs, self.regs)
rdiff(3, 24, 'rsi %016x', 'rsi', newregs, self.regs)
rdiff(3, 54, 'es %04x', 'es', newregs, self.regs)
rdiff(4, 2, 'rbp %016x', 'rbp', newregs, self.regs)
rdiff(4, 24, 'rsp %016x', 'rsp', newregs, self.regs)
rdiff(4, 54, 'ss %04x', 'ss', newregs, self.regs)
rdiff(5, 2, 'r08 %016x', 'r8', newregs, self.regs)
rdiff(5, 24, 'r09 %016x', 'r9', newregs, self.regs)
rdiff(5, 54, 'fs %04x', 'fs', newregs, self.regs)
rdiff(6, 2, 'r10 %016x', 'r10', newregs, self.regs)
rdiff(6, 24, 'r11 %016x', 'r11', newregs, self.regs)
rdiff(6, 54, 'gs %04x', 'gs', newregs, self.regs)
rdiff(7, 2, 'r12 %016x', 'r12', newregs, self.regs)
rdiff(7, 24, 'r13 %016x', 'r13', newregs, self.regs)
rdiff(7, 47, 'mxcsr %08x', 'mxcsr', newregs, self.regs)
rdiff(8, 2, 'r14 %016x', 'r14', newregs, self.regs)
rdiff(8, 24, 'r15 %016x', 'r15', newregs, self.regs)
rdiff(8, 47, 'flags %08x', 'eflags', newregs, self.regs)
else:
strs.append((1, 2, 'unknown register set (%d)' % mode))
# lol, messy, but cool
x = newregs['eflags']
fla = '%02x' % (x&0xff) + '%02x' % ((x&0xff00)>>8) + '%02x00' % ((x&0xff0000)>>16)
flstr = DSfns['ds_print_one'](fla, ds_eflags)[0]
if mode == 616 or mode == 552:
# so the max possible eflags string is like 53,
# here I hope that not all flags will be on at the same time
strs.append((5, 14, '%45s' % flstr))
elif mode == 1072:
## lol, messy, but cool
#x = newregs['eflags']
#fla = '%02x' % (x&0xff) + '%02x' % ((x&0xff00)>>8) + '%02x00' % ((x&0xff0000)>>16)
#flstr = DSfns['ds_print_one'](fla, ds_rflags)[0]
strs.append((9, 16, '%45s' % flstr))
# TODO: at one point I used this for XMM and ST regs - only displayed the
# non-zero regs - but it's all too much - need a slick way to deal
# with tons of regs - scroll the cpu windows maybe ... for now,
# no floating point or extended "multimedia" regs are displayed.
# track line length in nibbles displayed
# always print the first 16 regs, then only non-zero regs
# if i < 16 or int(val, 16) != 0:
# n += len(val)
# if (n > 30):
# eol = '\n'
# n = 0
# else:
# eol = ''
# rdata += ' %5s' % spec[0] + ' %s' % val + eol
# i += 1
return strs
def get_seg_register(self):
if 'cs' in self.regs:
return self.regs['cs']
return None
def get_ip_register(self):
# with qemu 3.1.x we are getting a mode == None ?
    if self.mode is None:
return None
#Log.write('## get_ip reg data len ' + str(self.mode) + '\n')
if self.mode == 616 or self.mode == 552:
if 'eip' in self.regs:
return self.regs['eip']
if self.mode == 1072:
if 'rip' in self.regs:
return self.regs['rip']
Log.write('undefined mode %s in get_ip_register() in %s\n' % (
self.mode, 'pgdb_i386.py'), CPerr) # 'pgdp_' + name + '.py' ??
return None
def get_ip_bpfmt(self):
# return the current cs:eip formatted for the gdb 'Z0' command
    # no, you are right, it's not directly useful (because who wants to
# set a breakpoint at the current eip? the emulator is just going
# to stop in exactly the same place), but it does show the user how
# to format the value correctly
rval = ''
# not sure if gdb protocol supports segments for breakpoints, might
# need to convert seg:off to a physical or logical memory address
#if self.regs['cs']:
# rval += '%x' % self.regs['cs']
if self.mode == 616:
rval += '%x' % self.regs['eip']
elif self.mode == 1072 or self.mode == 552:
rval += '%x' % self.regs['rip']
return rval.lower()
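# Illustrative sketch (not part of the original module): the string returned by
# get_ip_bpfmt() is meant to be embedded in a gdb remote-protocol breakpoint
# packet.  The trailing ',1' kind field is an assumption (x86 software
# breakpoint); the surrounding packet framing is handled elsewhere.
def _z0_packet_example(cpu):
    addr = cpu.get_ip_bpfmt()
    return 'Z0,%s,1' % addr if addr else None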
# ---------------------------------------------------------------------------
# Format Arbitrary Data Structures
#
# sure, this could be an external module i.e. 'import fads', and someone
# will (in some project of theirs) likely break it out, but I won't (see
# my rant in pgdb.py about 'fads').
class DS(object):
# defines a data structure
def __init__(self, name, lname, dlen, height, width, hdr, elements):
self.name = name
self.lname = lname
self.dlen = dlen # ds data length in bytes
self.height = height # height of one ds
self.width = width # strings will be truncated to this
self.header = hdr # hdr %fmt or None
self.elements = elements # list of DSFLD objects
class DSFLD(object):
# defines a data structure field
# field objects must be sorted for the display, in row then column order
def __init__(self, y, x, name, build, vals):
self.y = y # display row number
self.x = x # display minimum column number
self.name = name
self.build = build # list of DSBLD objects
self.vals = vals # list of DSVAL objects
class DSBLD(object):
# defines how to build a data structure field from data
    # a list of DSBLD objects is ORed together to build a field
def __init__(self, firstb, lastb, mask, lshift):
self.firstb = firstb # index of first byte
self.lastb = lastb # index of last byte
self.mask = mask # int
self.lshift = lshift # bits to left shift mask/bytes
class DSVAL(object):
# defines text that identifies a specific field value
def __init__(self, mask, val, txt):
self.mask = mask # field mask
self.val = val # value, or -1 for != 0 test
self.txt = txt # string to print if match
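# Illustrative sketch (not part of the original classes): how a field value
# could be assembled from a list of DSBLD specs.  The real formatting lives in
# pgdb.py (reached through DSfns) and works on hex strings like the samples at
# the bottom of this file; this version takes a bytes object and assumes
# little-endian byte order, which is what the layouts below rely on.
def _build_field_example(data, blds):
    val = 0
    for b in blds:
        chunk = int.from_bytes(data[b.firstb:b.lastb + 1], 'little')
        val |= (chunk & b.mask) << b.lshift
    return val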
# ------------------------------------------------
# define the data structures specific to i386
# gdt: intel swdev3a s3.4.5 pg 3-13 fig 3-8
# code and data: intel swdev3a s3.4.5.1 pg 3-17
# tss descriptor: intel swdev3a s7.2.2 pg 7-7
_gdt_els = (
DSFLD(0, 0,'',[DSBLD(2,3,0xffff,0),DSBLD(4,4,0xff,16),DSBLD(7,7,0xff,24)], []),
DSFLD(0,10,'',[DSBLD(0,1,0xffff,0),DSBLD(6,6,0x0f,16)], []),
DSFLD(0,17,'',[DSBLD(5,5,0xff,0)],
[DSVAL(0x1f,0x00,' \rres\t'), DSVAL(0x1f,0x08,' \rres\t'),
DSVAL(0x1f,0x0a,' \rres\t'), DSVAL(0x1f,0x0d,' \rres\t'),
DSVAL(0x80,0x00,' \a!pres\t'),
# system types
DSVAL(0x1f,0x01,' tss16'), DSVAL(0x1f,0x02,' ldt'),
DSVAL(0x1f,0x03,' tss16(b)'), DSVAL(0x1f,0x04,' call16'),
DSVAL(0x1f,0x05,' taskg'), DSVAL(0x1f,0x06,' intr16'),
DSVAL(0x1f,0x07,' trap16'), DSVAL(0x1f,0x09,' tss'),
DSVAL(0x1f,0x0b,' tss(b)'), DSVAL(0x1f,0x0c,' call'),
DSVAL(0x1f,0x0e,' intr'), DSVAL(0x1f,0x0f,' trap'),
# non system types
DSVAL(0x18,0x10,' data'), DSVAL(0x18,0x18,' code'),
DSVAL(0x1a,0x10,' r/o'), DSVAL(0x1a,0x12,' r/w'),
DSVAL(0x1a,0x18,' e/o'), DSVAL(0x1a,0x1a,' e/r'),
DSVAL(0x11,0x11,' accessed')]),
DSFLD(0,21,'',[DSBLD(6,6,0xff,0)],
[DSVAL(0x60,0x00,' 16bit'),DSVAL(0x60,0x20,' 64bit'),
DSVAL(0x60,0x60,' \rerr\r')])
)
# name, lname, dlen, height, width, hdr, elements
ds_gdt = DS('gdt', '32 bit global descriptor table', 8, 1, 58, '%03x ', _gdt_els)
# tss: intel swdev3a s7.2.1 pg 7-5
_tss_els = (
DSFLD( 0, 2, 'ss0 ',[DSBLD( 8, 9, 0xffff,0)],[]),
DSFLD( 0, 0, '_res',[DSBLD( 10, 11, 0xffff,0)],
[DSVAL(0xffff,-1,' \rss0 reserved!')]),
DSFLD( 0,12, 'esp0 ',[DSBLD( 4, 7,0xffffffff,0)],[]),
DSFLD( 1, 2, 'ss1 ',[DSBLD( 16, 17, 0xffff,0)],[]),
DSFLD( 1, 0, '_res',[DSBLD( 18, 19, 0xffff,0)],
[DSVAL(0xffff,-1,' \rss1 reserved!')]),
DSFLD( 1,12, 'esp1 ',[DSBLD( 12, 15,0xffffffff,0)],[]),
DSFLD( 2, 2, 'ss2 ',[DSBLD( 24, 25, 0xffff,0)],[]),
DSFLD( 2, 0, '_res',[DSBLD( 26, 27, 0xffff,0)],
                    [DSVAL(0xffff,-1,' \rss2 reserved!')]),
DSFLD( 2,12, 'esp2 ',[DSBLD( 20, 23,0xffffffff,0)],[]),
DSFLD( 3, 2, 'cr3 ',[DSBLD( 28, 31,0xffffffff,0)],[]),
DSFLD( 3,16, 'flg ',[DSBLD( 36, 39,0xffffffff,0)],[]),
DSFLD( 4, 2, 'eax ',[DSBLD( 40, 43,0xffffffff,0)],[]),
DSFLD( 4,16, 'ecx ',[DSBLD( 44, 47,0xffffffff,0)],[]),
DSFLD( 5, 2, 'edx ',[DSBLD( 48, 51,0xffffffff,0)],[]),
DSFLD( 5,16, 'ebx ',[DSBLD( 52, 55,0xffffffff,0)],[]),
DSFLD( 6, 3, 'cs ',[DSBLD( 76, 77, 0xffff,0)],[]),
DSFLD( 6, 0, '_res',[DSBLD( 78, 79, 0xffff,0)],
[DSVAL(0xffff,-1,' \rcs reserved!')]),
DSFLD( 6,12, 'eip ',[DSBLD( 32, 35,0xffffffff,0)],[]),
DSFLD( 7, 3, 'ss ',[DSBLD( 80, 81, 0xffff,0)],[]),
DSFLD( 7, 0, '_res',[DSBLD( 82, 83, 0xffff,0)],
[DSVAL(0xffff,-1,' \rss reserved!')]),
DSFLD( 7,12, 'esp ',[DSBLD( 56, 59,0xffffffff,0)],[]),
DSFLD( 8,12, 'ebp ',[DSBLD( 60, 63,0xffffffff,0)],[]),
DSFLD( 9, 3, 'es ',[DSBLD( 72, 73, 0xffff,0)],[]),
DSFLD( 9, 0, '_res',[DSBLD( 74, 75, 0xffff,0)],
[DSVAL(0xffff,-1,' \res reserved!')]),
DSFLD( 9,12, 'esi ',[DSBLD( 64, 67,0xffffffff,0)],[]),
DSFLD(10, 3, 'ds ',[DSBLD( 84, 85, 0xffff,0)],[]),
DSFLD(10, 0, '_res',[DSBLD( 86, 87, 0xffff,0)],
[DSVAL(0xffff,-1,' \rds reserved!')]),
DSFLD(10,12, 'edi ',[DSBLD( 68, 71,0xffffffff,0)],[]),
DSFLD(11, 3, 'fs ',[DSBLD( 88, 89, 0xffff,0)],[]),
DSFLD(11, 0, '_res',[DSBLD( 90, 91, 0xffff,0)],
[DSVAL(0xffff,-1,' \rfs reserved!')]),
DSFLD(11,20, 'ldt ',[DSBLD( 96, 97, 0xffff,0)],[]),
DSFLD(11, 0, '_res',[DSBLD( 98, 99, 0xffff,0)],
[DSVAL(0xffff,-1,' \rldt reserved!')]),
DSFLD(12, 3, 'gs ',[DSBLD( 92, 93, 0xffff,0)],[]),
DSFLD(12, 0, '_res',[DSBLD( 94, 95, 0xffff,0)],
[DSVAL(0xffff,-1,' \rgs reserved!')]),
DSFLD(12,19, 'link ',[DSBLD( 0, 1, 0xffff,0)],[]),
DSFLD(12, 0, '_res',[DSBLD( 2, 3, 0xffff,0)],
[DSVAL(0xffff,-1,' \rlink reserved!')]),
DSFLD(13, 0, 'iomap ',[DSBLD(102,103, 0xffff,0)],[]),
DSFLD(13, 0, '_',[DSBLD(100,100, 0x1,0)],
[DSVAL(0x1,0x1,' \vdbg trap')]),
DSFLD(13, 0, '_',[DSBLD(100,100, 0xfffe,0)],
[DSVAL(0xfffe,-1,' \rt reserved!')])
)
ds_tss = DS('tss', 'task state', 104, 15, 30, '\b---- tss @ 0x%x', _tss_els)
# eflags: intel swdev1 s3.4.3 pg 3-21 fig 3-8
_eflags_els = (
DSFLD(0, 0, '_',[DSBLD(0,2,0xffffff,0)],
# print the flags left to right
[DSVAL(0x200000,0x200000,'id'),
DSVAL(0x100000,0x100000,' vp'), DSVAL(0x080000,0x080000,' vi'),
DSVAL(0x040000,0x040000,' ac'), DSVAL(0x020000,0x020000,' v8'),
DSVAL(0x010000,0x010000,' r'), DSVAL(0x004000,0x004000,' nt'),
DSVAL(0x003000,0x001000,' io1'),
DSVAL(0x003000,0x002000,' io2'), DSVAL(0x003000,0x003000,' io3'),
DSVAL(0x000800,0x000800,' o'),
DSVAL(0x000400,0x000400,' d'), DSVAL(0x000200,0x000200,' i'),
DSVAL(0x000100,0x000100,' t'), DSVAL(0x000080,0x000080,' s'),
DSVAL(0x000040,0x000040,' z'), DSVAL(0x000010,0x000010,' a'),
DSVAL(0x000004,0x000004,' p'), DSVAL(0x000001,0x000001,' c')]),
)
ds_eflags = DS('flags', '32 bit cpu flags', 4, 1, 53, None, _eflags_els)
# ------------------------------------------------
# define the data structures specific to x86_64
# gdt: intel swdev3a s3.4.5 pg 3-13 fig 3-8
# code and data: intel swdev3a s3.4.5.1 pg 3-17
# tss descriptor: intel swdev3a s7.2.2 pg 7-7
_gdt64_els = (
DSFLD(0, 0,'',[DSBLD(2,3,0xffff,0),DSBLD(4,4,0xff,16),
DSBLD(7,7,0xff,24), DSBLD(8,16,0xffffffff,32)], []),
DSFLD(0,18,'',[DSBLD(0,1,0xffff,0),DSBLD(6,6,0x0f,16)], []),
DSFLD(0,25,'',[DSBLD(5,5,0xff,0)],
[DSVAL(0x1f,0x00,' \rres\t'), DSVAL(0x1f,0x08,' \rres\t'),
DSVAL(0x1f,0x0a,' \rres\t'), DSVAL(0x1f,0x0d,' \rres\t'),
DSVAL(0x80,0x00,' \a!pres\t'),
# system types
DSVAL(0x1d,0x01,' tss16'), DSVAL(0x1f,0x02,' ldt'),
DSVAL(0x1f,0x04,' call16'), DSVAL(0x1f,0x05,' taskg'),
DSVAL(0x1f,0x06,' intr16'), DSVAL(0x1f,0x07,' trap16'),
DSVAL(0x1d,0x09,' tss'), DSVAL(0x1f,0x0c,' call'),
DSVAL(0x1f,0x0e,' intr'), DSVAL(0x1f,0x0f,' trap'),
# non system types
DSVAL(0x18,0x10,' data'), DSVAL(0x18,0x18,' code'),
DSVAL(0x1a,0x10,' r/o'), DSVAL(0x1a,0x12,' r/w'),
DSVAL(0x1a,0x18,' e/o'), DSVAL(0x1a,0x1a,' e/r'),
DSVAL(0x11,0x11,' accessed')]),
DSFLD(0,29,'',[DSBLD(6,6,0xff,0)],
[DSVAL(0x60,0x00,' 16bit'),DSVAL(0x60,0x20,' 64bit')])
)
ds_gdt64 = DS('gdt64', '64 bit global descriptor table', 16, 1, 62, '%03x ', _gdt64_els)
# tss: intel swdev3a s7.2.1 pg 7-5
_tss_els = (
DSFLD( 0, 2, 'ss0 ',[DSBLD( 8, 9, 0xffff,0)],[]),
DSFLD( 0, 0, '_res',[DSBLD( 10, 11, 0xffff,0)],
[DSVAL(0xffff,-1,' \rss0 reserved!')]),
DSFLD( 0,12, 'esp0 ',[DSBLD( 4, 7,0xffffffff,0)],[]),
DSFLD( 1, 2, 'ss1 ',[DSBLD( 16, 17, 0xffff,0)],[]),
DSFLD( 1, 0, '_res',[DSBLD( 18, 19, 0xffff,0)],
[DSVAL(0xffff,-1,' \rss1 reserved!')]),
DSFLD( 1,12, 'esp1 ',[DSBLD( 12, 15,0xffffffff,0)],[]),
DSFLD( 2, 2, 'ss2 ',[DSBLD( 24, 25, 0xffff,0)],[]),
DSFLD( 2, 0, '_res',[DSBLD( 26, 27, 0xffff,0)],
                    [DSVAL(0xffff,-1,' \rss2 reserved!')]),
DSFLD( 2,12, 'esp2 ',[DSBLD( 20, 23,0xffffffff,0)],[]),
DSFLD( 3, 2, 'cr3 ',[DSBLD( 28, 31,0xffffffff,0)],[]),
DSFLD( 3,16, 'flg ',[DSBLD( 36, 39,0xffffffff,0)],[]),
DSFLD( 4, 2, 'eax ',[DSBLD( 40, 43,0xffffffff,0)],[]),
DSFLD( 4,16, 'ecx ',[DSBLD( 44, 47,0xffffffff,0)],[]),
DSFLD( 5, 2, 'edx ',[DSBLD( 48, 51,0xffffffff,0)],[]),
DSFLD( 5,16, 'ebx ',[DSBLD( 52, 55,0xffffffff,0)],[]),
DSFLD( 6, 3, 'cs ',[DSBLD( 76, 77, 0xffff,0)],[]),
DSFLD( 6, 0, '_res',[DSBLD( 78, 79, 0xffff,0)],
[DSVAL(0xffff,-1,' \rcs reserved!')]),
DSFLD( 6,12, 'eip ',[DSBLD( 32, 35,0xffffffff,0)],[]),
DSFLD( 7, 3, 'ss ',[DSBLD( 80, 81, 0xffff,0)],[]),
DSFLD( 7, 0, '_res',[DSBLD( 82, 83, 0xffff,0)],
[DSVAL(0xffff,-1,' \rss reserved!')]),
DSFLD( 7,12, 'esp ',[DSBLD( 56, 59,0xffffffff,0)],[]),
DSFLD( 8,12, 'ebp ',[DSBLD( 60, 63,0xffffffff,0)],[]),
DSFLD( 9, 3, 'es ',[DSBLD( 72, 73, 0xffff,0)],[]),
DSFLD( 9, 0, '_res',[DSBLD( 74, 75, 0xffff,0)],
[DSVAL(0xffff,-1,' \res reserved!')]),
DSFLD( 9,12, 'esi ',[DSBLD( 64, 67,0xffffffff,0)],[]),
DSFLD(10, 3, 'ds ',[DSBLD( 84, 85, 0xffff,0)],[]),
DSFLD(10, 0, '_res',[DSBLD( 86, 87, 0xffff,0)],
[DSVAL(0xffff,-1,' \rds reserved!')]),
DSFLD(10,12, 'edi ',[DSBLD( 68, 71,0xffffffff,0)],[]),
DSFLD(11, 3, 'fs ',[DSBLD( 88, 89, 0xffff,0)],[]),
DSFLD(11, 0, '_res',[DSBLD( 90, 91, 0xffff,0)],
[DSVAL(0xffff,-1,' \rfs reserved!')]),
DSFLD(11,20, 'ldt ',[DSBLD( 96, 97, 0xffff,0)],[]),
DSFLD(11, 0, '_res',[DSBLD( 98, 99, 0xffff,0)],
[DSVAL(0xffff,-1,' \rldt reserved!')]),
DSFLD(12, 3, 'gs ',[DSBLD( 92, 93, 0xffff,0)],[]),
DSFLD(12, 0, '_res',[DSBLD( 94, 95, 0xffff,0)],
[DSVAL(0xffff,-1,' \rgs reserved!')]),
DSFLD(12,19, 'link ',[DSBLD( 0, 1, 0xffff,0)],[]),
DSFLD(12, 0, '_res',[DSBLD( 2, 3, 0xffff,0)],
[DSVAL(0xffff,-1,' \rlink reserved!')]),
DSFLD(13, 0, 'iomap ',[DSBLD(102,103, 0xffff,0)],[]),
DSFLD(13, 0, '_',[DSBLD(100,100, 0x1,0)],
[DSVAL(0x1,0x1,' \vdbg trap')]),
DSFLD(13, 0, '_',[DSBLD(100,100, 0xfffe,0)],
[DSVAL(0xfffe,-1,' \rt reserved!')])
)
ds_tss = DS('tss', '32 bit task state', 104, 15, 30, '\b---- tss @ 0x%x', _tss_els)
# rflags: intel swdev1 s3.4.3 pg 3-21 fig 3-8
_rflags_els = (
DSFLD(0, 0, '_',[DSBLD(0,2,0xffffff,0)],
# print the flags left to right
[DSVAL(0x200000,0x200000,'id'),
DSVAL(0x100000,0x100000,' vp'), DSVAL(0x080000,0x080000,' vi'),
DSVAL(0x040000,0x040000,' ac'), DSVAL(0x020000,0x020000,' v8'),
DSVAL(0x010000,0x010000,' r'), DSVAL(0x004000,0x004000,' nt'),
DSVAL(0x003000,0x001000,' io1'),
DSVAL(0x003000,0x002000,' io2'), DSVAL(0x003000,0x003000,' io3'),
DSVAL(0x000800,0x000800,' o'),
DSVAL(0x000400,0x000400,' d'), DSVAL(0x000200,0x000200,' i'),
DSVAL(0x000100,0x000100,' t'), DSVAL(0x000080,0x000080,' s'),
DSVAL(0x000040,0x000040,' z'), DSVAL(0x000010,0x000010,' a'),
DSVAL(0x000004,0x000004,' p'), DSVAL(0x000001,0x000001,' c')]),
)
ds_rflags = DS('flags64', '64 bit cpu flags', 4, 1, 45, None, _rflags_els)
# sample_gdt = "0000000000000000ffff0000009acf00ffff00000093cf00ff1f0010009300009f0f00800b930000ffff0000009a0f00ffff000000920f006800808d00890000"
# sample_tss = "00000000e01f0000100000000000000000000000000000000000000000300000d8004000000000000000000000000000000000000000000000204000000000000000000000000000170000000f000000170000011700000000000000000000004b00010000000000000000000000000000000000000000000000000000000000"
# if __name__ == "__main__":
# # example: multiple gdt entries
# loop_offset = 0 # in bytes
# while loop_offset*2 < len(sample_gdt):
# # break data into descriptor size chunks
# data = sample_gdt[loop_offset*2:(loop_offset+ds_gdt.dlen)*2]
# for ln in ds_print_one(data, ds_gdt):
# print(ln)
# loop_offset += ds_gdt.dlen
#
# # example: one tss
# for ln in ds_print_one(sample_tss, ds_tss):
# print(ln)
data_structs = [ds_gdt, ds_gdt64, ds_tss, ds_eflags, ds_rflags]
|
|
import collections.abc
import contextlib
import copy
import itertools
import random
import string
import threading
from functools import total_ordering, wraps
from typing import TYPE_CHECKING, Iterable, List, Optional, Union
from loguru import logger
from sqlalchemy import Column, Integer, String, Unicode
from flexget import config_schema, db_schema
from flexget.db_schema import VersionedBaseMeta
from flexget.entry import Entry, EntryState, EntryUnicodeError
from flexget.event import event, fire_event
from flexget.manager import Session
from flexget.plugin import (
DependencyError,
PluginError,
PluginWarning,
get_plugins,
phase_methods,
plugin_schemas,
)
from flexget.plugin import plugins as all_plugins
from flexget.plugin import task_phases
from flexget.terminal import capture_console
from flexget.utils import requests
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimpleTaskPersistence
from flexget.utils.sqlalchemy_utils import ContextSession
from flexget.utils.template import FlexGetTemplate, render_from_task
from flexget.utils.tools import MergeException, get_config_hash, merge_dict_from_to
logger = logger.bind(name='task')
if TYPE_CHECKING:
Base = VersionedBaseMeta
else:
Base = db_schema.versioned_base('feed', 0)
class TaskConfigHash(Base):
"""Stores the config hash for tasks so that we can tell if the config has changed since last run."""
__tablename__ = 'feed_config_hash'
id = Column(Integer, primary_key=True)
task = Column('name', Unicode, index=True, nullable=False)
hash = Column('hash', String)
def __repr__(self) -> str:
return f'<TaskConfigHash(task={self.task},hash={self.hash})>'
@with_session
def config_changed(task: str = None, session: ContextSession = None) -> None:
"""
    Forces the config_modified flag to be True on the next run of `task`. Used when the db changes and all
entries need to be reprocessed.
.. WARNING: DO NOT (FURTHER) USE FROM PLUGINS
:param task: Name of the task. If `None`, will be set for all tasks.
:param session: sqlalchemy Session instance
"""
logger.debug('Marking config for {} as changed.', (task or 'all tasks'))
task_hash = session.query(TaskConfigHash)
if task:
task_hash = task_hash.filter(TaskConfigHash.task == task)
task_hash.delete()
def use_task_logging(func):
@wraps(func)
def wrapper(self, *args, **kw):
# Set the appropriate logger context while running task
cms = [logger.contextualize(task=self.name, task_id=self.id, session_id=self.session_id)]
# Capture console output if configured to do so
if self.output:
cms.append(capture_console(self.output))
with contextlib.ExitStack() as stack:
for cm in cms:
stack.enter_context(cm)
return func(self, *args, **kw)
return wrapper
class EntryIterator:
"""An iterator over a subset of entries to emulate old task.accepted/rejected/failed/entries properties."""
def __init__(self, entries: List[Entry], states: Union[EntryState, Iterable[EntryState]]):
self.all_entries = entries
if isinstance(states, EntryState):
states = [states]
self.filter = lambda e: e._state in states
def __iter__(self) -> Iterable[Entry]:
return filter(self.filter, self.all_entries)
def __bool__(self):
return any(e for e in self)
def __len__(self):
return sum(1 for _e in self)
def __add__(self, other):
return itertools.chain(self, other)
def __radd__(self, other):
return itertools.chain(other, self)
def __getitem__(self, item) -> Union[Entry, Iterable[Entry]]:
if isinstance(item, slice):
return list(itertools.islice(self, item.start, item.stop))
if not isinstance(item, int):
raise ValueError('Index must be integer.')
for index, entry in enumerate(self):
if index == item:
return entry
else:
raise IndexError(f'{item} is out of bounds')
def reverse(self):
self.all_entries.sort(reverse=True)
def sort(self, *args, **kwargs):
self.all_entries.sort(*args, **kwargs)
class EntryContainer(list):
"""Container for a list of entries, also contains accepted, rejected failed iterators over them."""
def __init__(self, iterable: list = None):
list.__init__(self, iterable or [])
self._entries = EntryIterator(self, [EntryState.UNDECIDED, EntryState.ACCEPTED])
self._accepted = EntryIterator(
self, EntryState.ACCEPTED
) # accepted entries, can still be rejected
self._rejected = EntryIterator(
self, EntryState.REJECTED
) # rejected entries, can not be accepted
self._failed = EntryIterator(self, EntryState.FAILED) # failed entries
self._undecided = EntryIterator(self, EntryState.UNDECIDED) # undecided entries (default)
# Make these read-only properties
entries: EntryIterator = property(lambda self: self._entries)
accepted: EntryIterator = property(lambda self: self._accepted)
rejected: EntryIterator = property(lambda self: self._rejected)
failed: EntryIterator = property(lambda self: self._failed)
undecided: EntryIterator = property(lambda self: self._undecided)
def __repr__(self) -> str:
return f'<EntryContainer({list.__repr__(self)})>'
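# Illustrative sketch (not part of this module): EntryContainer keeps one shared
# list of entries and exposes lazily filtered views over it.  The entry
# title/url used here are made up.
def _entry_container_example():
    container = EntryContainer([Entry(title='example', url='http://localhost/example')])
    # nothing has accepted or rejected the entry yet, so it is only 'undecided'
    return list(container.undecided), len(container.accepted), len(container.rejected)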
class TaskAbort(Exception):
def __init__(self, reason: str, silent: bool = False) -> None:
self.reason = reason
self.silent = silent
def __repr__(self):
return f'TaskAbort(reason={self.reason}, silent={self.silent})'
@total_ordering
class Task:
"""
Represents one task in the configuration.
**Fires events:**
* task.execute.before_plugin
      Before a plugin is executed. Note that since this also includes all
      builtin plugins, the number of calls can be quite high.
``parameters: task, keyword``
* task.execute.after_plugin
After a plugin has been executed.
``parameters: task, keyword``
* task.execute.started
Before a task starts execution
* task.execute.completed
After task execution has been completed
``parameters: task``
"""
# Used to determine task order, when priority is the same
_counter = itertools.count()
RERUN_DEFAULT = 5
RERUN_MAX = 100
def __init__(
self,
manager,
name,
config=None,
options=None,
output=None,
session_id=None,
priority=None,
suppress_warnings=None,
):
"""
:param Manager manager: Manager instance.
:param string name: Name of the task.
:param dict config: Task configuration.
:param options: dict or argparse namespace with options for this task
:param output: A filelike that all console output will be sent to for this task.
:param session_id: Session id that will be attached to all log messages for filtering
:param priority: If multiple tasks are waiting to run, the task with the lowest priority will be run first.
            The default is 0; if the cron option is set, the default is 10 instead (so cron runs execute after interactive tasks).
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
"""
self.name = str(name)
self.id = ''.join(random.choice(string.digits) for _ in range(6))
self.manager = manager
if config is None:
config = manager.config['tasks'].get(name, {})
self.config = copy.deepcopy(config)
self.prepared_config = None
if options is None:
options = copy.copy(self.manager.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.manager.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
# If execution hasn't specifically set the `allow_manual` flag, set it to False by default
if not hasattr(options, 'allow_manual'):
setattr(options, 'allow_manual', False)
self.options = options
self.output = output
self.session_id = session_id
self.suppress_warnings = suppress_warnings or []
if priority is None:
self.priority = 10 if self.options.cron else 0
else:
self.priority = priority
self._count = next(self._counter)
self.finished_event = threading.Event()
# simple persistence
self.simple_persistence = SimpleTaskPersistence(self)
# rerun related flags and values
self._rerun_count = 0
self._max_reruns = Task.RERUN_DEFAULT
self._reruns_locked = False
self.config_modified = None
self.enabled = not self.name.startswith('_')
# These are just to query what happened in task. Call task.abort to set.
self.aborted = False
self.abort_reason = None
self.silent_abort = False
self.session = None
self.requests = requests.Session()
# List of all entries in the task
self._all_entries = EntryContainer()
self._rerun = False
self.disabled_phases = []
self.disabled_plugins = []
# current state
self.current_phase = None
self.current_plugin = None
self.traceback: Optional[str] = None
@property
def max_reruns(self):
"""How many times task can be rerunned before stopping"""
return self._max_reruns
@max_reruns.setter
def max_reruns(self, value):
"""Set new maximum value for reruns unless property has been locked"""
if not self._reruns_locked:
self._max_reruns = value
else:
logger.debug('max_reruns is locked, {} tried to modify it', self.current_plugin)
def lock_reruns(self):
"""Prevent modification of max_reruns property"""
logger.debug('Enabling rerun lock')
self._reruns_locked = True
def unlock_reruns(self):
"""Allow modification of max_reruns property"""
logger.debug('Releasing rerun lock')
self._reruns_locked = False
@property
def reruns_locked(self):
return self._reruns_locked
@property
def is_rerun(self):
return bool(self._rerun_count)
@property
def rerun_count(self):
return self._rerun_count
@property
def undecided(self):
"""
.. deprecated:: Use API v3
.. note:: We did not migrate to v3
If I remember correctly the idea was to make v3 signature
on_task_xxx(task, config, entries)
Param entries would be EntryContainer, which has convenience
iterator methods:
- entries.accepted
- entries.failed
- etc, which you see here
"""
return self.all_entries.undecided
@property
def failed(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.failed
@property
def rejected(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.rejected
@property
def accepted(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.accepted
@property
def entries(self):
"""
.. deprecated:: Use API v3
"""
return self.all_entries.entries
@property
def all_entries(self):
"""
.. deprecated:: Use API v3
"""
return self._all_entries
def __lt__(self, other):
return (self.priority, self._count) < (other.priority, other._count)
def __eq__(self, other):
return (self.priority, self._count) == (other.priority, other._count)
def __str__(self):
return '<Task(name=%s,aborted=%s)>' % (self.name, self.aborted)
def disable_phase(self, phase):
"""Disable ``phase`` from execution.
:param string phase: Name of ``phase``
:raises ValueError: *phase* could not be found.
"""
if phase not in task_phases:
raise ValueError('%s is not a valid phase' % phase)
if phase not in self.disabled_phases:
logger.debug('Disabling {} phase', phase)
self.disabled_phases.append(phase)
def disable_plugin(self, plugin):
"""Disable ``plugin`` from execution.
:param string plugin: Name of ``plugin``
:raises ValueError: *plugin* could not be found.
"""
if plugin not in all_plugins:
raise ValueError(f'`{plugin}` is not a valid plugin.')
self.disabled_plugins.append(plugin)
def abort(self, reason='Unknown', silent=False, traceback: str = None):
"""Abort this task execution, no more plugins will be executed except the abort handling ones."""
self.aborted = True
self.abort_reason = reason
self.silent_abort = silent
self.traceback = traceback
if not self.silent_abort:
logger.warning('Aborting task (plugin: {})', self.current_plugin)
else:
logger.debug('Aborting task (plugin: {})', self.current_plugin)
raise TaskAbort(reason, silent=silent)
def find_entry(self, category='entries', **values):
"""
Find and return :class:`~flexget.entry.Entry` with given attributes from task or None
:param string category: entries, accepted, rejected or failed. Defaults to entries.
:param values: Key values of entries to be searched
:return: Entry or None
"""
cat = getattr(self, category)
if not isinstance(cat, EntryIterator):
            raise TypeError('category must be an EntryIterator')
for entry in cat:
for k, v in values.items():
if not (k in entry and entry[k] == v):
break
else:
return entry
return None
def plugins(self, phase=None):
"""Get currently enabled plugins.
:param string phase:
Optional, limits to plugins currently configured on given phase, sorted in phase order.
:return:
An iterator over configured :class:`flexget.plugin.PluginInfo` instances enabled on this task.
"""
if phase:
plugins = sorted(
get_plugins(phase=phase), key=lambda p: p.phase_handlers[phase], reverse=True
)
else:
plugins = iter(all_plugins.values())
return (p for p in plugins if p.name in self.config or p.builtin)
def __run_task_phase(self, phase):
"""Executes task phase, ie. call all enabled plugins on the task.
Fires events:
* task.execute.before_plugin
* task.execute.after_plugin
:param string phase: Name of the phase
"""
if phase not in phase_methods:
raise Exception('%s is not a valid task phase' % phase)
# warn if no inputs, filters or outputs in the task
if phase in ['input', 'filter', 'output']:
if not self.manager.unit_test:
# Check that there is at least one manually configured plugin for these phases
for p in self.plugins(phase):
if not p.builtin:
break
else:
if phase not in self.suppress_warnings:
if phase == 'filter':
logger.warning(
'Task does not have any filter plugins to accept entries. '
'You need at least one to accept the entries you want.'
)
else:
logger.warning(
'Task doesn\'t have any {} plugins, you should add (at least) one!',
phase,
)
for plugin in self.plugins(phase):
# Abort this phase if one of the plugins disables it
if phase in self.disabled_phases:
return
if plugin.name in self.disabled_plugins:
continue
# store execute info, except during entry events
self.current_phase = phase
self.current_plugin = plugin.name
if plugin.api_ver == 1:
# backwards compatibility
# pass method only task (old behaviour)
args = (self,)
else:
# pass method task, copy of config (so plugin cannot modify it)
args = (self, copy.copy(self.config.get(plugin.name)))
# Hack to make task.session only active for a single plugin
with Session() as session:
self.session = session
try:
fire_event('task.execute.before_plugin', self, plugin.name)
response = self.__run_plugin(plugin, phase, args)
if phase == 'input' and response:
# add entries returned by input to self.all_entries
for e in response:
e.task = self
self.all_entries.append(e)
finally:
fire_event('task.execute.after_plugin', self, plugin.name)
self.session = None
# check config hash for changes at the end of 'prepare' phase
if phase == 'prepare':
self.check_config_hash()
def __run_plugin(self, plugin, phase, args=None, kwargs=None):
"""
Execute given plugins phase method, with supplied args and kwargs.
If plugin throws unexpected exceptions :meth:`abort` will be called.
:param PluginInfo plugin: Plugin to be executed
:param string phase: Name of the phase to be executed
:param args: Passed to the plugin
:param kwargs: Passed to the plugin
"""
keyword = plugin.name
method = plugin.phase_handlers[phase]
if args is None:
args = []
if kwargs is None:
kwargs = {}
# log.trace('Running %s method %s' % (keyword, method))
# call the plugin
try:
result = method(*args, **kwargs)
# We exhaust any iterator inputs here to make sure we catch exceptions properly.
if isinstance(result, collections.abc.Iterable):
result = list(result)
return result
except TaskAbort:
raise
except PluginWarning as warn:
# check if this warning should be logged only once (may keep repeating)
if warn.kwargs.get('log_once', False):
from flexget.utils.log import log_once
log_once(warn.value, warn.logger)
else:
warn.logger.warning(warn)
except EntryUnicodeError as eue:
msg = 'Plugin %s tried to create non-unicode compatible entry (key: %s, value: %r)' % (
keyword,
eue.key,
eue.value,
)
logger.critical(msg)
self.abort(msg)
except PluginError as err:
err.logger.critical(err.value)
self.abort(err.value)
except DependencyError as e:
msg = 'Plugin `%s` cannot be used because dependency `%s` is missing.' % (
keyword,
e.missing,
)
logger.critical(e.message)
self.abort(msg)
except Warning as e:
# If warnings have been elevated to errors
msg = 'Warning during plugin %s: %s' % (keyword, e)
logger.exception(msg)
self.abort(msg)
except Exception as e:
msg = 'BUG: Unhandled error in plugin %s: %s' % (keyword, e)
logger.opt(exception=True).critical(msg)
traceback = self.manager.crash_report()
self.abort(msg, traceback=traceback)
def rerun(self, plugin=None, reason=None):
"""
Immediately re-run the task after execute has completed,
task can be re-run up to :attr:`.max_reruns` times.
:param str plugin: Plugin name
:param str reason: Why the rerun is done
"""
msg = (
            'Plugin {0} has requested the task to be run again after execution has completed.'.format(
self.current_plugin if plugin is None else plugin
)
)
if reason:
msg += ' Reason: {0}'.format(reason)
# Only print the first request for a rerun to the info log
if self._rerun:
logger.debug(msg)
else:
logger.info(msg)
self._rerun = True
def config_changed(self):
"""
Sets config_modified flag to True for the remainder of this run.
Used when the db changes, and all entries need to be reprocessed.
"""
self.config_modified = True
def merge_config(self, new_config):
try:
merge_dict_from_to(new_config, self.config)
except MergeException as e:
raise PluginError('Failed to merge configs for task %s: %s' % (self.name, e))
def check_config_hash(self):
"""
Checks the task's config hash and updates the hash if necessary.
"""
# Save current config hash and set config_modified flag
config_hash = get_config_hash(self.config)
if self.is_rerun:
# Restore the config to state right after start phase
if self.prepared_config:
self.config = copy.deepcopy(self.prepared_config)
else:
logger.error('BUG: No prepared_config on rerun, please report.')
with Session() as session:
last_hash = (
session.query(TaskConfigHash).filter(TaskConfigHash.task == self.name).first()
)
if not last_hash:
session.add(TaskConfigHash(task=self.name, hash=config_hash))
self.config_changed()
elif last_hash.hash != config_hash:
last_hash.hash = config_hash
self.config_changed()
def _execute(self):
"""Executes the task without rerunning."""
if not self.enabled:
logger.debug('Not running disabled task {}', self.name)
return
logger.debug('executing {}', self.name)
# Handle keyword args
if self.options.learn:
logger.info('Disabling download and output phases because of --learn')
self.disable_phase('download')
self.disable_phase('output')
if self.options.disable_phases:
list(map(self.disable_phase, self.options.disable_phases))
if self.options.inject:
# If entries are passed for this execution (eg. rerun), disable the input phase
self.disable_phase('input')
self.all_entries.extend(copy.deepcopy(self.options.inject))
# run phases
try:
for phase in task_phases:
if phase in self.disabled_phases:
# log keywords not executed
if phase not in self.suppress_warnings:
for plugin in self.plugins(phase):
if plugin.name in self.config:
logger.info(
'Plugin {} is not executed in {} phase because the phase is disabled '
'(e.g. --test, --inject)',
plugin.name,
phase,
)
continue
if phase in ('start', 'prepare') and self.is_rerun:
logger.debug('skipping phase {} during rerun', phase)
continue
if phase == 'exit':
# Make sure we run the entry complete hook before exit phase. These hooks may call for a rerun,
# which would mean we should skip the exit phase during this run.
for entry in self.all_entries:
entry.complete()
if self._rerun and self._rerun_count < self.max_reruns:
logger.debug('not running task_exit yet because task will rerun')
continue
# run all plugins with this phase
self.__run_task_phase(phase)
if phase == 'start':
# Store a copy of the config state after start phase to restore for reruns
self.prepared_config = copy.deepcopy(self.config)
except TaskAbort:
try:
self.__run_task_phase('abort')
except TaskAbort as e:
logger.exception('abort handlers aborted: {}', e)
raise
@use_task_logging
def execute(self):
"""
        Executes the task.
If :attr:`.enabled` is False task is not executed. Certain :attr:`.options`
affect how execution is handled.
- :attr:`.options.disable_phases` is a list of phases that are not enabled
for this execution.
- :attr:`.options.inject` is a list of :class:`Entry` instances used instead
of running input phase.
"""
self.finished_event.clear()
try:
if self.options.cron:
self.manager.db_cleanup()
fire_event('task.execute.started', self)
while True:
self._execute()
# rerun task
if (
self._rerun
and self._rerun_count < self.max_reruns
and self._rerun_count < Task.RERUN_MAX
):
logger.info('Rerunning the task in case better resolution can be achieved.')
self._rerun_count += 1
self._all_entries = EntryContainer()
self._rerun = False
continue
elif self._rerun:
logger.info(
'Task has been re-run {} times already, stopping for now',
self._rerun_count,
)
break
fire_event('task.execute.completed', self)
finally:
self.finished_event.set()
@staticmethod
def validate_config(config):
schema = plugin_schemas(interface='task')
# Don't validate commented out plugins
schema['patternProperties'] = {'^_': {}}
return config_schema.process_config(config, schema)
def __copy__(self):
new = type(self)(self.manager, self.name, self.config, self.options)
# Update all the variables of new instance to match our own
new.__dict__.update(self.__dict__)
# Some mutable objects need to be copies
new.options = copy.copy(self.options)
new.config = copy.deepcopy(self.config)
return new
copy = __copy__
def render(self, template):
"""
Renders a template string based on fields in the entry.
:param template: A template string or FlexGetTemplate that uses jinja2 or python string replacement format.
:return: The result of the rendering.
:rtype: string
:raises RenderError: If there is a problem.
"""
if not isinstance(template, (str, FlexGetTemplate)):
raise ValueError(
'Trying to render non string template or unrecognized template format, got %s'
% repr(template)
)
logger.trace('rendering: {}', template)
return render_from_task(template, self)
@event('config.register')
def register_config_key():
task_config_schema = {
'type': 'object',
'additionalProperties': plugin_schemas(interface='task'),
}
config_schema.register_config_key('tasks', task_config_schema, required=True)
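# Illustrative sketch (not part of this module): plugins can subscribe to the
# task events documented on the Task class using the same `event` decorator
# that registers the config key above; the handler name is hypothetical.
@event('task.execute.completed')
def _log_task_completed(task):
    logger.debug('task {} completed with {} accepted entries', task.name, len(task.accepted))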
|
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is the gadget classifier. It classifies gadgets into 10 different
types: No Operation, Jump, Move Register, Load Constant, Arithmetic,
Load Memory, Store Memory, Arithmetic Load, Arithmetic Store or
Undefined. This classification is based on the paper "Q: Exploit
Hardening Made Easy." So, given a gadget (a RawGadget object) it generates
one or more TypedGadgets annotated with their type.
This algorithm is architecture-agnostic since it operates on the IR
representation of the underlying assembly code.
"""
import random
from barf.analysis.gadget import GadgetType
from barf.analysis.gadget import TypedGadget
from barf.core.reil import ReilEmptyOperand
from barf.core.reil import ReilImmediateOperand
from barf.core.reil import ReilRegisterOperand
class GadgetClassifier(object):
"""Gadget Classifier.
"""
def __init__(self, ir_emulator, architecture_info):
# An instance of a REIL emulator
self._ir_emulator = ir_emulator
# Classifiers ordered by gadget type.
self._classifiers = {
GadgetType.NoOperation : self._classify_no_operation,
GadgetType.Jump : self._classify_jump,
GadgetType.MoveRegister : self._classify_move_register,
GadgetType.LoadConstant : self._classify_load_constant,
GadgetType.Arithmetic : self._classify_arithmetic,
GadgetType.LoadMemory : self._classify_load_memory,
GadgetType.StoreMemory : self._classify_store_memory,
GadgetType.ArithmeticLoad : self._classify_arithmetic_load,
GadgetType.ArithmeticStore : self._classify_arithmetic_store,
}
# Supported arithmetic and logical operations for arithmetic
# gadgets.
self._binary_ops = {
# Arithmetic
"+" : lambda x, y : x + y,
"-" : lambda x, y : x - y,
# "*" : lambda x, y : x * y,
# "/" : lambda x, y : x / y,
# "%" : lambda x, y : x % y,
# Bitwise
"&" : lambda x, y : x & y,
"^" : lambda x, y : x ^ y,
"|" : lambda x, y : x | y,
# "<<" : lambda x, y : x << y,
# ">>" : lambda x, y : x >> y,
}
# Architecture information.
self._arch_info = architecture_info
self._arch_regs = self._arch_info.registers_gp_all
self._arch_regs_parent = self._arch_info.registers_gp_base
self._arch_regs_size = self._arch_info.registers_size
self._address_size = self._arch_info.address_size
# Number of simulation iterations.
self._emu_iters = 10
def classify(self, gadget):
"""Classify gadget.
"""
typed_gadgets = []
for g_type, g_classifier in self._classifiers.items():
try:
typed_gadgets += self._classify(gadget, g_classifier, g_type, self._emu_iters)
            except Exception:
import traceback
print("[-] Error classifying gadgets :")
print(gadget)
print("")
print(traceback.format_exc())
return typed_gadgets
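    # Illustrative usage sketch (not part of the original class): the REIL
    # emulator and architecture-information objects come from BARF, and the
    # variable names below are hypothetical.
    #
    #   classifier = GadgetClassifier(reil_emulator, x86_arch_info)
    #   typed_gadgets = classifier.classify(raw_gadget)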
# Classifiers
# ======================================================================== #
def _classify_no_operation(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify no-operation gadgets.
"""
# TODO: Flags should be taken into account
matchings = []
# Check that registers didn't change their value.
regs_changed = any(regs_init[r] != regs_fini[r] for r in regs_init)
# Check that flags didn't change their value.
flags_changed = False
# Check that memory didn't change.
mem_changed = mem_fini.get_write_count() != 0
if not regs_changed and not flags_changed and not mem_changed:
matchings.append({
"op" : "nop",
})
return matchings
def _classify_jump(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify jump gadgets.
"""
# TODO: Implement.
matchings = []
return matchings
def _classify_move_register(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify move-register gadgets.
"""
matchings = []
regs_init_inv = self._invert_dictionary(regs_init)
# Check for "dst_reg <- src_reg" pattern.
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was written.
if dst_reg not in written_regs:
continue
for src_reg in regs_init_inv.get(dst_val, []):
# Make sure the *src* register was read.
if src_reg not in read_regs:
continue
# Check restrictions...
if self._arch_regs_size[src_reg] != self._arch_regs_size[dst_reg]:
continue
if src_reg == dst_reg:
continue
if regs_init[dst_reg] == regs_init[src_reg]:
continue
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [src_reg_ir],
"dst" : [dst_reg_ir]
})
return matchings
def _classify_load_constant(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify load-constant gadgets.
"""
matchings = []
# Check for "dst_reg <- constant" pattern.
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was written.
if dst_reg not in written_regs:
continue
# Check restrictions...
if dst_val == regs_init[dst_reg]:
continue
dst_val_ir = ReilImmediateOperand(dst_val, self._arch_regs_size[dst_reg])
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [dst_val_ir],
"dst" : [dst_reg_ir]
})
return matchings
def _classify_arithmetic(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify binary-operation gadgets.
"""
matchings = []
# TODO: Review these restrictions.
op_restrictions = {
"+" : lambda x, y : False,
"-" : lambda x, y : x == y,
"|" : lambda x, y : x == y,
"&" : lambda x, y : x == y,
"^" : lambda x, y : x == y,
}
# Check for "dst_reg <- src1_reg OP src2_reg" pattern.
for op_name, op_fn in self._binary_ops.items():
for src_1_reg, src_1_val in regs_init.items():
# Make sure the *src* register was read.
if src_1_reg not in read_regs:
continue
for src_2_reg, src_2_val in regs_init.items():
# Make sure the *src* register was read.
if src_2_reg not in read_regs:
continue
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was written.
if dst_reg not in written_regs:
continue
# Check restrictions.
if self._arch_regs_size[src_1_reg] != self._arch_regs_size[src_2_reg] or \
self._arch_regs_size[src_1_reg] != self._arch_regs_size[dst_reg]:
continue
# Avoid trivial operations.
if op_restrictions[op_name](src_1_reg, src_2_reg):
continue
size = self._arch_regs_size[src_1_reg]
if dst_val == op_fn(src_1_val, src_2_val) & (2**size - 1):
src = sorted([src_1_reg, src_2_reg])
src_ir = [
ReilRegisterOperand(src[0], self._arch_regs_size[src[0]]),
ReilRegisterOperand(src[1], self._arch_regs_size[src[1]])
]
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : src_ir,
"dst" : [dst_reg_ir],
"op" : op_name
})
return matchings
def _classify_load_memory(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify load-memory gadgets.
"""
matchings = []
regs_init_inv = self._invert_dictionary(regs_init)
# Check for "dst_reg <- mem[src_reg + offset]" pattern.
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was written.
if dst_reg not in written_regs:
continue
dst_size = self._arch_regs_size[dst_reg]
# Look for memory addresses that contain *dst_val*.
for src_addr in mem_fini.read_inverse(dst_val, dst_size):
# Look for registers whose values are used as memory
# addresses.
for src_reg, src_val in regs_init.items():
# Make sure the *src* register was read.
if src_reg not in read_regs:
continue
# Check restrictions.
if self._arch_regs_size[src_reg] != self._address_size:
continue
offset = (src_addr - src_val) & (2**self._address_size - 1)
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
src_off_ir = ReilImmediateOperand(offset, self._arch_regs_size[src_reg])
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [src_reg_ir, src_off_ir],
"dst" : [dst_reg_ir]
})
# Check for "dst_reg <- mem[offset]" pattern.
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was written.
if dst_reg not in written_regs:
continue
dst_size = self._arch_regs_size[dst_reg]
for src_addr in mem_fini.read_inverse(dst_val, dst_size):
src_reg_ir = ReilEmptyOperand()
src_off_ir = ReilImmediateOperand(src_addr, self._address_size)
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [src_reg_ir, src_off_ir],
"dst" : [dst_reg_ir]
})
return matchings
def _classify_store_memory(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify store-memory gadgets.
"""
matchings = []
regs_init_inv = self._invert_dictionary(regs_init)
# Check for "mem[dst_reg + offset] <- src_reg" pattern.
for src_reg, src_val in regs_init.items():
# Make sure the *src* register was read.
            if src_reg not in read_regs:
continue
src_size = self._arch_regs_size[src_reg]
for addr in mem_fini.read_inverse(src_val, src_size):
for dst_reg, dst_val in regs_init.items():
                    # Make sure the *dst* register was read (its value forms
                    # the store address).
                    if dst_reg not in read_regs:
continue
# Check restrictions.
if self._arch_regs_size[dst_reg] != self._address_size:
continue
offset = (addr - dst_val) & (2**self._address_size - 1)
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
dst_off_ir = ReilImmediateOperand(offset, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [src_reg_ir],
"dst" : [dst_reg_ir, dst_off_ir]
})
# Check for "mem[offset] <- src_reg" pattern.
for src_reg, src_val in regs_init.items():
# Make sure the *src* register was read.
            if src_reg not in read_regs:
continue
src_size = self._arch_regs_size[src_reg]
for addr in mem_fini.read_inverse(src_val, src_size):
offset = addr & (2**self._address_size - 1)
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
dst_reg_ir = ReilEmptyOperand()
dst_off_ir = ReilImmediateOperand(offset, self._address_size)
matchings.append({
"src" : [src_reg_ir],
"dst" : [dst_reg_ir, dst_off_ir]
})
return matchings
def _classify_arithmetic_load(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify arithmetic-load gadgets.
"""
matchings = []
# Check for "dst_reg <- dst_reg OP mem[src_reg + offset]" pattern.
for op_name, op_fn in self._binary_ops.items():
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was read and written.
if dst_reg not in written_regs or dst_reg not in read_regs:
continue
dst_size = self._arch_regs_size[dst_reg]
for addr in mem_fini.get_addresses():
success, val = mem_fini.try_read(addr, dst_size)
if success and dst_val == op_fn(regs_init[dst_reg], val) & (2**dst_size - 1):
for src_reg, src_val in regs_init.items():
# Make sure the *src* register was read.
                            if src_reg not in read_regs:
continue
# Check restrictions.
if self._arch_regs_size[src_reg] != self._address_size:
continue
offset = (addr - src_val) & (2**self._address_size - 1)
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
src_off_ir = ReilImmediateOperand(offset, self._address_size)
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [dst_reg_ir, src_reg_ir, src_off_ir],
"dst" : [dst_reg_ir],
"op" : op_name
})
# Check for "dst_reg <- dst_reg OP mem[offset]" pattern.
for op_name, op_fn in self._binary_ops.items():
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was read and written.
if dst_reg not in written_regs or dst_reg not in read_regs:
continue
dst_size = self._arch_regs_size[dst_reg]
for addr in mem_fini.get_addresses():
success, val = mem_fini.try_read(addr, dst_size)
if success and dst_val == op_fn(regs_init[dst_reg], val) & (2**dst_size - 1):
src_reg_ir = ReilEmptyOperand()
src_off_ir = ReilImmediateOperand(addr, self._address_size)
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matchings.append({
"src" : [dst_reg_ir, src_reg_ir, src_off_ir],
"dst" : [dst_reg_ir],
"op" : op_name
})
return matchings
def _classify_arithmetic_store(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify arithmetic-store gadgets.
"""
matchings = []
# Check for "m[dst_reg + offset] <- m[dst_reg + offset] OP src_reg" pattern.
for op_name, op_fn in self._binary_ops.items():
for size in [8, 16, 32, 64]:
for addr in mem_fini.get_addresses():
success_read_curr, val_curr = mem_fini.try_read(addr, size)
success_read_prev, val_prev = mem_fini.try_read_prev(addr, size)
if success_read_curr and success_read_prev:
for src_reg, src_val in regs_init.items():
# Make sure the *src* register was read.
                            if src_reg not in read_regs:
continue
# Check restrictions.
if self._arch_regs_size[src_reg] != size:
continue
if val_curr == op_fn(src_val, val_prev) & (2**size - 1):
                                # Find a dst_reg + offset combination that yields the address.
for dst_reg, dst_val in regs_init.items():
                                    # Make sure the *dst* register was read
                                    # (its value forms the store address).
                                    if dst_reg not in read_regs:
continue
# Check restrictions.
if self._arch_regs_size[dst_reg] != self._address_size:
continue
offset = (addr - dst_val) & (2**self._address_size - 1)
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
dst_off_ir = ReilImmediateOperand(offset, self._address_size)
matchings.append({
"src" : [dst_reg_ir, dst_off_ir, \
src_reg_ir],
"dst" : [dst_reg_ir, dst_off_ir],
"op" : op_name
})
# Check for "m[offset] <- m[offset] OP src_reg" pattern.
for op_name, op_fn in self._binary_ops.items():
for size in [8, 16, 32, 64]:
for addr in mem_fini.get_addresses():
success_read_curr, val_curr = mem_fini.try_read(addr, size)
success_read_prev, val_prev = mem_fini.try_read_prev(addr, size)
if success_read_curr and success_read_prev:
for src_reg, src_val in regs_init.items():
# Make sure the *src* register was read.
                            if src_reg not in read_regs:
continue
# Check restrictions.
if self._arch_regs_size[src_reg] != size:
continue
if val_curr == op_fn(src_val, val_prev) & (2**size - 1):
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
dst_reg_ir = ReilEmptyOperand()
dst_off_ir = ReilImmediateOperand(addr, self._address_size)
matchings.append({
"src" : [dst_reg_ir, dst_off_ir, src_reg_ir],
"dst" : [dst_reg_ir, dst_off_ir],
"op" : op_name
})
return matchings
# Auxiliary functions
# ======================================================================== #
def _classify(self, gadget, classifier, gadget_type, iters):
"""Classify gadgets.
"""
# Collect REIL instructions of the gadget.
instrs = [ir_instr for g_instrs in gadget.instrs
for ir_instr in g_instrs.ir_instrs]
# Repeat classification.
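        # Each iteration uses an independent random initial context; a
        # matching is kept only if it holds in every run, which filters out
        # coincidental matches.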
results = []
for _ in xrange(iters):
# Reset emulator.
self._ir_emulator.reset()
# Generate random values for registers.
regs_initial = self._init_regs_random()
# Emulate gadget.
try:
regs_final, mem_final = self._ir_emulator.execute_lite(
instrs,
regs_initial
)
except:
# Catch emulator exceptions like ZeroDivisionError, etc.
results += [([], [])]
continue
# Compute values for all registers. For example, in x86, it
# computes 'al' from 'eax'.
regs_initial_full = self._compute_full_context(regs_initial)
regs_final_full = self._compute_full_context(regs_final)
# Get written and read registers.
regs_written = self._ir_emulator.written_registers
regs_read = self._ir_emulator.read_registers
            # Compute modified registers.
mod_regs = self._compute_mod_regs(
regs_initial_full,
regs_final_full
)
            # Classify the gadget based on the initial and final contexts.
matchings = classifier(
regs_initial_full,
regs_final_full,
mem_final,
regs_written,
regs_read
)
# Save results.
results += [(matchings, mod_regs)]
# Analyze results and compute candidate gadgets.
        candidates, mod_regs = self._analyze_execution_results(results)
# Create classified gadgets.
classified = self._create_typed_gadgets(
gadget,
candidates,
mod_regs,
gadget_type
)
return classified
    def _analyze_execution_results(self, results):
matching_candidates, _ = results[0]
classified_candidates = []
for matching_candidate in matching_candidates:
valid_matching = True
for matchings, _ in results[1:]:
if matching_candidate not in matchings:
valid_matching = False
if valid_matching and \
matching_candidate not in classified_candidates:
classified_candidates.append(matching_candidate)
modified_regs = set()
for _, mod_regs in results:
modified_regs = modified_regs.union(set(mod_regs))
return classified_candidates, list(modified_regs)
def _create_typed_gadgets(self, gadget, classified_gadgets, modified_regs, \
gadget_type):
typed_gadgets = []
# Remove register aliases
mod_regs = []
for r in modified_regs:
alias, _ = self._arch_info.alias_mapper.get(r, (None, None))
if not alias:
mod_regs += [r]
elif alias not in modified_regs:
mod_regs += [r]
modified_regs_ir = [ReilRegisterOperand(r, self._arch_regs_size[r]) for r in mod_regs]
for candidate in classified_gadgets:
typed_gadget = TypedGadget(gadget, gadget_type)
if gadget_type in [GadgetType.Arithmetic, \
GadgetType.ArithmeticLoad, GadgetType.ArithmeticStore]:
typed_gadget.operation = candidate["op"]
if candidate.get("op", "") != "nop":
typed_gadget.sources = candidate["src"]
typed_gadget.destination = candidate["dst"]
if gadget_type == GadgetType.StoreMemory:
typed_gadget.modified_registers = [r for r in modified_regs_ir]
else:
typed_gadget.modified_registers = [r for r in modified_regs_ir \
if r not in typed_gadget.destination]
typed_gadgets += [typed_gadget]
return typed_gadgets
def _init_regs_random(self):
"""Initialize register with random values.
"""
# Generate random values and make sure they are all different.
values = set()
while len(values) != len(self._arch_regs_parent):
values.add(random.randint(0, 2**self._arch_info.operand_size - 1))
values = list(values)
# Assign random values to registers.
regs = {}
for idx, reg in enumerate(self._arch_regs_parent):
regs[reg] = values[idx] & (2**self._arch_regs_size[reg] - 1)
return regs
def _compute_mod_regs(self, regs_init, regs_fini):
"""Compute modified registers.
"""
assert regs_init.keys() == regs_fini.keys()
modified_regs = []
for reg in regs_init:
if regs_init[reg] != regs_fini[reg]:
modified_regs.append(reg)
return modified_regs
def _invert_dictionary(self, d):
"""Invert a dictinary.
"""
inv_dict = {}
for k, v in d.items():
inv_dict[v] = inv_dict.get(v, [])
inv_dict[v] += [k]
return inv_dict
def _print_memory(self, memory):
"""Print memory.
"""
for addr, value in memory.items():
print(" 0x%08x : 0x%08x (%d)" % (addr, value, value))
def _print_registers(self, registers):
"""Print registers.
"""
for reg, value in registers.items():
print(" %s : 0x%08x (%d)" % (reg, value, value))
def _compute_full_context(self, registers):
regs_full = {}
reg_mapper = self._arch_info.alias_mapper
for reg in self._arch_regs:
base_reg_name, offset = reg_mapper.get(reg, (None, None))
if base_reg_name:
reg_value = registers[base_reg_name]
reg_value = self._extract_value(reg_value, offset, self._arch_info.registers_size[reg])
else:
reg_value = registers[reg]
regs_full[reg] = reg_value
return regs_full
def _extract_value(self, main_value, offset, size):
        return (main_value >> offset) & (2**size - 1)
def _insert_value(self, main_value, value_to_insert, offset, size):
        main_value &= ~((2**size - 1) << offset)
        main_value |= (value_to_insert & (2**size - 1)) << offset
return main_value
|
|
# -*- coding: utf-8 -*-
"""Provides `nTorque <http://ntorque.com>`_ task queue clients."""
__all__ = [
'DEFAULTS',
'HookDispatcher',
'WebTestDispatcher',
'WorkEngineClient',
'get_torque_api',
'includeme',
]
import logging
logger = logging.getLogger(__name__)
import json
import os
import urlparse
from collections import namedtuple
from os.path import join as join_path
from pyramid.settings import asbool
from ntorque import client
from ntorque import model as ntorque_model
from ntorque.model import constants as nc
from ntorque.tests.ftests import test_client
from . import constants as c
from . import render
from . import util
env = os.environ
DEFAULTS = {
'engine.api_key': util.get_var(env, c.ENGINE_API_KEY_NAMES),
'engine.url': util.get_var(env, c.ENGINE_URL_NAMES, '/engine'),
'torque.api_key': env.get(c.TORQUE_API_KEY, None),
'torque.url': env.get(c.TORQUE_URL, '/ntorque'),
'webhooks.api_key': util.get_var(env, c.WEBHOOKS_API_KEY_NAMES),
'webhooks.url': util.get_var(env, c.WEBHOOKS_URL_NAMES, '/hooks'),
}
def client_factory(client_cls, dispatcher, settings):
"""Shared logic to instantiate a configured torque client utility."""
torque_url = settings.get('torque.url')
torque_api_key = settings.get('torque.api_key')
return client_cls(dispatcher, torque_url, torque_api_key)
class WebTestDispatcher(client.DirectDispatcher):
"""A dispatcher that skips nTorque and just makes the request directly
    using a ``ntorque.tests.ftests.test_client.WebTestPoster``.
"""
def __init__(self, webtest_poster, **kwargs):
self.webtest_poster = webtest_poster
self.parse_qsl = kwargs.get('parse_qsl', urlparse.parse_qsl)
self.header_prefix = kwargs.get('header_prefix', nc.PROXY_HEADER_PREFIX)
self.default_method = kwargs.get('default_method', nc.DEFAULT_METHOD)
def __call__(self, url, post_data, request_headers):
"""Extract the relevant parts of the request data and use it to make
a request directly to the destination url.
"""
# Parse `url` and `method` out of the query params.
params = dict(self.parse_qsl(url.split('?')[1]))
url = params['url']
method = params.get('method', 'POST')
# Get the relevant headers.
headers = {'Content-Type': request_headers['Content-Type']}
for key, value in request_headers.items():
if key.lower().startswith(self.header_prefix.lower()):
k = key[len(self.header_prefix):]
headers[k] = value
# Make and handle the response.
r = self.webtest_poster(url, post_data, headers, method=method)
return self.handle(r)
class HookDispatcher(object):
"""Instantiate and authenticate a generic torque client and use it to
dispatch web hooks tasks.
"""
def __init__(self, request, **kwargs):
"""Compose and instantiate client."""
# Compose.
self.request = request
self.join_path = kwargs.get('join_path', join_path)
client_cls = kwargs.get('client_cls', client.HybridTorqueClient)
dispatcher = kwargs.get('dispatcher', client.AfterCommitDispatcher())
settings = request.registry.settings
self.client = client_factory(client_cls, dispatcher, settings)
def __call__(self, path, data=None, headers=None, timeout=None):
"""Use the request to instantiate a client and dispatch a request."""
# Unpack.
request = self.request
settings = request.registry.settings
webhooks_url = settings.get('webhooks.url')
webhooks_api_key = settings.get('webhooks.api_key')
# Authenticate.
if headers is None:
headers = {}
if webhooks_api_key:
for item in c.WEBHOOKS_API_KEY_NAMES:
key = 'NTORQUE-PASSTHROUGH-{0}'.format(item)
headers[key] = webhooks_api_key
# JSONify.
        if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json; utf-8'
if data is not None and not isinstance(data, basestring):
data = render.json_dumps(request, data)
# Dispatch.
url = self.join_path(webhooks_url, path)
status, response_data, response_headers = self.client(url, data=data,
headers=headers, timeout=timeout)
# Return.
headers_dict = dict(response_headers.items()) if response_headers else {}
return {
'data': data,
'path': path,
'response': response_data,
'response_headers': headers_dict,
'status': status,
'url': url,
}
class WorkEngineClient(object):
"""Instantiate and authenticate a generic torque client and use it to
dispatch work engine updates.
"""
def __init__(self, request, **kwargs):
"""Compose and instantiate client."""
# Compose.
self.request = request
self.join_path = kwargs.get('join_path', join_path)
self.unpack = kwargs.get('unpack', util.get_unpacked_object_id)
client_cls = kwargs.get('client_cls', client.HybridTorqueClient)
dispatcher = kwargs.get('dispatcher', client.AfterCommitDispatcher())
settings = request.registry.settings
self.client = client_factory(client_cls, dispatcher, settings)
def _get_traversal_path(self, route, context):
"""Get the traversal path to context, prefixed with the route.
E.g.: the path to <Job id=1234> on the events route is
`events/jobs/1234`.
"""
# Get the request path from the `context`, e.g.: a `Job` instance
# with id `1234` will result in a path of ``jobs/1234``. If there
# is no context the path will be empty.
parts = self.unpack(context) if context else []
# Prepend the route part.
parts = [route] + list(parts)
# Lose any ``None``s.
parts = (str(item) for item in parts if item is not None)
# Return as a `/` joined string.
return self.join_path(*parts)
def dispatch(self, path, data=None, headers=None, timeout=None):
"""Use the request to instantiate a client and dispatch a request."""
# Unpack.
request = self.request
settings = request.registry.settings
engine_url = settings.get('engine.url')
engine_api_key = settings.get('engine.api_key')
# Authenticate.
if headers is None:
headers = {}
if engine_api_key:
for item in c.ENGINE_API_KEY_NAMES:
key = 'NTORQUE-PASSTHROUGH-{0}'.format(item)
headers[key] = engine_api_key
# JSONify.
        if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json; utf-8'
if data is not None and not isinstance(data, basestring):
data = render.json_dumps(request, data)
# Dispatch.
url = self.join_path(engine_url, path)
status, response_data, response_headers = self.client(url, data=data,
headers=headers, timeout=timeout)
# Return.
headers_dict = dict(response_headers.items()) if response_headers else {}
return {
'data': data,
'path': path,
'response': response_data,
'response_headers': headers_dict,
'status': status,
'url': url,
}
def changed(self, context, event, state=None):
"""Tell the work engine that a ``context`` changed state."""
# Get the path to the context on the events route.
path = self._get_traversal_path('events', context)
# Either use the state passed in or look it up on the context.
if state is None:
state = context.work_status.value
# Build the post data.
data = {
'state': state,
}
if event:
data['event_id'] = event.id
logger.info((
'torque.engine.changed',
'context: ', context.class_slug, context.id,
'new state: ', state,
))
# Dispatch to the engine.
return self.dispatch(path, data=data)
def happened(self, context, action, event=None, **kwargs):
"""Tell the work engine that an action happened to a ``context``."""
# Get the path to the context on the events route.
path = self._get_traversal_path('events', context)
# Build the post data.
data = {
'action': action,
}
if event:
data['event_id'] = event.id
logger.info((
'torque.engine.happened',
'context: ', context.class_slug, context.id,
'action: ', action,
))
# Dispatch to the engine.
return self.dispatch(path, data=data)
def result(self, context, operation, result, event=None, event_id=None, **kwargs):
"""Tell the work engine that an ``operation`` had the specified ``result``."""
# Get the path to the context on the results route.
path = self._get_traversal_path('results', context)
# Build the post data.
data = {
'operation': operation,
'result': result,
}
if event:
data['event_id'] = event.id
elif event_id:
data['event_id'] = event_id
else:
raise Exception('You either need an event or an event_id.')
logger.info((
'torque.engine.result',
'context: ', context.class_slug, context.id,
'operation: ', operation,
'result', result,
))
# Dispatch to the engine.
return self.dispatch(path, data=data)
def get_torque_api(request):
"""Provide a ``request.torque`` api, where the dispatchers used depend
on whether we're ftesting or not.
"""
# Are we ftesting and do we explicitly want to enable dispatch anyway?
is_testing = request.environ.get('paste.testing', False)
if is_testing:
settings = request.registry.settings
key = 'torque.enable_ftesting_dispatch'
should_enable = asbool(settings.get(key, False))
if should_enable:
poster = test_client.WebTestPoster(settings['webtest_app'])
default = WebTestDispatcher(poster)
immediate = WebTestDispatcher(poster)
else:
default = client.NoopDispatcher()
immediate = client.NoopDispatcher()
        client_cls = client.HTTPTorqueClient
else:
default = client.AfterCommitDispatcher()
immediate = client.DirectDispatcher()
        client_cls = client.HybridTorqueClient
# Provide the api.
api = {
'dispatch': HookDispatcher(request, dispatcher=default,
client_cls=client_cls),
'dispatch_now': HookDispatcher(request, dispatcher=immediate,
client_cls=client_cls),
'engine': WorkEngineClient(request, dispatcher=default,
client_cls=client_cls),
}
keys, values = zip(*api.items())
return namedtuple('Torque', keys)(*values)
def includeme(config, **kwargs):
"""Apply default settings and register the torque application id."""
# Compose.
lookup = kwargs.get('lookup', ntorque_model.LookupApplication())
# Apply the default settings.
settings = config.get_settings()
for key, value in DEFAULTS.items():
settings.setdefault(key, value)
config.add_request_method(get_torque_api, 'torque', reify=True)
# Register the api authenticated torque `application.id`.
api_key = settings.get(c.TORQUE_API_KEY, None)
if api_key:
app = lookup(api_key)
if app:
settings.setdefault('torque.api_authenticated_app_id', app.id)
|
|
"""
Handlers for predicates related to set membership: integer, rational, etc.
"""
from __future__ import print_function, division
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler, test_closed_group
from sympy.core.numbers import pi
from sympy.functions.elementary.exponential import exp, log
from sympy import I
class AskIntegerHandler(CommonHandler):
"""
Handler for Q.integer
Test that an expression belongs to the field of integer numbers
"""
@staticmethod
def Symbol(expr, assumptions):
return expr.is_integer
@staticmethod
def _number(expr, assumptions):
# helper method
try:
i = int(expr.round())
if not (expr - i).equals(0):
raise TypeError
return True
except TypeError:
return False
@staticmethod
def Add(expr, assumptions):
"""
Integer + Integer -> Integer
Integer + !Integer -> !Integer
!Integer + !Integer -> ?
"""
if expr.is_number:
return AskIntegerHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.integer)
@staticmethod
def Mul(expr, assumptions):
"""
Integer*Integer -> Integer
Integer*Irrational -> !Integer
Odd/Even -> !Integer
Integer*Rational -> ?
"""
if expr.is_number:
return AskIntegerHandler._number(expr, assumptions)
_output = True
for arg in expr.args:
if not ask(Q.integer(arg), assumptions):
if arg.is_Rational:
if arg.q == 2:
return ask(Q.even(2*expr), assumptions)
if ~(arg.q & 1):
return None
elif ask(Q.irrational(arg), assumptions):
if _output:
_output = False
else:
return
else:
return
else:
return _output
Pow = Add
int, Integer = [staticmethod(CommonHandler.AlwaysTrue)]*2
Pi, Exp1, GoldenRatio, Infinity, NegativeInfinity, ImaginaryUnit = \
[staticmethod(CommonHandler.AlwaysFalse)]*6
@staticmethod
def Rational(expr, assumptions):
# rationals with denominator one get
# evaluated to Integers
return False
@staticmethod
def Float(expr, assumptions):
return int(expr) == expr
@staticmethod
def Abs(expr, assumptions):
return ask(Q.integer(expr.args[0]), assumptions)
@staticmethod
def MatrixElement(expr, assumptions):
return ask(Q.integer_elements(expr.args[0]), assumptions)
Determinant = Trace = MatrixElement
class AskRationalHandler(CommonHandler):
"""
Handler for Q.rational
Test that an expression belongs to the field of rational numbers
"""
@staticmethod
def Symbol(expr, assumptions):
return expr.is_rational
@staticmethod
def Add(expr, assumptions):
"""
Rational + Rational -> Rational
Rational + !Rational -> !Rational
!Rational + !Rational -> ?
"""
if expr.is_number:
if expr.as_real_imag()[1]:
return False
return test_closed_group(expr, assumptions, Q.rational)
Mul = Add
@staticmethod
def Pow(expr, assumptions):
"""
Rational ** Integer -> Rational
Irrational ** Rational -> Irrational
Rational ** Irrational -> ?
"""
if ask(Q.integer(expr.exp), assumptions):
return ask(Q.rational(expr.base), assumptions)
elif ask(Q.rational(expr.exp), assumptions):
if ask(Q.prime(expr.base), assumptions):
return False
Rational, Float = \
[staticmethod(CommonHandler.AlwaysTrue)]*2 # Float is finite-precision
ImaginaryUnit, Infinity, NegativeInfinity, Pi, Exp1, GoldenRatio = \
[staticmethod(CommonHandler.AlwaysFalse)]*6
@staticmethod
def exp(expr, assumptions):
x = expr.args[0]
if ask(Q.rational(x), assumptions):
return ask(~Q.nonzero(x), assumptions)
@staticmethod
def cot(expr, assumptions):
x = expr.args[0]
if ask(Q.rational(x), assumptions):
return False
@staticmethod
def log(expr, assumptions):
x = expr.args[0]
if ask(Q.rational(x), assumptions):
return ask(~Q.nonzero(x - 1), assumptions)
sin, cos, tan, asin, atan = [exp]*5
acos, acot = log, cot
class AskIrrationalHandler(CommonHandler):
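    """
    Handler for Q.irrational
    Test that an expression belongs to the set of irrational numbers,
    that is, real numbers that are not rational
    """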
@staticmethod
def Symbol(expr, assumptions):
return expr.is_irrational
@staticmethod
def Basic(expr, assumptions):
_real = ask(Q.real(expr), assumptions)
if _real:
_rational = ask(Q.rational(expr), assumptions)
if _rational is None:
return None
return not _rational
else:
return _real
class AskRealHandler(CommonHandler):
"""
Handler for Q.real
Test that an expression belongs to the field of real numbers
"""
@staticmethod
def Symbol(expr, assumptions):
return expr.is_real
@staticmethod
def _number(expr, assumptions):
# let as_real_imag() work first since the expression may
# be simpler to evaluate
i = expr.as_real_imag()[1].evalf(2)
if i._prec != 1:
return not i
# allow None to be returned if we couldn't show for sure
# that i was 0
@staticmethod
def Add(expr, assumptions):
"""
Real + Real -> Real
Real + (Complex & !Real) -> !Real
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.real)
@staticmethod
def Mul(expr, assumptions):
"""
Real*Real -> Real
Real*Imaginary -> !Real
Imaginary*Imaginary -> Real
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
result = True
for arg in expr.args:
if ask(Q.real(arg), assumptions):
pass
elif ask(Q.imaginary(arg), assumptions):
result = result ^ True
else:
break
else:
return result
@staticmethod
def Pow(expr, assumptions):
"""
Real**Integer -> Real
Positive**Real -> Real
Real**(Integer/Even) -> Real if base is nonnegative
Real**(Integer/Odd) -> Real
Imaginary**(Integer/Even) -> Real
Imaginary**(Integer/Odd) -> not Real
Imaginary**Real -> ? since Real could be 0 (giving real) or 1 (giving imaginary)
b**Imaginary -> Real if log(b) is imaginary and b != 0 and exponent != integer multiple of I*pi/log(b)
Real**Real -> ? e.g. sqrt(-1) is imaginary and sqrt(2) is not
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
if expr.base.func == exp:
if ask(Q.imaginary(expr.base.args[0]), assumptions):
if ask(Q.imaginary(expr.exp), assumptions):
return True
# If the i = (exp's arg)/(I*pi) is an integer or half-integer
# multiple of I*pi then 2*i will be an integer. In addition,
# exp(i*I*pi) = (-1)**i so the overall realness of the expr
# can be determined by replacing exp(i*I*pi) with (-1)**i.
i = expr.base.args[0]/I/pi
if ask(Q.integer(2*i), assumptions):
return ask(Q.real(((-1)**i)**expr.exp), assumptions)
return
if ask(Q.imaginary(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
odd = ask(Q.odd(expr.exp), assumptions)
if odd is not None:
return not odd
return
if ask(Q.imaginary(expr.exp), assumptions):
imlog = ask(Q.imaginary(log(expr.base)), assumptions)
if imlog is not None:
# I**i -> real, log(I) is imag;
# (2*I)**i -> complex, log(2*I) is not imag
return imlog
if ask(Q.real(expr.base), assumptions):
if ask(Q.real(expr.exp), assumptions):
if expr.exp.is_Rational and \
ask(Q.even(expr.exp.q), assumptions):
return ask(Q.positive(expr.base), assumptions)
elif ask(Q.integer(expr.exp), assumptions):
return True
elif ask(Q.positive(expr.base), assumptions):
return True
elif ask(Q.negative(expr.base), assumptions):
return False
Rational, Float, Pi, Exp1, GoldenRatio, Abs, re, im = \
[staticmethod(CommonHandler.AlwaysTrue)]*8
ImaginaryUnit, Infinity, NegativeInfinity = \
[staticmethod(CommonHandler.AlwaysFalse)]*3
@staticmethod
def sin(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return True
cos = sin
@staticmethod
def exp(expr, assumptions):
return ask(Q.integer(expr.args[0]/I/pi) | Q.real(expr.args[0]), assumptions)
@staticmethod
def log(expr, assumptions):
return ask(Q.positive(expr.args[0]), assumptions)
@staticmethod
def MatrixElement(expr, assumptions):
return ask(Q.real_elements(expr.args[0]), assumptions)
Determinant = Trace = MatrixElement
class AskExtendedRealHandler(AskRealHandler):
"""
Handler for Q.extended_real
Test that an expression belongs to the field of extended real numbers,
that is real numbers union {Infinity, -Infinity}
"""
@staticmethod
def Add(expr, assumptions):
return test_closed_group(expr, assumptions, Q.extended_real)
Mul, Pow = [Add]*2
Infinity, NegativeInfinity = [staticmethod(CommonHandler.AlwaysTrue)]*2
class AskHermitianHandler(AskRealHandler):
"""
Handler for Q.hermitian
Test that an expression belongs to the field of Hermitian operators
"""
@staticmethod
def Add(expr, assumptions):
"""
Hermitian + Hermitian -> Hermitian
Hermitian + !Hermitian -> !Hermitian
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.hermitian)
@staticmethod
def Mul(expr, assumptions):
"""
As long as there is at most only one noncommutative term:
Hermitian*Hermitian -> Hermitian
Hermitian*Antihermitian -> !Hermitian
Antihermitian*Antihermitian -> Hermitian
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
nccount = 0
result = True
for arg in expr.args:
if ask(Q.antihermitian(arg), assumptions):
result = result ^ True
elif not ask(Q.hermitian(arg), assumptions):
break
if ask(~Q.commutative(arg), assumptions):
nccount += 1
if nccount > 1:
break
else:
return result
@staticmethod
def Pow(expr, assumptions):
"""
Hermitian**Integer -> Hermitian
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
if ask(Q.hermitian(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
return True
@staticmethod
def sin(expr, assumptions):
if ask(Q.hermitian(expr.args[0]), assumptions):
return True
cos, exp = [sin]*2
class AskComplexHandler(CommonHandler):
"""
Handler for Q.complex
Test that an expression belongs to the field of complex numbers
"""
@staticmethod
def Symbol(expr, assumptions):
return expr.is_complex
@staticmethod
def Add(expr, assumptions):
return test_closed_group(expr, assumptions, Q.complex)
Mul, Pow = [Add]*2
Number, sin, cos, log, exp, re, im, NumberSymbol, Abs, ImaginaryUnit = \
[staticmethod(CommonHandler.AlwaysTrue)]*10 # they are all complex functions or expressions
Infinity, NegativeInfinity = [staticmethod(CommonHandler.AlwaysFalse)]*2
@staticmethod
def MatrixElement(expr, assumptions):
return ask(Q.complex_elements(expr.args[0]), assumptions)
Determinant = Trace = MatrixElement
class AskImaginaryHandler(CommonHandler):
"""
Handler for Q.imaginary
Test that an expression belongs to the field of imaginary numbers,
that is, numbers in the form x*I, where x is real
"""
@staticmethod
def Symbol(expr, assumptions):
return expr.is_imaginary
@staticmethod
def _number(expr, assumptions):
# let as_real_imag() work first since the expression may
# be simpler to evaluate
r = expr.as_real_imag()[0].evalf(2)
if r._prec != 1:
return not r
# allow None to be returned if we couldn't show for sure
# that r was 0
@staticmethod
def Add(expr, assumptions):
"""
Imaginary + Imaginary -> Imaginary
Imaginary + Complex -> ?
Imaginary + Real -> !Imaginary
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
reals = 0
for arg in expr.args:
if ask(Q.imaginary(arg), assumptions):
pass
elif ask(Q.real(arg), assumptions):
reals += 1
else:
break
else:
if reals == 0:
return True
if reals == 1 or (len(expr.args) == reals):
                # with two or more reals (but not all args real) the reals
                # could sum to 0 and leave an imaginary result, so only
                # these two cases are conclusive
return False
@staticmethod
def Mul(expr, assumptions):
"""
Real*Imaginary -> Imaginary
Imaginary*Imaginary -> Real
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
result = False
reals = 0
for arg in expr.args:
if ask(Q.imaginary(arg), assumptions):
result = result ^ True
elif not ask(Q.real(arg), assumptions):
break
else:
if reals == len(expr.args):
return False
return result
@staticmethod
def Pow(expr, assumptions):
"""
Imaginary**Odd -> Imaginary
Imaginary**Even -> Real
b**Imaginary -> !Imaginary if exponent is an integer multiple of I*pi/log(b)
Imaginary**Real -> ?
Positive**Real -> Real
Negative**Integer -> Real
Negative**(Integer/2) -> Imaginary
Negative**Real -> not Imaginary if exponent is not Rational
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
if expr.base.func == exp:
if ask(Q.imaginary(expr.base.args[0]), assumptions):
if ask(Q.imaginary(expr.exp), assumptions):
return False
i = expr.base.args[0]/I/pi
if ask(Q.integer(2*i), assumptions):
return ask(Q.imaginary(((-1)**i)**expr.exp), assumptions)
if ask(Q.imaginary(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
odd = ask(Q.odd(expr.exp), assumptions)
if odd is not None:
return odd
return
if ask(Q.imaginary(expr.exp), assumptions):
imlog = ask(Q.imaginary(log(expr.base)), assumptions)
if imlog is not None:
return False # I**i -> real; (2*I)**i -> complex ==> not imaginary
if ask(Q.real(expr.base) & Q.real(expr.exp), assumptions):
if ask(Q.positive(expr.base), assumptions):
return False
else:
rat = ask(Q.rational(expr.exp), assumptions)
if not rat:
return rat
if ask(Q.integer(expr.exp), assumptions):
return False
else:
half = ask(Q.integer(2*expr.exp), assumptions)
if half:
return ask(Q.negative(expr.base), assumptions)
return half
@staticmethod
def log(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
if ask(Q.positive(expr.args[0]), assumptions):
return False
return
# XXX it should be enough to do
# return ask(Q.nonpositive(expr.args[0]), assumptions)
# but ask(Q.nonpositive(exp(x)), Q.imaginary(x)) -> None;
# it should return True since exp(x) will be either 0 or complex
if expr.args[0].func == exp:
if expr.args[0].args[0] in [I, -I]:
return True
im = ask(Q.imaginary(expr.args[0]), assumptions)
if im is False:
return False
@staticmethod
def exp(expr, assumptions):
a = expr.args[0]/I/pi
return ask(Q.integer(2*a) & ~Q.integer(a), assumptions)
@staticmethod
def Number(expr, assumptions):
return not (expr.as_real_imag()[1] == 0)
NumberSymbol = Number
ImaginaryUnit = staticmethod(CommonHandler.AlwaysTrue)
class AskAntiHermitianHandler(AskImaginaryHandler):
"""
Handler for Q.antihermitian
Test that an expression belongs to the field of anti-Hermitian operators,
that is, operators in the form x*I, where x is Hermitian
"""
@staticmethod
def Add(expr, assumptions):
"""
Antihermitian + Antihermitian -> Antihermitian
Antihermitian + !Antihermitian -> !Antihermitian
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.antihermitian)
@staticmethod
def Mul(expr, assumptions):
"""
As long as there is at most only one noncommutative term:
Hermitian*Hermitian -> !Antihermitian
Hermitian*Antihermitian -> Antihermitian
Antihermitian*Antihermitian -> !Antihermitian
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
nccount = 0
result = False
for arg in expr.args:
if ask(Q.antihermitian(arg), assumptions):
result = result ^ True
elif not ask(Q.hermitian(arg), assumptions):
break
if ask(~Q.commutative(arg), assumptions):
nccount += 1
if nccount > 1:
break
else:
return result
@staticmethod
def Pow(expr, assumptions):
"""
Hermitian**Integer -> !Antihermitian
Antihermitian**Even -> !Antihermitian
Antihermitian**Odd -> Antihermitian
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
if ask(Q.hermitian(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
return False
elif ask(Q.antihermitian(expr.base), assumptions):
if ask(Q.even(expr.exp), assumptions):
return False
elif ask(Q.odd(expr.exp), assumptions):
return True
class AskAlgebraicHandler(CommonHandler):
"""Handler for Q.algebraic key. """
@staticmethod
def Add(expr, assumptions):
return test_closed_group(expr, assumptions, Q.algebraic)
@staticmethod
def Mul(expr, assumptions):
return test_closed_group(expr, assumptions, Q.algebraic)
@staticmethod
def Pow(expr, assumptions):
return expr.exp.is_Rational and ask(
Q.algebraic(expr.base), assumptions)
@staticmethod
def Rational(expr, assumptions):
return expr.q != 0
Float, GoldenRatio, ImaginaryUnit, AlgebraicNumber = \
[staticmethod(CommonHandler.AlwaysTrue)]*4
Infinity, NegativeInfinity, ComplexInfinity, Pi, Exp1 = \
[staticmethod(CommonHandler.AlwaysFalse)]*5
@staticmethod
def exp(expr, assumptions):
x = expr.args[0]
if ask(Q.algebraic(x), assumptions):
return ask(~Q.nonzero(x), assumptions)
@staticmethod
def cot(expr, assumptions):
x = expr.args[0]
if ask(Q.algebraic(x), assumptions):
return False
@staticmethod
def log(expr, assumptions):
x = expr.args[0]
if ask(Q.algebraic(x), assumptions):
return ask(~Q.nonzero(x - 1), assumptions)
sin, cos, tan, asin, atan = [exp]*5
acos, acot = log, cot
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import testtools
import uuid
import webob
from neutron.common import constants
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.extensions import external_net as external_net
from neutron.extensions import l3agentscheduler
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers import type_vlan
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.tests import base
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.ml2.drivers import mechanism_logger as mech_logger
from neutron.tests.unit.ml2.drivers import mechanism_test as mech_test
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_extension_allowedaddresspairs as test_pair
from neutron.tests.unit import test_extension_extradhcpopts as test_dhcpopts
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
config.cfg.CONF.import_opt('network_vlan_ranges',
'neutron.plugins.ml2.drivers.type_vlan',
group='ml2_type_vlan')
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
_mechanism_drivers = ['logger', 'test']
def setUp(self):
# We need a L3 service plugin
l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatServicePlugin')
service_plugins = {'l3_plugin_name': l3_plugin}
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
self._mechanism_drivers,
group='ml2')
self.physnet = 'physnet1'
self.vlan_range = '1:100'
self.vlan_range2 = '200:300'
self.physnet2 = 'physnet2'
self.phys_vrange = ':'.join([self.physnet, self.vlan_range])
self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2])
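        # The resulting entries look like 'physnet1:1:100', i.e.
        # <physical_network>:<vlan_min>:<vlan_max>.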
config.cfg.CONF.set_override('network_vlan_ranges',
[self.phys_vrange, self.phys2_vrange],
group='ml2_type_vlan')
super(Ml2PluginV2TestCase, self).setUp(PLUGIN_NAME,
service_plugins=service_plugins)
self.port_create_status = 'DOWN'
self.driver = ml2_plugin.Ml2Plugin()
self.context = context.get_admin_context()
class TestMl2BulkToggleWithBulkless(Ml2PluginV2TestCase):
_mechanism_drivers = ['logger', 'test', 'bulkless']
def test_bulk_disable_with_bulkless_driver(self):
self.assertTrue(self._skip_native_bulk)
class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase):
_mechanism_drivers = ['logger', 'test']
def test_bulk_enabled_with_bulk_drivers(self):
self.assertFalse(self._skip_native_bulk)
class TestMl2BasicGet(test_plugin.TestBasicGet,
Ml2PluginV2TestCase):
pass
class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,
Ml2PluginV2TestCase):
pass
class TestMl2NetworksV2(test_plugin.TestNetworksV2,
Ml2PluginV2TestCase):
pass
class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
Ml2PluginV2TestCase):
pass
class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
def test_update_port_status_build(self):
with self.port() as port:
self.assertEqual('DOWN', port['port']['status'])
self.assertEqual('DOWN', self.port_create_status)
def test_update_non_existent_port(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
data = {'port': {'admin_state_up': False}}
self.assertRaises(exc.PortNotFound, plugin.update_port, ctx,
'invalid-uuid', data)
def test_delete_non_existent_port(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug:
plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False)
log_debug.assert_has_calls([
mock.call(_("Deleting port %s"), 'invalid-uuid'),
mock.call(_("The port '%s' was deleted"), 'invalid-uuid')
])
def test_l3_cleanup_on_net_delete(self):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
with self.network(**kwargs) as n:
with self.subnet(network=n, cidr='200.0.0.0/22'):
l3plugin.create_floatingip(
context.get_admin_context(),
{'floatingip': {'floating_network_id': n['network']['id'],
'tenant_id': n['network']['tenant_id']}}
)
self._delete('networks', n['network']['id'])
flips = l3plugin.get_floatingips(context.get_admin_context())
self.assertFalse(flips)
def test_delete_port_no_notify_in_disassociate_floatingips(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
with contextlib.nested(
self.port(do_delete=False),
mock.patch.object(l3plugin, 'disassociate_floatingips'),
mock.patch.object(l3plugin, 'notify_routers_updated')
) as (port, disassociate_floatingips, notify):
port_id = port['port']['id']
plugin.delete_port(ctx, port_id)
# check that no notification was requested while under
# transaction
disassociate_floatingips.assert_has_calls([
mock.call(ctx, port_id, do_notify=False)
])
# check that notifier was still triggered
notify.assert_has_calls([
mock.call(ctx, disassociate_floatingips.return_value)
])
def test_check_if_compute_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced('compute:None'))
def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced(
constants.DEVICE_OWNER_LOADBALANCER))
def test_check_if_dhcp_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP))
def test_check_if_port_not_serviced_by_dvr(self):
self.assertFalse(utils.is_dvr_serviced(
constants.DEVICE_OWNER_ROUTER_INTF))
def test_disassociate_floatingips_do_notify_returns_nothing(self):
ctx = context.get_admin_context()
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
with self.port() as port:
port_id = port['port']['id']
# check that nothing is returned when notifications are handled
# by the called method
self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
class TestMl2DvrPortsV2(TestMl2PortsV2):
def setUp(self):
super(TestMl2DvrPortsV2, self).setUp()
extensions = ['router',
constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS]
self.plugin = manager.NeutronManager.get_plugin()
self.l3plugin = mock.Mock()
type(self.l3plugin).supported_extension_aliases = (
mock.PropertyMock(return_value=extensions))
self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}
def _test_delete_dvr_serviced_port(self, device_owner, floating_ip=False):
ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent',
'router_id': 'my_router'}
fip_set = set()
if floating_ip:
fip_set.add(ns_to_delete['router_id'])
with contextlib.nested(
mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value=self.service_plugins),
self.port(do_delete=False,
device_owner=device_owner),
mock.patch.object(self.l3plugin, 'notify_routers_updated'),
mock.patch.object(self.l3plugin, 'disassociate_floatingips',
return_value=fip_set),
mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port',
return_value=[ns_to_delete]),
mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
) as (get_service_plugin, port, notify, disassociate_floatingips,
dvr_delns_ifno_port, remove_router_from_l3_agent):
port_id = port['port']['id']
self.plugin.delete_port(self.context, port_id)
notify.assert_has_calls([mock.call(self.context, fip_set)])
dvr_delns_ifno_port.assert_called_once_with(self.context,
port['port']['id'])
remove_router_from_l3_agent.assert_has_calls([
mock.call(self.context, ns_to_delete['agent_id'],
ns_to_delete['router_id'])
])
def test_delete_last_vm_port(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None')
def test_delete_last_vm_port_with_floatingip(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None',
floating_ip=True)
def test_delete_vm_port_namespace_already_deleted(self):
ns_to_delete = {'host': 'myhost',
'agent_id': 'vm_l3_agent',
'router_id': 'my_router'}
with contextlib.nested(
mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value=self.service_plugins),
self.port(do_delete=False,
device_owner='compute:None'),
mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port',
return_value=[ns_to_delete]),
mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent',
side_effect=l3agentscheduler.RouterNotHostedByL3Agent(
router_id=ns_to_delete['router_id'],
agent_id=ns_to_delete['agent_id']))
) as (get_service_plugin, port, dvr_delns_ifno_port,
remove_router_from_l3_agent):
self.plugin.delete_port(self.context, port['port']['id'])
remove_router_from_l3_agent.assert_called_once_with(self.context,
ns_to_delete['agent_id'], ns_to_delete['router_id'])
def test_delete_lbaas_vip_port(self):
self._test_delete_dvr_serviced_port(
device_owner=constants.DEVICE_OWNER_LOADBALANCER)
class TestMl2PortBinding(Ml2PluginV2TestCase,
test_bindings.PortBindingsTestCase):
# Test case does not set binding:host_id, so ml2 does not attempt
# to bind port
VIF_TYPE = portbindings.VIF_TYPE_UNBOUND
HAS_PORT_FILTER = False
ENABLE_SG = True
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER
def setUp(self, firewall_driver=None):
test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
config.cfg.CONF.set_override(
'enable_security_group', self.ENABLE_SG,
group='SECURITYGROUP')
super(TestMl2PortBinding, self).setUp()
def _check_port_binding_profile(self, port, profile=None):
self.assertIn('id', port)
self.assertIn(portbindings.PROFILE, port)
value = port[portbindings.PROFILE]
self.assertEqual(profile or {}, value)
def test_create_port_binding_profile(self):
self._test_create_port_binding_profile({'a': 1, 'b': 2})
def test_update_port_binding_profile(self):
self._test_update_port_binding_profile({'c': 3})
def test_create_port_binding_profile_too_big(self):
s = 'x' * 5000
profile_arg = {portbindings.PROFILE: {'d': s}}
try:
with self.port(expected_res_status=400,
arg_list=(portbindings.PROFILE,),
**profile_arg):
pass
except webob.exc.HTTPClientError:
pass
def test_remove_port_binding_profile(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
profile_arg = {portbindings.PROFILE: None}
port = self._update('ports', port_id,
{'port': profile_arg})['port']
self._check_port_binding_profile(port)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port)
def test_return_on_concurrent_delete_and_binding(self):
# create a port and delete it so we have an expired mechanism context
with self.port() as port:
plugin = manager.NeutronManager.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context.session,
port['port']['id'])[1]
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding)
with contextlib.nested(
mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding',
return_value=(None, None)),
mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._make_port_dict')
) as (glpab_mock, mpd_mock):
plugin._bind_port_if_needed(mech_context)
# called during deletion to get port
self.assertTrue(glpab_mock.mock_calls)
# should have returned before calling _make_port_dict
self.assertFalse(mpd_mock.mock_calls)
def test_port_binding_profile_not_changed(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
state_arg = {'admin_state_up': True}
port = self._update('ports', port_id,
{'port': state_arg})['port']
self._check_port_binding_profile(port, profile)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port, profile)
def test_process_dvr_port_binding_update_router_id(self):
host_id = 'host'
binding = models.DVRPortBinding(
port_id='port_id',
host=host_id,
router_id='old_router_id',
vif_type=portbindings.VIF_TYPE_OVS,
vnic_type=portbindings.VNIC_NORMAL,
cap_port_filter=False,
status=constants.PORT_STATUS_DOWN)
plugin = manager.NeutronManager.get_plugin()
mock_network = {'id': 'net_id'}
context = mock.Mock()
new_router_id = 'new_router'
attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id}
with mock.patch.object(plugin, '_update_port_dict_binding'):
with mock.patch.object(ml2_db, 'get_network_segments',
return_value=[]):
mech_context = driver_context.DvrPortContext(
self, context, 'port', mock_network, binding)
plugin._process_dvr_port_binding(mech_context, context, attrs)
self.assertEqual(new_router_id,
mech_context._binding.router_id)
self.assertEqual(host_id, mech_context._binding.host)
def test_update_dvr_port_binding_on_non_existent_port(self):
plugin = manager.NeutronManager.get_plugin()
port = {
'id': 'foo_port_id',
'binding:host_id': 'foo_host',
}
with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr:
plugin.update_dvr_port_binding(
self.context, 'foo_port_id', {'port': port})
self.assertFalse(mock_dvr.called)
class TestMl2PortBindingNoSG(TestMl2PortBinding):
HAS_PORT_FILTER = False
ENABLE_SG = False
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
class TestMl2PortBindingHost(Ml2PluginV2TestCase,
test_bindings.PortBindingsHostTestCaseMixin):
pass
class TestMl2PortBindingVnicType(Ml2PluginV2TestCase,
test_bindings.PortBindingsVnicTestCaseMixin):
pass
class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def setUp(self, plugin=None):
super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.SEGMENTATION_ID: 1234,
driver_api.PHYSICAL_NETWORK: 'physnet3'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
segmentation_id='1234')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet3',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
dynamic_segment1 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID]
self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment2 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID]
self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id)
def test_allocate_release_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
self.driver.type_manager.release_dynamic_segment(
self.context.session, dynamic_segment[driver_api.ID])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet1'))
def test_create_network_provider(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_single_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
net_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 2}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
        segments = network['network'][mpnet.SEGMENTS]
        for segment_index, segment in enumerate(
                data['network'][mpnet.SEGMENTS]):
            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                          pnet.SEGMENTATION_ID]:
                self.assertEqual(segment.get(field),
                                 segments[segment_index].get(field))
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
        segments = network['network'][mpnet.SEGMENTS]
        for segment_index, segment in enumerate(
                data['network'][mpnet.SEGMENTS]):
            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                          pnet.SEGMENTATION_ID]:
                self.assertEqual(segment.get(field),
                                 segments[segment_index].get(field))
def test_create_network_with_provider_and_multiprovider_fail(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_full_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_partial_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
def test_release_network_segments(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet2',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
with mock.patch.object(type_vlan.VlanTypeDriver,
'release_segment') as rs:
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(2, rs.call_count)
self.assertEqual(ml2_db.get_network_segments(
self.context.session, network_id), [])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet2'))
def test_release_segment_no_type_driver(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'faketype',
driver_api.PHYSICAL_NETWORK: 'physnet1',
driver_api.ID: 1}
with mock.patch('neutron.plugins.ml2.managers.LOG') as log:
with mock.patch('neutron.plugins.ml2.managers.db') as db:
db.get_network_segments.return_value = (segment,)
self.driver.type_manager.release_network_segments(
self.context.session, network_id)
log.error.assert_called_once_with(
"Failed to release segment '%s' because "
"network type is not supported.", segment)
def test_create_provider_fail(self):
segment = {pnet.NETWORK_TYPE: None,
pnet.PHYSICAL_NETWORK: 'phys_net',
pnet.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.type_manager._process_provider_create(segment)
def test_create_network_plugin(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
def raise_mechanism_exc(*args, **kwargs):
raise ml2_exc.MechanismDriverError(
method='create_network_postcommit')
with mock.patch('neutron.plugins.ml2.managers.MechanismManager.'
'create_network_precommit', new=raise_mechanism_exc):
with testtools.ExpectedException(ml2_exc.MechanismDriverError):
self.driver.create_network(self.context, data)
def test_extend_dictionary_no_segments(self):
network = dict(name='net_no_segment', id='5', tenant_id='tenant_one')
self.driver.type_manager._extend_network_dict_provider(self.context,
network)
self.assertIsNone(network[pnet.NETWORK_TYPE])
self.assertIsNone(network[pnet.PHYSICAL_NETWORK])
self.assertIsNone(network[pnet.SEGMENTATION_ID])
class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase,
test_pair.TestAllowedAddressPairs):
def setUp(self, plugin=None):
super(test_pair.TestAllowedAddressPairs, self).setUp(
plugin=PLUGIN_NAME)
class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt):
def setUp(self, plugin=None):
super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(
plugin=PLUGIN_NAME)
class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['test', 'logger'],
group='ml2')
super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
class TestFaultyMechanismDriver(Ml2PluginV2FaultyDriverTestCase):
def test_create_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
tenant_id = str(uuid.uuid4())
data = {'network': {'name': 'net1',
'tenant_id': tenant_id}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "tenant_id=%s" % tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
def test_delete_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'delete_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_network_postcommit') as dnp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
network = self.deserialize(self.fmt, network_res)
net_id = network['network']['id']
req = self.new_delete_request('networks', net_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(dnp.called)
self._show('networks', net_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_network_postcommit') as unp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
network = self.deserialize(self.fmt, network_res)
net_id = network['network']['id']
new_name = 'a_brand_new_name'
data = {'network': {'name': new_name}}
req = self.new_update_request('networks', data, net_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(unp.called)
net = self._show('networks', net_id)
self.assertEqual(new_name, net['network']['name'])
self._delete('networks', net_id)
def test_create_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with self.network() as network:
net_id = network['network']['id']
data = {'subnet': {'network_id': net_id,
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "network_id=%s" % net_id
subnets = self._list('subnets', query_params=query_params)
self.assertFalse(subnets['subnets'])
def test_delete_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'delete_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_subnet_postcommit') as dsp:
with self.network() as network:
data = {'subnet': {'network_id':
network['network']['id'],
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
self.assertEqual(201, subnet_res.status_int)
subnet = self.deserialize(self.fmt, subnet_res)
subnet_id = subnet['subnet']['id']
req = self.new_delete_request('subnets', subnet_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(dsp.called)
self._show('subnets', subnet_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_subnet_postcommit') as usp:
with self.network() as network:
data = {'subnet': {'network_id':
network['network']['id'],
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
self.assertEqual(201, subnet_res.status_int)
subnet = self.deserialize(self.fmt, subnet_res)
subnet_id = subnet['subnet']['id']
new_name = 'a_brand_new_name'
data = {'subnet': {'name': new_name}}
req = self.new_update_request('subnets', data, subnet_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(usp.called)
subnet = self._show('subnets', subnet_id)
self.assertEqual(new_name, subnet['subnet']['name'])
self._delete('subnets', subnet['subnet']['id'])
def test_create_port_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_port_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with self.network() as network:
net_id = network['network']['id']
data = {'port': {'network_id': net_id,
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
req = self.new_create_request('ports', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "network_id=%s" % net_id
ports = self._list('ports', query_params=query_params)
self.assertFalse(ports['ports'])
def test_update_port_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_port_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_port_postcommit') as upp:
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
port_res = port_req.get_response(self.api)
self.assertEqual(201, port_res.status_int)
port = self.deserialize(self.fmt, port_res)
port_id = port['port']['id']
new_name = 'a_brand_new_name'
data = {'port': {'name': new_name}}
req = self.new_update_request('ports', data, port_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(upp.called)
port = self._show('ports', port_id)
self.assertEqual(new_name, port['port']['name'])
self._delete('ports', port['port']['id'])
class TestMl2PluginCreateUpdatePort(base.BaseTestCase):
def setUp(self):
super(TestMl2PluginCreateUpdatePort, self).setUp()
self.context = mock.MagicMock()
def _ensure_transaction_is_closed(self):
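        # session.begin() on the MagicMock returns the same transaction mock
        # on every call, so equal __enter__/__exit__ counts mean any
        # transaction opened by the plugin has already been closed by the
        # time the notifier side effect runs (i.e. the RPC notification
        # happens outside the transaction).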
transaction = self.context.session.begin(subtransactions=True)
enter = transaction.__enter__.call_count
exit = transaction.__exit__.call_count
self.assertEqual(enter, exit)
def _create_plugin_for_create_update_port(self, new_host_port):
plugin = ml2_plugin.Ml2Plugin()
plugin.extension_manager = mock.Mock()
plugin.type_manager = mock.Mock()
plugin.mechanism_manager = mock.Mock()
plugin.notifier = mock.Mock()
plugin._get_host_port_if_changed = mock.Mock(
return_value=new_host_port)
plugin._notify_l3_agent_new_port = mock.Mock()
plugin._notify_l3_agent_new_port.side_effect = (
lambda c, p: self._ensure_transaction_is_closed())
return plugin
def test_create_port_rpc_outside_transaction(self):
with contextlib.nested(
mock.patch.object(ml2_plugin.Ml2Plugin, '__init__'),
mock.patch.object(base_plugin.NeutronDbPluginV2, 'create_port'),
) as (init, super_create_port):
init.return_value = None
new_host_port = mock.Mock()
plugin = self._create_plugin_for_create_update_port(new_host_port)
plugin.create_port(self.context, mock.MagicMock())
plugin._notify_l3_agent_new_port.assert_called_once_with(
self.context, new_host_port)
def test_update_port_rpc_outside_transaction(self):
with contextlib.nested(
mock.patch.object(ml2_plugin.Ml2Plugin, '__init__'),
mock.patch.object(base_plugin.NeutronDbPluginV2, 'update_port'),
) as (init, super_update_port):
init.return_value = None
new_host_port = mock.Mock()
plugin = self._create_plugin_for_create_update_port(new_host_port)
plugin.update_port(self.context, 'fake_id', mock.MagicMock())
plugin._notify_l3_agent_new_port.assert_called_once_with(
self.context, new_host_port)
|
|
import datetime
from unittest import skipIf, skipUnless
from django.db import connection
from django.db.models import CASCADE, ForeignKey, Index, Q
from django.test import (
TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import override_settings
from django.utils import timezone
from .models import (
Article, ArticleTranslation, IndexedArticle2, IndexTogetherSingleList,
)
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
editor = connection.schema_editor()
index_name = editor._create_index_name(
table_name=Article._meta.db_table,
column_names=("c1",),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_a52bd80b123")
def test_index_name(self):
"""
Index names on the built-in database backends::
* Are truncated as needed.
* Include all the column names.
* Include a deterministic hash.
"""
long_name = 'l%sng' % ('o' * 100)
editor = connection.schema_editor()
index_name = editor._create_index_name(
table_name=Article._meta.db_table,
column_names=('c1', 'c2', long_name),
suffix='ix',
)
expected = {
'mysql': 'indexes_article_c1_c2_looooooooooooooooooo_255179b2ix',
'oracle': 'indexes_a_c1_c2_loo_255179b2ix',
'postgresql': 'indexes_article_c1_c2_loooooooooooooooooo_255179b2ix',
'sqlite': 'indexes_article_c1_c2_l%sng_255179b2ix' % ('o' * 100),
}
if connection.vendor not in expected:
self.skipTest('This test is only supported on the built-in database backends.')
self.assertEqual(index_name, expected[connection.vendor])
def test_index_together(self):
editor = connection.schema_editor()
index_sql = [str(statement) for statement in editor._model_indexes_sql(Article)]
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article._meta.db_table, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
def test_columns_list_sql(self):
index = Index(fields=['headline'], name='whitespace_idx')
editor = connection.schema_editor()
self.assertIn(
'(%s)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
def test_descending_columns_list_sql(self):
index = Index(fields=['-headline'], name='whitespace_idx')
editor = connection.schema_editor()
self.assertIn(
'(%s DESC)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
@skipIf(connection.vendor == 'postgresql', 'opclasses are PostgreSQL only')
class SchemaIndexesNotPostgreSQLTests(TransactionTestCase):
available_apps = ['indexes']
def test_create_index_ignores_opclasses(self):
index = Index(
name='test_ops_class',
fields=['headline'],
opclasses=['varchar_pattern_ops'],
)
with connection.schema_editor() as editor:
# This would error if opclasses weren't ignored.
editor.add_index(IndexedArticle2, index)
# The `condition` parameter is ignored by databases that don't support partial
# indexes.
@skipIfDBFeature('supports_partial_indexes')
class PartialIndexConditionIgnoredTests(TransactionTestCase):
available_apps = ['indexes']
def test_condition_ignored(self):
index = Index(
name='test_condition_ignored',
fields=['published'],
condition=Q(published=True),
)
with connection.schema_editor() as editor:
# This would error if condition weren't ignored.
editor.add_index(Article, index)
self.assertNotIn(
'WHERE %s' % editor.quote_name('published'),
str(index.create_sql(Article, editor))
)
@skipUnless(connection.vendor == 'postgresql', 'PostgreSQL tests')
class SchemaIndexesPostgreSQLTests(TransactionTestCase):
available_apps = ['indexes']
get_opclass_query = '''
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = '%s'
'''
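    # The query above returns (opclass name, index name) rows for a given
    # index name, which the tests below use to verify which operator classes
    # an index was actually created with.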
def test_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(IndexedArticle)]
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
def test_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
def test_ops_class(self):
index = Index(
name='test_ops_class',
fields=['headline'],
opclasses=['varchar_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % 'test_ops_class')
self.assertEqual(cursor.fetchall(), [('varchar_pattern_ops', 'test_ops_class')])
def test_ops_class_multiple_columns(self):
index = Index(
name='test_ops_class_multiple',
fields=['headline', 'body'],
opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % 'test_ops_class_multiple')
expected_ops_classes = (
('varchar_pattern_ops', 'test_ops_class_multiple'),
('text_pattern_ops', 'test_ops_class_multiple'),
)
self.assertCountEqual(cursor.fetchall(), expected_ops_classes)
def test_ops_class_partial(self):
index = Index(
name='test_ops_class_partial',
fields=['body'],
opclasses=['text_pattern_ops'],
condition=Q(headline__contains='China'),
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % 'test_ops_class_partial')
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', 'test_ops_class_partial')])
def test_ops_class_partial_tablespace(self):
indexname = 'test_ops_class_tblspace'
index = Index(
name=indexname,
fields=['body'],
opclasses=['text_pattern_ops'],
condition=Q(headline__contains='China'),
db_tablespace='pg_default',
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
self.assertIn('TABLESPACE "pg_default" ', str(index.create_sql(IndexedArticle2, editor)))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])
def test_ops_class_descending(self):
indexname = 'test_ops_class_ordered'
index = Index(
name=indexname,
fields=['-body'],
opclasses=['text_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])
def test_ops_class_descending_partial(self):
indexname = 'test_ops_class_ordered_partial'
index = Index(
name=indexname,
fields=['-body'],
opclasses=['text_pattern_ops'],
condition=Q(headline__contains='China'),
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])
def test_ops_class_columns_lists_sql(self):
index = Index(
fields=['headline'],
name='whitespace_idx',
opclasses=['text_pattern_ops'],
)
with connection.schema_editor() as editor:
self.assertIn(
'(%s text_pattern_ops)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
def test_ops_class_descending_columns_list_sql(self):
index = Index(
fields=['-headline'],
name='whitespace_idx',
opclasses=['text_pattern_ops'],
)
with connection.schema_editor() as editor:
self.assertIn(
'(%s text_pattern_ops DESC)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
class SchemaIndexesMySQLTests(TransactionTestCase):
available_apps = ['indexes']
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180). An index should be created if db_constraint=False (#26171).
"""
with connection.cursor() as cursor:
storage = connection.introspection.get_storage_engine(
cursor, ArticleTranslation._meta.db_table,
)
        if storage != "InnoDB":
            self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(ArticleTranslation)]
self.assertEqual(index_sql, [
'CREATE INDEX `indexes_articletranslation_article_no_constraint_id_d6c0806b` '
'ON `indexes_articletranslation` (`article_no_constraint_id`)'
])
# The index also shouldn't be created if the ForeignKey is added after
# the model was created.
field_created = False
try:
with connection.schema_editor() as editor:
new_field = ForeignKey(Article, CASCADE)
new_field.set_attributes_from_name('new_foreign_key')
editor.add_field(ArticleTranslation, new_field)
field_created = True
# No deferred SQL. The FK constraint is included in the
# statement to add the field.
self.assertFalse(editor.deferred_sql)
finally:
if field_created:
with connection.schema_editor() as editor:
editor.remove_field(ArticleTranslation, new_field)
@skipUnlessDBFeature('supports_partial_indexes')
# SQLite doesn't support timezone-aware datetimes when USE_TZ is False.
@override_settings(USE_TZ=True)
class PartialIndexTests(TransactionTestCase):
# Schema editor is used to create the index to test that it works.
available_apps = ['indexes']
def test_partial_index(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['pub_date'],
condition=Q(
pub_date__gt=datetime.datetime(
year=2015, month=1, day=1,
# PostgreSQL would otherwise complain about the lookup
# being converted to a mutable function (by removing
# the timezone in the cast) which is forbidden.
tzinfo=timezone.get_current_timezone(),
),
)
)
self.assertIn(
'WHERE %s' % editor.quote_name('pub_date'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=cursor, table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
def test_integer_restriction_partial(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['id'],
condition=Q(pk__gt=1),
)
self.assertIn(
'WHERE %s' % editor.quote_name('id'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=cursor, table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
def test_boolean_restriction_partial(self):
with connection.schema_editor() as editor:
index = Index(
name='published_index',
fields=['published'],
condition=Q(published=True),
)
self.assertIn(
'WHERE %s' % editor.quote_name('published'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=cursor, table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
@skipUnlessDBFeature('supports_functions_in_partial_indexes')
def test_multiple_conditions(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['pub_date', 'headline'],
condition=(
Q(pub_date__gt=datetime.datetime(
year=2015,
month=1,
day=1,
tzinfo=timezone.get_current_timezone(),
)) & Q(headline__contains='China')
),
)
sql = str(index.create_sql(Article, schema_editor=editor))
where = sql.find('WHERE')
self.assertIn(
'WHERE (%s' % editor.quote_name('pub_date'),
sql
)
# Because each backend has different syntax for the operators,
# check ONLY the occurrence of headline in the SQL.
self.assertGreater(sql.rfind('headline'), where)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=cursor, table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
def test_is_null_condition(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['pub_date'],
condition=Q(pub_date__isnull=False),
)
self.assertIn(
'WHERE %s IS NOT NULL' % editor.quote_name('pub_date'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
with connection.cursor() as cursor:
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=cursor, table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
|
|
#!/usr/bin/python
# (C) Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Authors:
Slavisa Sarafijanovic (sla@zurich.ibm.com)
Harald Seipp (seipp@de.ibm.com)
"""
"""
This file implements SwiftHLM Connector for LTFS Data Management (LTFS DM)
backend, i.e. the connector between SwiftHLM and the tape-enabled file storage
backend LTFS DM. LTFS DM is a software that adds tape storage to a standard
disk based Linux filesystem - it keeps the original namespace of the disk file
system exposed to users and applications (via standard POSIX interface) but
allows migrating file data to and from attached tape storage. LTFS DM is open
sourced at https://github.com/ibm-research/LTFS-Data-Management.
Toward SwiftHLM the connector implements SwiftHLM Generic Backend API as
declared in dummy_connector.py of SwiftHLM. On the backend side the connector
maps SwiftHLM requests to the backend's migrate, recall and query status
operations.
"""
from swift.common.utils import readconf
from swift.common.utils import json, get_logger
from sys import stdin, stdout
import os
import errno
import uuid
import subprocess
# SwiftHLM Backend Connector
class SwiftHlmBackendConnector(object):
def __init__(self):
self.__request_in = {}
self.__request_out = {}
self.__request_out_request = ''
        self.__request_out_list = ''
self.__response_in = {}
self.__response_out = {}
# Config
configFile = r'/etc/swift/object-server.conf'
self.conf = readconf(configFile)
# Logging
self.hlm_stor_node_config = self.conf.get('hlm', None)
if self.hlm_stor_node_config:
hlm_stor_node_log_level = self.hlm_stor_node_config.get(
'set log_level', None)
if hlm_stor_node_log_level:
self.conf['log_level'] = hlm_stor_node_log_level
self.logger = get_logger(self.conf, name='hlm-connector',
log_route='swifthlm', fmt="%(server)s: %(msecs)03d "
"[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s")
self.logger.info('info: Initialized Connector')
self.logger.debug('dbg: Initialized Connector')
self.logger.info('conf: %s', self.conf['log_level'])
#self.logger.info('conf: %s', self.conf)
self.logger.debug('conf: %s', json.dumps(self.conf.get('hlm', None)))
self.logger.debug('conf: %s', json.dumps(
self.conf.get('ltfsdm', None)))
# Connector settings
self.ltfsdm_cfg = self.conf.get('ltfsdm', None)
if not self.ltfsdm_cfg:
            self.logger.error('LTFS DM connector not configured in'
                              ' /etc/swift/object-server.conf')
            raise RuntimeError('LTFS DM connector (ltfsdm) not configured in'
                               ' /etc/swift/object-server.conf')
# Check connector settings, make temporary directory if it does not
# exist
self.ltfsdm_path = self.ltfsdm_cfg.get('ltfsdm_path',
'/usr/local/bin/ltfsdm')
# if not os.path.isfile(self.ltfsdm_path):
if os.system('sudo -i ' + self.ltfsdm_path +
' help > /dev/null 2>&1') != 0:
self.logger.error("ERROR: ltfsdm binary not present at"
" configured (or default) path %s", self.ltfsdm_path)
            raise RuntimeError('ltfsdm binary not found at %s'
                               % self.ltfsdm_path)
self.connector_tmp_dir = self.ltfsdm_cfg.get('connector_tmp_dir', None)
if self.connector_tmp_dir:
self.mkdir_minus_p(self.connector_tmp_dir)
else:
self.logger.error('Swifthlm temporary directory not configured')
            raise RuntimeError('SwiftHLM temporary directory not configured')
self.tape_storage_pool = self.ltfsdm_cfg.get('tape_storage_pool', None)
if not self.tape_storage_pool:
self.logger.error('Tape storage pool not configured.')
            raise RuntimeError('Tape storage pool not configured')
    # The next method is invoked by the SwiftHLM Handler using the SwiftHLM
    # Generic Backend Interface (GBI). It adapts the SwiftHLM request for the
    # LTFS DM backend, invokes the backend operations, reformats the backend
    # response to the GBI format, and returns the response to the SwiftHLM
    # Handler.
def submit_request_get_response(self, request):
self.__receive_request(request)
self.__reformat_swifthlm_request_to_specific_backend_api()
self.__submit_request_to_backend_get_response()
self.__reformat_backend_response_to_generic_backend_api()
return self.__response_out
# This method receives the request from SwiftHLM Handler
def __receive_request(self, request):
self.logger.debug('Receiving request from Handler')
self.__request_in = request
return
# This method reformats request to backend API
def __reformat_swifthlm_request_to_specific_backend_api(self):
self.logger.debug('Reformatting request to the specific Backend API')
self.logger.debug('request_in(first 1024 bytes): %s',
str(self.__request_in)[0:1023])
# Backend specific part
self.__request_out_request = self.__request_in['request']
        if self.__request_in['request'].lower() == 'status':
# status: reuse input request as is
self.__request_out = self.__request_in
else:
# migration or recall: prepare list for bulk migration/recall
# in a temporary file
tmp_filename = str(uuid.uuid1())
self.__request_out_list = self.connector_tmp_dir + '/' + \
tmp_filename
f = open(self.__request_out_list, 'w')
for obj_and_file in self.__request_in['objects']:
f.write(str(obj_and_file['file']) + '\n')
f.close()
fr = open(self.__request_out_list, 'r')
file_list_content = fr.read()
self.logger.debug('file_list: %s', file_list_content)
fr.close()
return
# This method submits request to Backend and gets Response from Backend
def __submit_request_to_backend_get_response(self):
self.logger.debug('Submitting request to backend')
if self.__request_out_request == 'status':
# query status
self.query_status_receive_response()
elif self.__request_out_request == 'migrate':
#self.__response_in = 0
self.migrate_receive_response()
elif self.__request_out_request == 'recall':
self.recall_receive_response()
        else:  # wrong request; TODO: move this check so it happens as early as possible
            raise ValueError('Unsupported request type: %s'
                             % self.__request_out_request)
return
def __reformat_backend_response_to_generic_backend_api(self):
self.logger.debug('Reformatting response to Generic Backend API')
self.logger.debug('response_in(first 1024 bytes): %s',
str(self.__response_in)[0:1023])
        # In this connector implementation, the mapping of the backend
        # response to the GBI format is already done in
        # migrate_receive_response(), recall_receive_response() and
        # query_status_receive_response() when they set the response_in
        # variable, so it only remains to copy it to response_out.
self.__response_out = self.__response_in
return
def mkdir_minus_p(self, dir_path):
try:
os.makedirs(dir_path)
except OSError as err: # TODO: check python 3.x
if err.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
raise
return
def migrate_receive_response(self):
self.logger.debug('In migrate_receive_response()')
listfile = self.__request_out_list
request = self.__request_out_request
# Migrate object files - unfortunately ltfsdm migrate must be run as
# root
self.logger.debug('self.ltfsdm_path: %s', self.ltfsdm_path)
cmd = ["sudo", "-i", self.ltfsdm_path, "migrate", "-f", listfile, '-P']
for pool in self.tape_storage_pool.split():
cmd.append(pool)
self.logger.debug('cmd: %s', cmd)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, error = p.communicate()
rc = p.returncode
self.logger.debug('migrate.out(first 1024 bytes): %s',
str(out)[0:1023])
self.logger.debug('rc: %s', rc)
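        # Preserve the original mapping of ltfsdm return code 6 to success;
        # the exact meaning of this code is backend-specific (assumed here to
        # be a benign, non-fatal condition rather than a hard failure).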
if rc == 6:
rc = 0
self.__response_in = rc
return
def recall_receive_response(self):
listfile = self.__request_out_list
request = self.__request_out_request
        # Recall object files - unfortunately ltfsdm recall must be run as
        # root
cmd = ["sudo", "-i", self.ltfsdm_path, "recall", "-f", listfile]
self.logger.debug('cmd: %s', cmd)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, error = p.communicate()
rc = p.returncode
self.logger.debug('recall.out(first 1024 bytes): %s', str(out)[0:1023])
self.logger.debug('rc: %s', rc)
self.__response_in = rc
return
def query_status_receive_response(self):
self.logger.debug('query_status_receive_response()')
        # prepare a unique file name prefix for the temporary lists
lists_prefix = str(uuid.uuid1())
input_list = self.connector_tmp_dir + '/' + lists_prefix + \
'.list.status.input'
self.logger.debug('input_list: %s', input_list)
f = open(input_list, 'w')
for obj_and_file in self.__request_in['objects']:
f.write(str(obj_and_file['file']) + '\n')
f.close()
        # The status scan output is by default owned by root with 0600 file
        # mode, so create the file as the swift user first to be able to
        # process it later
output_list = self.connector_tmp_dir + '/' + lists_prefix + \
'.list.status.output'
self.logger.debug('output_list: %s', output_list)
open(output_list, 'w').close()
output_list_prefix = self.connector_tmp_dir + '/' + lists_prefix
# Prepare status scan command
cmd = ["sudo" +
" -i " +
self.ltfsdm_path +
" info" +
" files" +
" -f " + input_list +
" | awk 'NR > 1 { print }'" +
" >" + output_list]
self.logger.debug('cmd: %s', cmd)
# Invoke the command
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# Check result
if p.returncode:
self.logger.error('Status query errors: %s', err)
return
fr = open(output_list, 'r')
file_list_content = fr.read()
self.logger.debug('output_list(first 1024 bytes): %s',
str(file_list_content)[0:1023])
fr.close()
# get file-status pairs
names_statuses = {}
fr = open(output_list, 'r')
for line in fr.readlines():
self.logger.debug('line: %s', str(line))
file_name = line.split()[-1]
file_status = line.split()[0]
if file_status == 'r':
file_status = 'resident'
elif file_status == 'p':
file_status = 'premigrated'
elif file_status == 'm':
file_status = 'migrated'
self.logger.debug('file_name: %s', file_name)
self.logger.debug('file_status: %s', file_status)
names_statuses[file_name] = file_status
# create object to file to status mapping
objects = []
for obj_and_file in self.__request_out['objects']:
obj_file_status = {}
obj_file_status['object'] = obj_and_file['object']
obj_file_status['file'] = obj_and_file['file']
filenamekey = obj_and_file['file']
self.logger.debug('filenamekey: %s', filenamekey)
filenamekey = os.path.realpath(filenamekey)
self.logger.debug('filenamekey: %s', filenamekey)
obj_file_status['status'] = names_statuses[filenamekey]
objects.append(obj_file_status)
self.__response_in['objects'] = objects
# TODO: uncomment or modify next 2 lines once major defects are fixed
# os.remove(input_list)
# os.remove(output_list)
return
def set_statuses_to_unknown(self):
objects = []
for obj_and_file in self.__request_out['objects']:
obj_file_status = obj_and_file
obj_file_status['status'] = 'unknown'
objects.append(obj_file_status)
self.__response_in['objects'] = objects
return
if __name__ == '__main__':
    # The SwiftHlmBackendConnector class is not meant to be used standalone;
    # it is imported for a configured backend by the SwiftHLM Handler and
    # invoked from the Handler.
    raise RuntimeError('This connector is not meant to be run standalone.')
|
|
from django.shortcuts import render,resolve_url,redirect
from django.template.response import TemplateResponse
from django.core.urlresolvers import reverse,reverse_lazy
from django.contrib.auth.views import deprecate_current_app
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.cache import never_cache
from django.contrib.auth.tokens import default_token_generator
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth import get_user_model
from django.conf import settings
from django.utils.encoding import force_text
from django.http import HttpResponseRedirect
from django.views.decorators.debug import sensitive_post_parameters
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
import random
import warnings
from email.mime.text import MIMEText
from smtplib import SMTP
from django.core.mail import send_mail
from .forms import UsuarioForm
from django.contrib.auth.models import Group
from django.contrib.auth.forms import PasswordResetForm,SetPasswordForm
from django.contrib.auth.models import User
from cuentas_usuarioapp.models import UsuarioEmpleado
from cuentas_usuarioapp.forms import CambiarPassword,ResetPasswordForm
from empleadosapp.models import Empleado
UserModel = get_user_model()
"""def crear_usuario(request):
if request.method == "POST":
form = CrearUsuario(request.POST)
#messages.success(request,'Antes de is_valid')
if form.is_valid():
#messages.success(request,'Entro a is_valid')
x = form.save(commit=False)
x.save()
x.groups = request.POST.getlist('grupos')
x.save()
messages.success(request,'Usuario '+request.POST.get('usuario')+ ' Creado Satisfactoriamente')
else:
messages.success(request,'No entra a is_valid')
else:
#messages.success(request,'Error')
form=CrearUsuario()
return render(request,"cuentas_usuarioapp/crear.html",{'form':form})"""
@login_required(login_url='logins')
def empleado_usuario(request):
formU = UsuarioForm()
psw = generarpassword()
if request.method == "POST":
formU = UsuarioForm(request.POST)
#codEmp = request.POST.get('empleado')
#dataemp = Empleado.objects.get(pk=codEmp)
#usr = generausername(dataemp)
if formU.is_valid():
userAux = formU.cleaned_data
aux = userAux.get('codigoEmpleado')
dataemp = aux.nombrePrimero + ' '+ aux.apellidoPrimero
usr = generausername(dataemp)
passw = request.POST.get('password')
user = User.objects.create_user(usr,userAux.get('email'),passw)
user.last_name=aux.apellidoPrimero
user.first_name=aux.nombrePrimero
user.save()
codigoU = User.objects.order_by('-id')[0]
UsuarioEmpleado.objects.create(codigoEmpleado=aux,codigoUsuario=codigoU)
user.groups = request.POST.getlist('grupos')
user.save()
messages.success(request,'Se han guardado los datos del Usuario Exitosamente')
remitente = "Bienestar Universitario <bienestaruessv@gmail.com>"
destinatario = dataemp.split(' ')[0] +' '+ dataemp.split(' ')[1] + "<"+user.email+">"
asunto = "Credenciales BU"
saludo = "Este es un e-mail enviando desde el Sistema Informatico de gestion de expedientes y citas de Bienestar Universitario, UES"
mensaje = "Hola!\n"+saludo+"\nNombre de usuario: "+ usr +"\n" + "Password: "+ passw + \
"\n\n\nDebera cambiar en el primer ingreso su contraseña y luego si deseea podra cambiar su nombre de usuario."
mime_message = MIMEText(mensaje)
mime_message["From"] = remitente
mime_message["To"] = destinatario
mime_message["Subject"] = asunto
smtp = SMTP(settings.EMAIL_HOST, settings.EMAIL_PORT)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(settings.EMAIL_HOST_USER, settings.EMAIL_HOST_PASSWORD)
smtp.sendmail(remitente, destinatario, mime_message.as_string())
smtp.quit()
return redirect('empleadosuario-new')
else:
messages.success(request,'No validos')
formU = UsuarioForm()
return render(request,"cuentas_usuarioapp/empleado_usuario.html",{'form':formU,'pass':psw})
def generausername(data):
nombre = data.split(' ')
#limiteNombre = len(nombre[0])
#limiteApellido = len(nombre[1])
usr = nombre[0] +'.'+ nombre[1] + str(random.randint(1,99))
return usr
def generarpassword():
cadena = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890$/*-.@"
longitudCadena = len(cadena)
psw = ""
longitudPsw=10
for i in range(0,longitudPsw):
pos = random.randint(0,longitudCadena-1)
psw = psw + cadena[pos]
return psw
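# For example (illustrative values only): generausername('Ana Lopez') could
# return 'Ana.Lopez37', and generarpassword() returns a random 10-character
# string drawn from the `cadena` alphabet above.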
# Override of the Django views so that simpler passwords can be accepted
@deprecate_current_app
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=ResetPasswordForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
warnings.warn("The password_reset() view is superseded by the "
"class-based PasswordResetView().",
RemovedInDjango20Warning, stacklevel=2)
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': 'Password reset',
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
warnings.warn("The password_reset_done() view is superseded by the "
"class-based PasswordResetDoneView().",
RemovedInDjango20Warning, stacklevel=2)
context = {
'title': 'Password reset sent',
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=CambiarPassword,
post_reset_redirect=None,
extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
warnings.warn("The password_reset_confirm() view is superseded by the "
"class-based PasswordResetConfirmView().",
RemovedInDjango20Warning, stacklevel=2)
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = 'Entre el nuevo Password'
if request.method == 'POST':
form = set_password_form(request.POST)
#messages.success(request, "hola "+user)
if form.is_valid():
x = form.cleaned_data
usr = User.objects.get(username=user)
usr.set_password(x.get('pass1'))
usr.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form()
else:
validlink = False
form = None
title = 'Password no reestablecido'
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
warnings.warn("The password_reset_complete() view is superseded by the "
"class-based PasswordResetCompleteView().",
RemovedInDjango20Warning, stacklevel=2)
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': 'Password Reestablecido correctamente',
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
|
|
import os
import struct
import scipy.spatial, scipy.optimize
import numpy as np
from ibex.utilities import dataIO
from ibex.utilities.constants import *
def ReadSkeletonEndpoints(filename):
endpoints = []
with open(filename, 'rb') as fd:
zres, yres, xres, max_label, = struct.unpack('qqqq', fd.read(32))
# go through every label
for label in range(max_label):
nelements, = struct.unpack('q', fd.read(8))
endpoints.append([])
# go through all elements
for _ in range(nelements):
index, = struct.unpack('q', fd.read(8))
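                # endpoints are stored as negated linear indices
                # (index = -(iz * yres * xres + iy * xres + ix)); entries
                # with positive indices are not endpoints and are skipped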
if index > 0: continue
index = -1 * index
# convert to cartesian coordinates
iz = index / (yres * xres)
iy = (index - iz * yres * xres) / xres
ix = index % xres
endpoints[label].append((ix, iy, iz))
return endpoints
def ReadGroundTruth(prefix, max_label):
examples_filename = 'benchmarks/skeleton/{}-skeleton-benchmark-examples.bin'.format(prefix)
gt_examples = [[] for _ in range(max_label)]
with open(examples_filename, "rb") as fd:
cutoff, = struct.unpack('q', fd.read(8))
for iv in range(cutoff):
label, = struct.unpack('q', fd.read(8))
# read all the examples
example_filename = 'benchmarks/skeleton/{}/skeleton-endpoints-{:05d}.pts'.format(prefix, label)
if not os.path.exists(example_filename): continue
with open(example_filename, 'rb') as efd:
npts, = struct.unpack('q', efd.read(8))
for _ in range(npts):
zpt, ypt, xpt, = struct.unpack('qqq', efd.read(24))
gt_examples[label].append((xpt, ypt, zpt))
# read the list
return gt_examples
def FindEndpointMatches(prefix, algorithm, params, resolution, ground_truth):
# read the endpoints for this set of parameters
skeleton_filename = 'benchmarks/skeleton/{}-{}-{:03d}x{:03d}x{:03d}-upsample-{}-skeleton.pts'.format(prefix, algorithm, resolution[IB_X], resolution[IB_Y], resolution[IB_Z], params)
if not os.path.exists(skeleton_filename): return 0, 0, 0
# read the endpoints
proposed = ReadSkeletonEndpoints(skeleton_filename)
assert (len(ground_truth) == len(proposed))
    # don't allow points to be connected over this distance (in nanometers)
max_distance = 800
# go through every label
max_label = len(ground_truth)
output_filename = 'benchmarks/skeleton/matchings/{}-{}-{:03d}x{:03d}x{:03d}-{}-matching-pairs.pts'.format(prefix, algorithm, resolution[IB_X], resolution[IB_Y], resolution[IB_Z], params)
true_positives = 0
false_positives = 0
false_negatives = 0
with open(output_filename, 'wb') as fd:
# need resolution for max distance
resolution = dataIO.Resolution(prefix)
fd.write(struct.pack('q', max_label))
for label in range(max_label):
# no ground truth for this label
if not len(ground_truth[label]):
fd.write(struct.pack('q', 0))
continue
ngt_pts = len(ground_truth[label])
npr_pts = len(proposed[label])
gt_pts = np.zeros((ngt_pts, 3), dtype=np.int64)
pr_pts = np.zeros((npr_pts, 3), dtype=np.int64)
# can not use IB_NDIMS because coordinates are (x, y, z) here
for pt in range(ngt_pts):
gt_pts[pt,0] = resolution[IB_X] * ground_truth[label][pt][0]
gt_pts[pt,1] = resolution[IB_Y] * ground_truth[label][pt][1]
gt_pts[pt,2] = resolution[IB_Z] * ground_truth[label][pt][2]
for pt in range(npr_pts):
pr_pts[pt,0] = resolution[IB_X] * proposed[label][pt][0]
pr_pts[pt,1] = resolution[IB_Y] * proposed[label][pt][1]
pr_pts[pt,2] = resolution[IB_Z] * proposed[label][pt][2]
cost_matrix = scipy.spatial.distance.cdist(gt_pts, pr_pts)
matching = scipy.optimize.linear_sum_assignment(cost_matrix)
valid_matches = set()
for match in zip(matching[0], matching[1]):
# valid pairs must be within max_distance (in nanometers)
if cost_matrix[match[0], match[1]] > max_distance: continue
valid_matches.add(match)
true_positives += len(valid_matches)
false_positives += npr_pts - len(valid_matches)
false_negatives += ngt_pts - len(valid_matches)
# write the ground truth and the corresponding segment endpoints
fd.write(struct.pack('q', len(valid_matches)))
for match in valid_matches:
fd.write(struct.pack('qq', match[0], match[1]))
precision = true_positives / float(true_positives + false_positives)
recall = true_positives / float(true_positives + false_negatives)
fscore = 2 * (precision * recall) / float(precision + recall)
return fscore, precision, recall
def EvaluateEndpoints(prefix):
gold = dataIO.ReadGoldData(prefix)
max_label = np.amax(gold) + 1
resolutions = [(iv, iv, iv) for iv in range(30, 210, 10)] # all downsampled resolutions
# get the human labeled ground truth
gt_endpoints = ReadGroundTruth(prefix, max_label)
best_fscore_precision = 0.0
best_fscore_recall = 0.0
best_fscore = 0.0
algorithm = ''
min_precision, min_recall = (0.80, 0.90)
# go through all possible configurations
for resolution in resolutions:
# go through parameters for medial axis strategy
for astar_expansion in [0, 11, 13, 15, 17, 19, 21, 23, 25]:
fscore, precision, recall = FindEndpointMatches(prefix, 'thinning', '{:02d}'.format(astar_expansion), resolution, gt_endpoints)
if (precision > min_precision and recall > min_recall):
print 'Thinning {:03d}x{:03d}x{:03d} {:02d}'.format(resolution[IB_X], resolution[IB_Y], resolution[IB_Z], astar_expansion)
print ' F1-Score: {}'.format(fscore)
print ' Precision: {}'.format(precision)
print ' Recall: {}'.format(recall)
if (fscore > best_fscore):
best_fscore = fscore
best_fscore_precision = precision
best_fscore_recall = recall
algorithm = 'thinning-{:03d}x{:03d}x{:03d}-{:02d}'.format(resolution[IB_X], resolution[IB_Y], resolution[IB_Z], astar_expansion)
fscore, precision, recall = FindEndpointMatches(prefix, 'medial-axis', '{:02d}'.format(astar_expansion), resolution, gt_endpoints)
if (precision > min_precision and recall > min_recall):
print 'Medial Axis {:03d}x{:03d}x{:03d} {:02d}'.format(resolution[IB_X], resolution[IB_Y], resolution[IB_Z], astar_expansion)
print ' F1-Score: {}'.format(fscore)
print ' Precision: {}'.format(precision)
print ' Recall: {}'.format(recall)
if (fscore > best_fscore):
best_fscore = fscore
best_fscore_precision = precision
best_fscore_recall = recall
algorithm = 'medial-axis-{:03d}x{:03d}x{:03d}-{:02d}'.format(resolution[IB_X], resolution[IB_Y], resolution[IB_Z], astar_expansion)
for tscale in [7, 9, 11, 13, 15, 17]:
for tbuffer in [1, 2, 3, 4, 5]:
fscore, precision, recall = FindEndpointMatches(prefix, 'teaser', '{:02d}-{:02d}-00'.format(tscale, tbuffer), resolution, gt_endpoints)
if (precision > min_precision and recall > min_recall):
print 'TEASER {:03d}x{:03d}x{:03d} {:02d} {:02d}'.format(resolution[IB_X], resolution[IB_Y], resolution[IB_Z], tscale, tbuffer)
print ' F1-Score: {}'.format(fscore)
print ' Precision: {}'.format(precision)
print ' Recall: {}'.format(recall)
if (fscore > best_fscore):
best_fscore = fscore
best_fscore_precision = precision
best_fscore_recall = recall
algorithm = 'teaser-{:03d}x{:03d}x{:03d}-{:02d}-{:02d}-00'.format(resolution[IB_X], resolution[IB_Y], resolution[IB_Z], tscale, tbuffer)
print 'Best method: {}'.format(algorithm)
print 'F1-Score: {}'.format(best_fscore)
print 'Precision: {}'.format(best_fscore_precision)
print 'Recall: {}'.format(best_fscore_recall)
# find skeleton benchmark information
def GenerateExamples(prefix, cutoff=500):
gold = dataIO.ReadGoldData(prefix)
labels, counts = np.unique(gold, return_counts=True)
filename = 'benchmarks/skeleton/{}-skeleton-benchmark-examples.bin'.format(prefix)
with open(filename, 'wb') as fd:
fd.write(struct.pack('q', cutoff))
if labels[0] == 0: cutoff += 1
for ie, (count, label) in enumerate(sorted(zip(counts, labels), reverse=True)):
if not label: continue
# don't include more than cutoff examples
if ie == cutoff: break
fd.write(struct.pack('q', label))
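# File layout note (inferred from the writes above): the examples file is a
# flat stream of native int64 ('q') values, one holding the cutoff followed
# by one per selected label, ordered from largest to smallest voxel count
# with the background label 0 skipped.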
|
|
"""Database models for tracking project schema history."""
from __future__ import unicode_literals
import json
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_init
from django.utils.timezone import now
from django_evolution.compat import six
from django_evolution.compat.datastructures import OrderedDict
from django_evolution.compat.py23 import pickle_dumps, pickle_loads
from django_evolution.compat.six import python_2_unicode_compatible
from django_evolution.compat.translation import gettext_lazy as _
from django_evolution.signature import ProjectSignature
class VersionManager(models.Manager):
"""Manage Version models.
This introduces a convenience function for finding the current Version
model for the database.
"""
def current_version(self, using=None):
"""Return the Version model for the current schema.
This will find the Version with both the latest timestamp and the
latest ID. It's here as a replacement for the old call to
:py:meth:`latest`, which only operated on the timestamp and would
find the wrong entry if two had the same exact timestamp.
Args:
using (unicode):
The database alias name to use for the query. Defaults
to ``None``, the default database.
Raises:
Version.DoesNotExist: No such version exists.
Returns:
Version: The current Version object for the database.
"""
versions = self.using(using).order_by('-when', '-id')
try:
return versions[0]
except IndexError:
raise self.model.DoesNotExist
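# Illustrative usage (not part of the original module):
#
#   version = Version.objects.current_version(using='default')
#
# Ordering by ('-when', '-id') means that when two rows share the same
# timestamp, the row with the higher primary key (the most recently
# inserted one) is returned.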
class SignatureField(models.TextField):
"""A field for loading and storing project signatures.
This will handle deserializing any project signatures stored in the
database, converting them into a
:py:class:`~django_evolution.signatures.ProjectSignature`, and then
writing a serialized version back to the database.
"""
description = _('Signature')
def contribute_to_class(self, cls, name):
"""Perform operations when added to a class.
This will listen for when an instance is constructed in order to
perform some initial work.
Args:
cls (type):
The model class.
name (str):
The name of the field.
"""
super(SignatureField, self).contribute_to_class(cls, name)
post_init.connect(self._post_init, sender=cls)
def value_to_string(self, obj):
"""Return a serialized string value from the field.
Args:
obj (django.db.models.Model):
The model instance.
Returns:
unicode:
The serialized string contents.
"""
return self._dumps(self.value_from_object(obj))
def to_python(self, value):
"""Return a ProjectSignature value from the field contents.
Args:
value (object):
The current value assigned to the field. This might be
serialized string content or a
:py:class:`~django_evolution.signatures.ProjectSignature`
instance.
Returns:
django_evolution.signatures.ProjectSignature:
The project signature stored in the field.
Raises:
django.core.exceptions.ValidationError:
The field contents are of an unexpected type.
"""
if not value:
return ProjectSignature()
elif isinstance(value, six.string_types):
if value.startswith('json!'):
loaded_value = json.loads(value[len('json!'):],
object_pairs_hook=OrderedDict)
else:
loaded_value = pickle_loads(value)
return ProjectSignature.deserialize(loaded_value)
elif isinstance(value, ProjectSignature):
return value
else:
raise ValidationError(
'Unsupported serialized signature type %s' % type(value),
code='invalid',
params={
'value': value,
})
def get_prep_value(self, value):
"""Return a prepared Python value to work with.
This simply wraps :py:meth:`to_python`.
Args:
value (object):
The current value assigned to the field. This might be
serialized string content or a
:py:class:`~django_evolution.signatures.ProjectSignature`
instance.
Returns:
django_evolution.signatures.ProjectSignature:
The project signature stored in the field.
Raises:
django.core.exceptions.ValidationError:
The field contents are of an unexpected type.
"""
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
"""Return a prepared value for use in database operations.
Args:
value (object):
The current value assigned to the field. This might be
serialized string content or a
:py:class:`~django_evolution.signatures.ProjectSignature`
instance.
connection (django.db.backends.base.BaseDatabaseWrapper):
The database connection to operate on.
prepared (bool, optional):
Whether the value is already prepared for Python.
Returns:
unicode:
The value prepared for database operations.
"""
if not prepared:
value = self.get_prep_value(value)
return self._dumps(value)
def _post_init(self, instance, **kwargs):
"""Handle the construction of a model instance.
This will ensure the value set on the field is a valid
:py:class:`~django_evolution.signatures.ProjectSignature` object.
Args:
instance (django.db.models.Model):
The model instance being constructed.
**kwargs (dict, unused):
Additional keyword arguments from the signal.
"""
setattr(instance, self.attname,
self.to_python(self.value_from_object(instance)))
def _dumps(self, data):
"""Serialize the project signature to a string.
Args:
data (object):
The signature data to dump. This might be serialized string
content or a
:py:class:`~django_evolution.signatures.ProjectSignature`
instance.
Returns:
unicode:
The project signature stored in the field.
Raises:
TypeError:
The data provided was not of a supported type.
"""
if isinstance(data, six.string_types):
return data
elif isinstance(data, ProjectSignature):
serialized_data = data.serialize()
sig_version = serialized_data['__version__']
if sig_version >= 2:
return 'json!%s' % json.dumps(serialized_data)
else:
return pickle_dumps(serialized_data)
else:
raise TypeError('Unsupported signature type %s' % type(data))
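# Serialization note (inferred from _dumps() and to_python() above): signatures
# whose serialized __version__ is 2 or higher are stored as JSON with a literal
# 'json!' prefix, e.g. 'json!{"__version__": 2, ...}', while older signatures
# fall back to pickle. to_python() keys off that prefix to pick the right
# deserializer.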
@python_2_unicode_compatible
class Version(models.Model):
signature = SignatureField()
when = models.DateTimeField(default=now)
objects = VersionManager()
def is_hinted(self):
"""Return whether this is a hinted version.
Hinted versions store a signature without any accompanying evolutions.
Returns:
bool:
``True`` if this is a hinted evolution. ``False`` if it's based on
explicit evolutions.
"""
return not self.evolutions.exists()
def __str__(self):
if self.is_hinted():
return 'Hinted version, updated on %s' % self.when
return 'Stored version, updated on %s' % self.when
class Meta:
ordering = ('-when',)
db_table = 'django_project_version'
@python_2_unicode_compatible
class Evolution(models.Model):
version = models.ForeignKey(Version,
related_name='evolutions',
on_delete=models.CASCADE)
app_label = models.CharField(max_length=200)
label = models.CharField(max_length=100)
def __str__(self):
return 'Evolution %s, applied to %s' % (self.label, self.app_label)
class Meta:
db_table = 'django_evolution'
ordering = ('id',)
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
import time
import json
from c7n.exceptions import PolicyValidationError
from .common import event_data
class TestGlueConnections(BaseTest):
def test_connections_query(self):
session_factory = self.replay_flight_data("test_glue_query_resources")
p = self.load_policy(
{"name": "list-glue-connections", "resource": "glue-connection"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_connection_subnet_filter(self):
session_factory = self.replay_flight_data("test_glue_subnet_filter")
p = self.load_policy(
{
"name": "glue-connection",
"resource": "glue-connection",
"filters": [
{"type": "subnet", "key": "tag:Name", "value": "Default-48"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(
resources[0]["PhysicalConnectionRequirements"]["SubnetId"],
"subnet-3a334610",
)
def test_connection_sg_filter(self):
session_factory = self.replay_flight_data("test_glue_sg_filter")
p = self.load_policy(
{
"name": "glue-connection",
"resource": "glue-connection",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(
resources[0]["PhysicalConnectionRequirements"]["SecurityGroupIdList"],
["sg-6c7fa917"],
)
def test_connection_delete(self):
session_factory = self.replay_flight_data("test_glue_delete_connection")
p = self.load_policy(
{
"name": "glue-connection",
"resource": "glue-connection",
"filters": [{"ConnectionType": "JDBC"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
connections = client.get_connections()["ConnectionList"]
self.assertFalse(connections)
def test_connection_password_hidden(self):
session_factory = self.replay_flight_data("test_connection_password_hidden")
p = self.load_policy(
{
"name": "glue-connection",
"resource": "glue-connection",
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual('PASSWORD' in resources[0].get('ConnectionProperties'), False)
class TestGlueDevEndpoints(BaseTest):
def test_dev_endpoints_query(self):
session_factory = self.replay_flight_data("test_glue_query_resources")
p = self.load_policy(
{"name": "list-glue-dev-endpoints", "resource": "glue-dev-endpoint"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_dev_endpoints_delete(self):
session_factory = self.replay_flight_data("test_glue_dev_endpoint_delete")
p = self.load_policy(
{
"name": "glue-dev-endpoint-delete",
"resource": "glue-dev-endpoint",
"filters": [{"PublicAddress": "present"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
dev_endpoints = client.get_dev_endpoints()["DevEndpoints"]
self.assertFalse(dev_endpoints)
class TestGlueTag(BaseTest):
def test_glue_tags(self):
session_factory = self.replay_flight_data("test_glue_tags")
client = session_factory().client("glue")
tags = client.get_tags(ResourceArn='arn:aws:glue:us-east-1:644160558196:devEndpoint/test')
self.assertEqual(tags.get('Tags'), {})
policy = {
'name': 'test',
'resource': 'glue-dev-endpoint',
'actions': [
{
'type': 'tag',
'key': 'abcd',
'value': 'xyz'
},
]
}
p = self.load_policy(
policy,
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]['EndpointName'])
self.assertEqual(arn, 'arn:aws:glue:us-east-1:644160558196:devEndpoint/test')
tags = client.get_tags(ResourceArn=arn)
self.assertEqual(len(resources), 1)
self.assertEqual(tags.get('Tags'), {'abcd': 'xyz'})
def test_glue_untag(self):
session_factory = self.replay_flight_data("test_glue_untag")
policy = {
'name': 'test',
'resource': 'glue-dev-endpoint',
'actions': [{'type': 'remove-tag', 'tags': ['abcd']}]
}
p = self.load_policy(
policy,
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
client = session_factory().client("glue")
arn = p.resource_manager.generate_arn(resources[0]['EndpointName'])
tags = client.get_tags(ResourceArn=arn)
self.assertEqual(arn, 'arn:aws:glue:us-east-1:644160558196:devEndpoint/test')
self.assertEqual(tags.get('Tags'), {})
self.assertEqual(len(resources), 1)
def test_glue_job_tag(self):
session_factory = self.replay_flight_data("test_glue_job_tags")
client = session_factory().client("glue")
policy = {
'name': 'test',
'resource': 'glue-job',
'filters': [{'tag:abcd': 'absent'}],
'actions': [
{
'type': 'tag',
'key': 'abcd',
'value': 'xyz'
},
]
}
p = self.load_policy(
policy,
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]['Name'])
self.assertEqual(arn, 'arn:aws:glue:us-east-1:644160558196:job/test')
tags = client.get_tags(ResourceArn=arn)
self.assertEqual(len(resources), 1)
self.assertEqual(tags.get('Tags'), {'abcd': 'xyz'})
def test_glue_job_untag(self):
session_factory = self.replay_flight_data("test_glue_job_untag")
policy = {
'name': 'test',
'resource': 'glue-job',
'filters': [{'tag:abcd': 'present'}],
'actions': [{'type': 'remove-tag', 'tags': ['abcd']}]
}
p = self.load_policy(
policy,
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
client = session_factory().client("glue")
arn = p.resource_manager.generate_arn(resources[0]['Name'])
tags = client.get_tags(ResourceArn=arn)
self.assertEqual(arn, 'arn:aws:glue:us-east-1:644160558196:job/test')
self.assertEqual(tags.get('Tags'), {})
self.assertEqual(len(resources), 1)
def test_glue_crawler_tag(self):
session_factory = self.replay_flight_data("test_crawler_tags")
client = session_factory().client("glue")
policy = {
'name': 'test',
'resource': 'glue-crawler',
'filters': [{'tag:abcd': 'absent'}],
'actions': [
{
'type': 'tag',
'key': 'abcd',
'value': 'xyz'
},
]
}
p = self.load_policy(
policy,
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]['Name'])
self.assertEqual(arn, 'arn:aws:glue:us-east-1:644160558196:crawler/test')
tags = client.get_tags(ResourceArn=arn)
self.assertEqual(len(resources), 1)
self.assertEqual(tags.get('Tags'), {'abcd': 'xyz'})
def test_glue_crawler_untag(self):
session_factory = self.replay_flight_data("test_glue_crawler_untag")
policy = {
'name': 'test',
'resource': 'glue-crawler',
'filters': [{'tag:abcd': 'present'}],
'actions': [{'type': 'remove-tag', 'tags': ['abcd']}]
}
p = self.load_policy(
policy,
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
client = session_factory().client("glue")
arn = p.resource_manager.generate_arn(resources[0]['Name'])
tags = client.get_tags(ResourceArn=arn)
self.assertEqual(arn, 'arn:aws:glue:us-east-1:644160558196:crawler/test')
self.assertEqual(tags.get('Tags'), {})
self.assertEqual(len(resources), 1)
class TestGlueJobs(BaseTest):
def test_jobs_delete(self):
session_factory = self.replay_flight_data("test_glue_job_delete")
p = self.load_policy(
{
"name": "glue-job-delete",
"resource": "glue-job",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
jobs = client.get_jobs()["Jobs"]
self.assertFalse(jobs)
class TestGlueCrawlers(BaseTest):
def test_crawlers_delete(self):
session_factory = self.replay_flight_data("test_glue_crawler_delete")
p = self.load_policy(
{
"name": "glue-crawler-delete",
"resource": "glue-crawler",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
crawlers = client.get_crawlers()["Crawlers"]
self.assertFalse("test" in [c.get("Name") for c in crawlers])
def test_security_config_missing_filter(self):
p = self.load_policy(
{
"name": "glue-crawler-security-config",
"resource": "glue-crawler",
"filters": [{
"type": "security-config",
"missing": True}]
},
)
resources = p.resource_manager.filter_resources([{
'Name': 'bad-crawler',
'S3Targets': [{'Path': 's3://wicked'}]}])
assert len(resources) == 1
assert resources[0]['Name'] == 'bad-crawler'
def test_security_config_filter(self):
session_factory = self.replay_flight_data("test_glue_sec_config_filter")
p = self.load_policy(
{
"name": "glue-crawler-security-config",
"resource": "glue-crawler",
"filters": [
{"type": "security-config",
"key": "EncryptionConfiguration.CloudWatchEncryption.CloudWatchEncryptionMode",
"value": "SSE-KMS",
"op": "eq"},
{"type": "security-config",
"key": "EncryptionConfiguration.CloudWatchEncryption.KmsKeyArn",
"value": "arn:aws:kms:us-east-1:123456789123:key/358f7699-4ea5-455a-9c78-1c868301e5a8", # noqa
"op": "eq"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], 'test-filter-crawler')
class TestGlueTables(BaseTest):
def test_tables_delete(self):
session_factory = self.replay_flight_data("test_glue_table_delete")
p = self.load_policy(
{
"name": "glue-table-delete",
"resource": "glue-table",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
tables = client.get_tables(DatabaseName='test')["TableList"]
self.assertFalse("test" in [t.get("Name") for t in tables])
class TestGlueDatabases(BaseTest):
def test_databases_delete(self):
session_factory = self.replay_flight_data("test_glue_database_delete")
p = self.load_policy(
{
"name": "glue-database-delete",
"resource": "glue-database",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
databases = client.get_databases()
self.assertFalse("test" in [t.get("Name") for t in databases.get("DatabaseList", [])])
class TestGlueClassifiers(BaseTest):
def test_classifiers_delete(self):
session_factory = self.replay_flight_data("test_glue_classifier_delete")
p = self.load_policy(
{
"name": "glue-classifier-delete",
"resource": "glue-classifier",
"filters": [{"CsvClassifier.Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
classifiers = client.get_classifiers()
self.assertFalse("test" in [t.get('CsvClassifier').get("Name")
for t in classifiers.get("Classifiers", [])])
class GlueMLTransform(BaseTest):
def test_ml_transforms_delete(self):
session_factory = self.replay_flight_data("test_glue_ml_transform_delete")
p = self.load_policy(
{
"name": "glue-ml-transform-delete",
"resource": "glue-ml-transform",
"filters": [{"Name": 'test'}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
ml_transforms = client.get_ml_transforms()
self.assertFalse("test" in [t.get("Name") for t in ml_transforms.get("Transforms", [])])
class TestGlueSecurityConfiguration(BaseTest):
def test_security_configurations_delete(self):
session_factory = self.replay_flight_data("test_glue_security_configuration_delete")
p = self.load_policy(
{
"name": "glue-security-configuration-delete",
"resource": "glue-security-configuration",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
security_configurations = client.get_security_configurations()
self.assertFalse("test" in [t.get("Name")
for t in security_configurations.get("SecurityConfigurations", [])])
def test_kms_alias(self):
factory = self.replay_flight_data("test_glue_security_configuration_kms_key_filter")
p = self.load_policy(
{
"name": "glue-security-configuration-s3-kms-alias",
"resource": "glue-security-configuration",
"filters": [
{
"type": "kms-key",
"key": "c7n:AliasName",
"value": "^(alias/)",
"op": "regex",
"key-type": "cloudwatch"
}
]
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['EncryptionConfiguration']['CloudWatchEncryption']['KmsKeyArn'],
'arn:aws:kms:us-east-1:0123456789012:key/358f7699-4ea5-455a-9c78-1c868301e5a8')
class TestGlueTriggers(BaseTest):
def test_triggers_delete(self):
session_factory = self.replay_flight_data("test_glue_trigger_delete")
p = self.load_policy(
{
"name": "glue-trigger-delete",
"resource": "glue-trigger",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(60)
client = session_factory().client("glue")
triggers = client.get_triggers()
self.assertFalse("test" in [t.get("Name") for t in triggers.get("Triggers", [])])
class TestGlueWorkflows(BaseTest):
def test_workflows_delete(self):
session_factory = self.replay_flight_data("test_glue_workflow_delete")
p = self.load_policy(
{
"name": "glue-workflow-delete",
"resource": "glue-workflow",
"filters": [{"Name": "test"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
workflows = client.list_workflows()
self.assertFalse("test" in [t.get("Name") for t in workflows.get("Workflows", [])])
class TestGlueDataCatalog(BaseTest):
def test_glue_datacat_put_encryption(self):
session_factory = self.replay_flight_data("test_glue_datacat_put_encryption")
client = session_factory().client("glue")
cat_setting = client.get_data_catalog_encryption_settings()
self.assertEqual(cat_setting.get('DataCatalogEncryptionSettings').get(
'EncryptionAtRest').get('SseAwsKmsKeyId'), 'alias/skunk/trails')
p = self.load_policy(
{
"name": "glue-security-config",
"resource": "glue-catalog",
'filters': [{
'type': 'value',
'key': 'DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKmsKeyId',
'value': 'alias/skunk/trails',
'op': 'eq'},
],
"actions": [{
"type": "set-encryption",
"attributes": {
"EncryptionAtRest": {
"CatalogEncryptionMode": "SSE-KMS",
"SseAwsKmsKeyId": "alias/skunk/glue/encrypted"},
},
}]
},
session_factory=session_factory,)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("glue")
data_catalog = client.get_data_catalog_encryption_settings()
self.assertEqual(data_catalog.get('DataCatalogEncryptionSettings').get(
'EncryptionAtRest'),
{'CatalogEncryptionMode': 'SSE-KMS', 'SseAwsKmsKeyId': 'alias/skunk/glue/encrypted'})
def test_glue_catalog_cross_account(self):
session_factory = self.replay_flight_data("test_glue_catalog_cross_account")
p = self.load_policy(
{
"name": "glue-catalog-cross-account",
"resource": "glue-catalog",
"filters": [{"type": "cross-account"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_catalog_remove_matched(self):
session_factory = self.replay_flight_data("test_catalog_remove_matched")
client = session_factory().client("glue")
client.put_resource_policy(PolicyInJson=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": {"AWS": "arn:aws:iam::644160558196:root"},
"Action": "glue:GetDatabase",
"Resource": "arn:aws:glue:us-east-1:644160558196:catalog"
},
{
"Sid": "CrossAccount",
"Effect": "Allow",
"Principal": {"AWS": "arn:aws:iam::123456789123:root"},
"Action": "glue:GetDatabase",
"Resource": "arn:aws:glue:us-east-1:644160558196:catalog"
},
]
}))
p = self.load_policy(
{
"name": "glue-catalog-rm-matched",
"resource": "glue-catalog",
"filters": [{"type": "cross-account"}],
"actions": [{"type": "remove-statements", "statement_ids": "matched"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(client.get_resource_policy().get("PolicyInJson"))
self.assertEqual(len(data.get('Statement')), 1)
self.assertEqual([s['Sid'] for s in data.get('Statement')], ["SpecificAllow"])
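# Note (descriptive): with "statement_ids": "matched", the remove-statements
# action only drops the statements flagged by the preceding cross-account
# filter (here the "CrossAccount" Sid), leaving "SpecificAllow" in place, as
# the assertions above verify.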
def test_remove_statements_validation_error(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "glue-catalog-remove-matched",
"resource": "glue-catalog",
"actions": [{"type": "remove-statements", "statement_ids": "matched"}],
}
)
def test_catalog_change_encryption_event(self):
session_factory = self.replay_flight_data("test_catalog_change_encryption_event")
session = session_factory()
client = session.client("glue")
before_cat_setting = client.get_data_catalog_encryption_settings()
self.assertJmes(
'DataCatalogEncryptionSettings.EncryptionAtRest.CatalogEncryptionMode',
before_cat_setting,
'DISABLED'
)
self.assertJmes(
'DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKmsKeyId',
before_cat_setting,
None
)
p = self.load_policy(
{
"name": "net-change-rbp-cross-account",
"resource": "glue-catalog",
"mode": {
"type": "cloudtrail",
"role": "arn:aws:iam::644160558196:role/CloudCustodianRole",
"events": [
{
"source": "glue.amazonaws.com",
"event": "PutDataCatalogEncryptionSettings",
"ids": "userIdentity.accountId"
}
],
},
'filters': [{
'type': 'value',
'key': 'DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKmsKeyId',
'value': 'alias/skunk/trails',
'op': 'ne'},
],
"actions": [
{
"type": "set-encryption",
"attributes": {
"EncryptionAtRest": {
"CatalogEncryptionMode": "SSE-KMS"
}
}
}
],
},
session_factory=session_factory,
)
p.push(event_data("event-cloud-trail-catalog-set-encryption.json"), None)
after_cat_setting = client.get_data_catalog_encryption_settings()
self.assertJmes(
'DataCatalogEncryptionSettings.EncryptionAtRest.CatalogEncryptionMode',
after_cat_setting,
'SSE-KMS'
)
self.assertJmes(
'DataCatalogEncryptionSettings.EncryptionAtRest.SseAwsKmsKeyId',
after_cat_setting,
'alias/aws/glue'
)
def test_catalog_change_rbp_event(self):
session_factory = self.replay_flight_data("test_catalog_change_rbp_event")
session = session_factory()
client = session.client("glue")
before_cat_setting = client.get_resource_policy()
assert('o-4amkskbcf3' in before_cat_setting.get('PolicyInJson'))
p = self.load_policy(
{
"name": "net-change-rbp-cross-account",
"resource": "glue-catalog",
"mode": {
"type": "cloudtrail",
"role": "arn:aws:iam::644160558196:role/CloudCustodianRole",
"events": [
{
"source": "glue.amazonaws.com",
"event": "PutResourcePolicy",
"ids": "awsRegion"
}
],
},
"filters": [
{
"type": "cross-account",
"whitelist_orgids": [
"o-4amkskbcf1"
]
}
],
"actions": [{"type": "remove-statements", "statement_ids": "matched"}],
},
session_factory=session_factory,
)
p.push(event_data("event-cloud-trail-catalog-put-resource-policy.json"), None)
after_cat_setting = client.get_resource_policy()
assert('o-4amkskbcf3' not in after_cat_setting.get('PolicyInJson'))
|
|
# Thanks to Zhao Yu for converting the .ipynb notebook to
# this simplified Python script that I edited a little.
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [2947, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
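# Illustrative example (not in the original script): one_hot(np.array([[0], [2], [5]]))
# returns a (3, 6) array with a single 1 per row, e.g. its first row is
# [1., 0., 0., 0., 0., 0.].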
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 from each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# exit()
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # 32 time steps per series (X_train was reshaped to [-1, 32, 36] above; originally 128)
# Training
self.learning_rate = 0.0025 # 1e-4
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 640
# LSTM structure
self.n_inputs = len(X_train[0][0]) # 36 features per time step after the reshape (originally 9 sensor channels)
self.n_hidden = 16 # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [36, 16]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [16, 6]
# 'output': tf.Variable(tf.random_normal([4, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [16]
# 'hidden': tf.Variable(tf.random_normal([16], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [None, 32, 36]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
# returns a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
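# Shape note (inferred): because conv2d and max_pool_2x2 both use SAME padding
# with strides of 1, the spatial dimensions stay at 32x36 through the two
# convolutional blocks, which is what allows LSTM_Network to reshape h_pool2
# back to [-1, 32, 36] below.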
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([5, 5, 1, 8])
b_conv1 = bias_variable([8])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([5, 5, 8, 1])
b_conv2 = weight_variable([1])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
feature_mat = h_pool2
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Initially: [0,1,2] = [batch_size, 32, 36] (originally [batch_size, 128, 9])
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] = [32, batch_size, 36]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # n_inputs = 36
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 32 * batch_size, 36
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [32*batch_size, 16]
print("--n_steps--")
print(config.n_steps)
print("--n_steps--")
print(hidden)
# exit()
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps, hidden) # (0, 32, [32*batch_size, 16])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # take only the last time step's output, with shape [?, n_hidden]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
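# Rough shape flow through LSTM_Network (illustrative summary, batch size B):
#   input [B, 32, 36] -> [B, 32, 36, 1] -> conv1/pool -> [B, 32, 36, 8]
#   -> conv2/pool -> [B, 32, 36, 1] -> reshape -> [B, 32, 36]
#   -> transpose -> [32, B, 36] -> reshape -> [32*B, 36]
#   -> hidden matmul -> [32*B, 16] -> split -> 32 tensors of shape [B, 16]
#   -> 2-layer LSTM -> last output [B, 16] -> output matmul -> [B, 6]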
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# exit()
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
tf.initialize_all_variables().run()
best_accuracy = 0.0
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 640)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (640, 7353, 640)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
|
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'gaprice_convert_assy_file_to_contigs'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from gaprice_convert_assy_file_to_contigs.gaprice_convert_assy_file_to_contigsImpl import gaprice_convert_assy_file_to_contigs
impl_gaprice_convert_assy_file_to_contigs = gaprice_convert_assy_file_to_contigs(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
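# Note (descriptive): when the X-IP headers are trusted (the default unless
# dont_trust_x_ip_headers is set to 'true' in the config), the first address
# in X-Forwarded-For wins, then X-Real-IP, and only then the socket-level
# REMOTE_ADDR.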
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'gaprice_convert_assy_file_to_contigs'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_gaprice_convert_assy_file_to_contigs.convert,
name='gaprice_convert_assy_file_to_contigs.convert',
types=[dict])
self.method_authentication['gaprice_convert_assy_file_to_contigs.convert'] = 'required'
self.rpc_service.add(impl_gaprice_convert_assy_file_to_contigs.status,
name='gaprice_convert_assy_file_to_contigs.status',
types=[dict])
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = "Authentication required for " + \
"gaprice_convert_assy_file_to_contigs but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import redis, frappe, re
from six.moves import cPickle as pickle
from frappe.utils import cstr
from six import iteritems
class RedisWrapper(redis.Redis):
"""Redis client that will automatically prefix conf.db_name"""
def make_key(self, key, user=None, shared=False):
if shared:
return key
if user:
if user == True:
user = frappe.session.user
key = "user:{0}:{1}".format(user, key)
return "{0}|{1}".format(frappe.conf.db_name, key).encode('utf-8')
def set_value(self, key, val, user=None, expires_in_sec=None):
"""Sets cache value.
:param key: Cache key
:param val: Value to be cached
:param user: Prepends key with User
:param expires_in_sec: Expire value of this key in X seconds
"""
key = self.make_key(key, user)
if not expires_in_sec:
frappe.local.cache[key] = val
try:
if expires_in_sec:
self.setex(key, pickle.dumps(val), expires_in_sec)
else:
self.set(key, pickle.dumps(val))
except redis.exceptions.ConnectionError:
return None
def get_value(self, key, generator=None, user=None, expires=False):
"""Returns cache value. If not found and generator function is
given, it will call the generator.
:param key: Cache key.
:param generator: Function to be called to generate a value if `None` is returned.
:param expires: If the key is supposed to be with an expiry, don't store it in frappe.local
"""
original_key = key
key = self.make_key(key, user)
if key in frappe.local.cache:
val = frappe.local.cache[key]
else:
val = None
try:
val = self.get(key)
except redis.exceptions.ConnectionError:
pass
if val is not None:
val = pickle.loads(val)
if not expires:
if val is None and generator:
val = generator()
self.set_value(original_key, val, user=user)
else:
frappe.local.cache[key] = val
return val
def get_all(self, key):
ret = {}
for k in self.get_keys(key):
ret[k] = self.get_value(k)
return ret
def get_keys(self, key):
"""Return keys starting with `key`."""
try:
key = self.make_key(key + "*")
return self.keys(key)
except redis.exceptions.ConnectionError:
regex = re.compile(cstr(key).replace("|", r"\|").replace("*", r"[\w]*"))
return [k for k in list(frappe.local.cache) if regex.match(k.decode())]
def delete_keys(self, key):
"""Delete keys with wildcard `*`."""
try:
self.delete_value(self.get_keys(key), make_keys=False)
except redis.exceptions.ConnectionError:
pass
def delete_key(self, *args, **kwargs):
self.delete_value(*args, **kwargs)
def delete_value(self, keys, user=None, make_keys=True, shared=False):
"""Delete value, list of values."""
if not isinstance(keys, (list, tuple)):
keys = (keys, )
for key in keys:
if make_keys:
key = self.make_key(key, shared=shared)
if key in frappe.local.cache:
del frappe.local.cache[key]
try:
self.delete(key)
except redis.exceptions.ConnectionError:
pass
def lpush(self, key, *values):
super(redis.Redis, self).lpush(self.make_key(key), *values)
def rpush(self, key, *values):
super(redis.Redis, self).rpush(self.make_key(key), *values)
def lpop(self, key):
return super(redis.Redis, self).lpop(self.make_key(key))
def llen(self, key):
return super(redis.Redis, self).llen(self.make_key(key))
def hset(self, name, key, value, shared=False):
_name = self.make_key(name, shared=shared)
# set in local
if _name not in frappe.local.cache:
frappe.local.cache[_name] = {}
frappe.local.cache[_name][key] = value
# set in redis
try:
super(redis.Redis, self).hset(_name,
key, pickle.dumps(value))
except redis.exceptions.ConnectionError:
pass
def hgetall(self, name):
return {key: pickle.loads(value) for key, value in
iteritems(super(redis.Redis, self).hgetall(self.make_key(name)))}
def hget(self, name, key, generator=None, shared=False):
_name = self.make_key(name, shared=shared)
if _name not in frappe.local.cache:
frappe.local.cache[_name] = {}
if key in frappe.local.cache[_name]:
return frappe.local.cache[_name][key]
value = None
try:
value = super(redis.Redis, self).hget(_name, key)
except redis.exceptions.ConnectionError:
pass
if value:
value = pickle.loads(value)
frappe.local.cache[_name][key] = value
elif generator:
value = generator()
try:
self.hset(name, key, value)
except redis.exceptions.ConnectionError:
pass
return value
def hdel(self, name, key, shared=False):
_name = self.make_key(name, shared=shared)
if _name in frappe.local.cache:
if key in frappe.local.cache[_name]:
del frappe.local.cache[_name][key]
try:
super(redis.Redis, self).hdel(_name, key)
except redis.exceptions.ConnectionError:
pass
def hdel_keys(self, name_starts_with, key):
"""Delete hash names with wildcard `*` and key"""
for name in frappe.cache().get_keys(name_starts_with):
name = name.split("|", 1)[1]
self.hdel(name, key)
def hkeys(self, name):
try:
return super(redis.Redis, self).hkeys(self.make_key(name))
except redis.exceptions.ConnectionError:
return []
def sadd(self, name, *values):
"""Add a member/members to a given set"""
super(redis.Redis, self).sadd(self.make_key(name), *values)
def srem(self, name, *values):
"""Remove a specific member/list of members from the set"""
super(redis.Redis, self).srem(self.make_key(name), *values)
def sismember(self, name, value):
"""Returns True or False based on if a given value is present in the set"""
return super(redis.Redis, self).sismember(self.make_key(name), value)
def spop(self, name):
"""Removes and returns a random member from the set"""
return super(redis.Redis, self).spop(self.make_key(name))
def srandmember(self, name, count=None):
"""Return a random member (or `count` random members) from the set"""
return super(redis.Redis, self).srandmember(self.make_key(name), count)
def smembers(self, name):
"""Return all members of the set"""
return super(redis.Redis, self).smembers(self.make_key(name))
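# Key layout produced by make_key() above (illustrative, derived from the code):
#   make_key("doc_count")            -> b"<conf.db_name>|doc_count"
#   make_key("recent", user=True)    -> b"<conf.db_name>|user:<session user>:recent"
#   make_key("assets", shared=True)  -> "assets"   (returned as-is, no prefix)
# set_value()/get_value() pickle the payload for Redis and mirror it in
# frappe.local.cache so repeated reads in the same request avoid a round trip.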
|
|
# Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from django.contrib.gis.geos.mutable_list import ListMixin
from django.utils import six
class UserListA(ListMixin):
_mytype = tuple
def __init__(self, i_list, *args, **kwargs):
self._list = self._mytype(i_list)
super(UserListA, self).__init__(*args, **kwargs)
def __len__(self): return len(self._list)
def __str__(self): return str(self._list)
def __repr__(self): return repr(self._list)
def _set_list(self, length, items):
# this would work:
# self._list = self._mytype(items)
# but then we wouldn't be testing the length parameter
itemList = ['x'] * length
for i, v in enumerate(items):
itemList[i] = v
self._list = self._mytype(itemList)
def _get_single_external(self, index):
return self._list[index]
class UserListB(UserListA):
_mytype = list
def _set_single(self, index, value):
self._list[index] = value
def nextRange(length):
nextRange.start += 100
return range(nextRange.start, nextRange.start + length)
nextRange.start = 0
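# nextRange() hands out a fresh, non-overlapping block of integers on each call
# (100..., then 200..., and so on), so every slice assignment below writes values
# that cannot collide with anything already in the lists under test.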
class ListMixinTest(unittest.TestCase):
"""
Tests base class ListMixin by comparing a list clone which is
a ListMixin subclass with a real Python list.
"""
limit = 3
listType = UserListA
def lists_of_len(self, length=None):
if length is None: length = self.limit
pl = list(range(length))
return pl, self.listType(pl)
def limits_plus(self, b):
return range(-self.limit - b, self.limit + b)
def step_range(self):
return list(range(-1 - self.limit, 0)) + list(range(1, 1 + self.limit))
def test01_getslice(self):
'Slice retrieval'
pl, ul = self.lists_of_len()
for i in self.limits_plus(1):
self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
for j in self.limits_plus(1):
self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i,j))
for k in self.step_range():
self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i,j,k))
for k in self.step_range():
self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i,k))
self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i,k))
for k in self.step_range():
self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))
def test02_setslice(self):
'Slice assignment'
def setfcn(x,i,j,k,L): x[i:j:k] = range(L)
pl, ul = self.lists_of_len()
for slen in range(self.limit + 1):
ssl = nextRange(slen)
ul[:] = ssl
pl[:] = ssl
self.assertEqual(pl, ul[:], 'set slice [:]')
for i in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:] = ssl
pl[i:] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))
ssl = nextRange(slen)
ul[:i] = ssl
pl[:i] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))
for j in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:j] = ssl
pl[i:j] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))
for k in self.step_range():
ssl = nextRange( len(ul[i:j:k]) )
ul[i:j:k] = ssl
pl[i:j:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))
sliceLen = len(ul[i:j:k])
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
if sliceLen > 2:
self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)
for k in self.step_range():
ssl = nextRange( len(ul[i::k]) )
ul[i::k] = ssl
pl[i::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))
ssl = nextRange( len(ul[:i:k]) )
ul[:i:k] = ssl
pl[:i:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))
for k in self.step_range():
ssl = nextRange(len(ul[::k]))
ul[::k] = ssl
pl[::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))
def test03_delslice(self):
'Delete slice'
for Len in range(self.limit):
pl, ul = self.lists_of_len(Len)
del pl[:]
del ul[:]
self.assertEqual(pl[:], ul[:], 'del slice [:]')
for i in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:]
del ul[i:]
self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
pl, ul = self.lists_of_len(Len)
del pl[:i]
del ul[:i]
self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
for j in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:j]
del ul[i:j]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i,j))
for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
pl, ul = self.lists_of_len(Len)
del pl[i:j:k]
del ul[i:j:k]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i,j,k))
for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
pl, ul = self.lists_of_len(Len)
del pl[:i:k]
del ul[:i:k]
self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i,k))
pl, ul = self.lists_of_len(Len)
del pl[i::k]
del ul[i::k]
self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i,k))
for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
pl, ul = self.lists_of_len(Len)
del pl[::k]
del ul[::k]
self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))
def test04_get_set_del_single(self):
'Get/set/delete single item'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
pl[i] = 100
ul[i] = 100
self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
del pl[i]
del ul[i]
self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)
def test05_out_of_range_exceptions(self):
'Out of range exceptions'
def setfcn(x, i): x[i] = 20
def getfcn(x, i): return x[i]
def delfcn(x, i): del x[i]
pl, ul = self.lists_of_len()
for i in (-1 - self.limit, self.limit):
self.assertRaises(IndexError, setfcn, ul, i) # 'set index %d' % i)
self.assertRaises(IndexError, getfcn, ul, i) # 'get index %d' % i)
self.assertRaises(IndexError, delfcn, ul, i) # 'del index %d' % i)
def test06_list_methods(self):
'List methods'
pl, ul = self.lists_of_len()
pl.append(40)
ul.append(40)
self.assertEqual(pl[:], ul[:], 'append')
pl.extend(range(50,55))
ul.extend(range(50,55))
self.assertEqual(pl[:], ul[:], 'extend')
pl.reverse()
ul.reverse()
self.assertEqual(pl[:], ul[:], 'reverse')
for i in self.limits_plus(1):
pl, ul = self.lists_of_len()
pl.insert(i,50)
ul.insert(i,50)
self.assertEqual(pl[:], ul[:], 'insert at %d' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(), ul.pop(), 'popped value')
self.assertEqual(pl[:], ul[:], 'after pop')
pl, ul = self.lists_of_len()
def popfcn(x, i): x.pop(i)
self.assertRaises(IndexError, popfcn, ul, self.limit)
self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)
pl, ul = self.lists_of_len()
for val in range(self.limit):
self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)
for val in self.limits_plus(2):
self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)
for val in range(self.limit):
pl, ul = self.lists_of_len()
pl.remove(val)
ul.remove(val)
self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)
def indexfcn(x, v): return x.index(v)
def removefcn(x, v): return x.remove(v)
self.assertRaises(ValueError, indexfcn, ul, 40)
self.assertRaises(ValueError, removefcn, ul, 40)
def test07_allowed_types(self):
'Type-restricted list'
pl, ul = self.lists_of_len()
ul._allowed = six.integer_types
ul[1] = 50
ul[:2] = [60, 70, 80]
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), ('hello','goodbye'))
def test08_min_length(self):
'Length limits'
pl, ul = self.lists_of_len()
ul._minlength = 1
def delfcn(x,i): del x[:i]
def setfcn(x,i): x[:i] = []
for i in range(self.limit - ul._minlength + 1, self.limit + 1):
self.assertRaises(ValueError, delfcn, ul, i)
self.assertRaises(ValueError, setfcn, ul, i)
del ul[:ul._minlength]
ul._maxlength = 4
for i in range(0, ul._maxlength - len(ul)):
ul.append(i)
self.assertRaises(ValueError, ul.append, 10)
def test09_iterable_check(self):
'Error on assigning non-iterable to slice'
pl, ul = self.lists_of_len(self.limit + 1)
def setfcn(x, i, v): x[i] = v
self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), 2)
def test10_checkindex(self):
'Index check'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
if i < 0:
self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
else:
self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')
for i in (-self.limit - 1, self.limit):
self.assertRaises(IndexError, ul._checkindex, i)
ul._IndexError = TypeError
self.assertRaises(TypeError, ul._checkindex, -self.limit - 1)
def test_11_sorting(self):
'Sorting'
pl, ul = self.lists_of_len()
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort()
ul.sort()
self.assertEqual(pl[:], ul[:], 'sort')
mid = pl[len(pl) // 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort(reverse=True)
ul.sort(reverse=True)
self.assertEqual(pl[:], ul[:], 'sort w/ reverse')
mid = pl[len(pl) // 2]
pl.sort(key=lambda x: (mid-x)**2)
ul.sort(key=lambda x: (mid-x)**2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
def test_12_arithmetic(self):
'Arithmetic'
pl, ul = self.lists_of_len()
al = list(range(10,14))
self.assertEqual(list(pl + al), list(ul + al), 'add')
self.assertEqual(type(ul), type(ul + al), 'type of add result')
self.assertEqual(list(al + pl), list(al + ul), 'radd')
self.assertEqual(type(al), type(al + ul), 'type of radd result')
objid = id(ul)
pl += al
ul += al
self.assertEqual(pl[:], ul[:], 'in-place add')
self.assertEqual(objid, id(ul), 'in-place add id')
for n in (-1,0,1,3):
pl, ul = self.lists_of_len()
self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
objid = id(ul)
pl *= n
ul *= n
self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)
pl, ul = self.lists_of_len()
self.assertEqual(pl, ul, 'cmp for equal')
self.assertFalse(ul == pl + [2], 'cmp for not equal')
self.assertTrue(pl >= ul, 'cmp for gte self')
self.assertTrue(pl <= ul, 'cmp for lte self')
self.assertTrue(ul >= pl, 'cmp for self gte')
self.assertTrue(ul <= pl, 'cmp for self lte')
self.assertTrue(pl + [5] > ul, 'cmp')
self.assertTrue(pl + [5] >= ul, 'cmp')
self.assertTrue(pl < ul + [2], 'cmp')
self.assertTrue(pl <= ul + [2], 'cmp')
self.assertTrue(ul + [5] > pl, 'cmp')
self.assertTrue(ul + [5] >= pl, 'cmp')
self.assertTrue(ul < pl + [2], 'cmp')
self.assertTrue(ul <= pl + [2], 'cmp')
# Also works with a custom IndexError
ul_longer = ul + [2]
ul_longer._IndexError = TypeError
ul._IndexError = TypeError
self.assertFalse(ul_longer == pl)
self.assertFalse(ul == ul_longer)
self.assertTrue(ul_longer > ul)
pl[1] = 20
self.assertTrue(pl > ul, 'cmp for gt self')
self.assertTrue(ul < pl, 'cmp for self lt')
pl[1] = -20
self.assertTrue(pl < ul, 'cmp for lt self')
self.assertTrue(pl < ul, 'cmp for lt self')
class ListMixinTestSingle(ListMixinTest):
listType = UserListB
|
|
"""Command line interface tests."""
# pylint: disable=missing-function-docstring,protected-access,no-self-use
# pylint: disable=redefined-outer-name,missing-class-docstring
from datetime import datetime, timedelta
from os.path import expanduser
from typing import NamedTuple, Optional
import json
import pathlib
import re
from click.testing import CliRunner
from pytest_mock.plugin import MockerFixture
import pytest
import requests_mock as req_mock
from xirvik.typing import (FileDownloadStrategy, FilePriority,
TorrentTrackedFile)
from ..root import xirvik
def test_fix_rtorrent(requests_mock: req_mock.Mocker, runner: CliRunner):
requests_mock.get('https://somehost.com:443/userpanel/index.php/services/'
'restart/rtorrent')
assert runner.invoke(
xirvik, ('rtorrent', 'fix', '-H', 'somehost.com')).exit_code == 0
def test_fix_rtorrent_fail(requests_mock: req_mock.Mocker, runner: CliRunner):
requests_mock.get(
'https://somehost.com:443/userpanel/index.php/services/'
'restart/rtorrent',
status_code=500)
assert runner.invoke(
xirvik, ('rtorrent', 'fix', '-H', 'somehost.com')).exit_code == 1
def test_start_torrents_zero_files(runner: CliRunner, tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
assert runner.invoke(xirvik, ('rtorrent', 'add', '-H', 'machine.com',
expanduser('~'))).exit_code == 0
def test_start_torrents_zero_torrent_files(runner: CliRunner,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
non_torrent = tmp_path / 'a.not-a-torrent'
non_torrent.write_bytes(b'\xFF')
assert runner.invoke(xirvik, ('rtorrent', 'add', '-H', 'machine.com',
expanduser('~'))).exit_code == 0
def test_start_torrents_normal(
runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
torrent = tmp_path / 'a.torrent'
torrent.write_bytes(b'\xFF')
m = requests_mock.post(
'https://machine.com:443/rtorrent/php/addtorrent.php?')
assert runner.invoke(xirvik, ('rtorrent', 'add', '-H', 'machine.com',
expanduser('~'))).exit_code == 0
assert not torrent.is_file()
assert m.called_once is True
def test_start_torrents_error_uploading(
runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
torrent = tmp_path / 'a.torrent'
torrent.write_bytes(b'\xFF')
m = requests_mock.post(
'https://machine.com:443/rtorrent/php/addtorrent.php?',
status_code=500)
assert runner.invoke(xirvik, ('rtorrent', 'add', '-H', 'machine.com',
expanduser('~'))).exit_code == 0
assert torrent.is_file()
assert m.called_once is True
def test_start_torrents_start_stopped(
runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
torrent = tmp_path / 'a.torrent'
torrent.write_text('')
m = requests_mock.post(
'https://machine.com:443/rtorrent/php/addtorrent.php?')
assert runner.invoke(xirvik,
('rtorrent', 'add', '--start-stopped', '-d', '-H',
'machine.com', expanduser('~'))).exit_code == 0
assert m.called_once is True
assert not torrent.is_file()
assert (m.last_request and m.last_request.text and
'name="torrents_start_stopped"\r\n\r\non' in m.last_request.text)
def test_add_ftp_user(runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
m = requests_mock.post(
'https://machine.com:443/userpanel/index.php/ftp_users/add_user')
assert runner.invoke(xirvik, ('ftp', 'add-user', '-H', 'machine.com',
'newuser', 'newpass')).exit_code == 0
assert m.called_once is True
def test_add_ftp_user_error(runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
m = requests_mock.post(
'https://machine.com:443/userpanel/index.php/ftp_users/add_user',
status_code=500)
assert runner.invoke(xirvik, ('ftp', 'add-user', '-H', 'machine.com',
'newuser', 'newpass')).exit_code != 0
assert m.called_once is True
def test_delete_ftp_user(runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
m = requests_mock.get(
('https://machine.com:443/userpanel/index.php/ftp_users/delete/'
'bmV3dXNlcg==')) # cspell: disable-line
assert runner.invoke(
xirvik,
('ftp', 'delete-user', '-H', 'machine.com', 'newuser')).exit_code == 0
assert m.called_once is True
def test_delete_ftp_user_error(
runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
m = requests_mock.get(
('https://machine.com:443/userpanel/index.php/ftp_users/delete/'
'bmV3dXNlcg=='), # cspell: disable-line
status_code=500)
assert runner.invoke(
xirvik,
('ftp', 'delete-user', '-H', 'machine.com', 'newuser')).exit_code != 0
assert m.called_once is True
def test_list_ftp_users(runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
requests_mock.get('https://machine.com:443/userpanel/index.php/ftp_users',
text='''<table>
<tbody>
<tr class="gradeX">
<td>someuser</td>
<td>Yes</td>
<td>/somedir</td>
<td></td>
</tr>
</tbody>
</table>''')
run = runner.invoke(xirvik, ('ftp', 'list-users', '-H', 'machine.com'))
assert run.exit_code == 0
assert 'someuser' in run.output
assert '/somedir' in run.output
def test_list_ftp_users_error(
runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
requests_mock.get('https://machine.com:443/userpanel/index.php/ftp_users',
status_code=500)
assert runner.invoke(
xirvik, ('ftp', 'list-users', '-H', 'machine.com')).exit_code != 0
def test_authorize_ip(runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
m = requests_mock.get(
('https://machine.com:443/userpanel/index.php/virtual_machine/'
'authorize_ip'))
assert runner.invoke(
xirvik, ('vm', 'authorize-ip', '-H', 'machine.com')).exit_code == 0
assert m.called_once is True
def test_authorize_ip_error(runner: CliRunner, requests_mock: req_mock.Mocker,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
m = requests_mock.get(
('https://machine.com:443/userpanel/index.php/virtual_machine/'
'authorize_ip'),
status_code=500)
assert runner.invoke(
xirvik, ('vm', 'authorize-ip', '-H', 'machine.com')).exit_code != 0
assert m.called_once is True
class MinimalTorrentDict(NamedTuple):
hash: str
custom1: Optional[str] = None
left_bytes: int = 0
name: str = ''
ratio: float = 0
creation_date: Optional[datetime] = None
state_changed: Optional[datetime] = None
is_hash_checking: bool = False
base_path: Optional[str] = None
finished: Optional[datetime] = None
def test_list_torrents(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed')
]
lines = runner.invoke(xirvik,
('rtorrent', 'list-torrents')).output.splitlines()
assert re.match(r'^Hash\s+Name\s+Label\s+Finished', lines[0])
assert re.match(r'^hash1\s+The Name\s+TEST me', lines[1])
def test_list_torrents_json(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed')
]
data = json.loads(
runner.invoke(
xirvik,
('rtorrent', 'list-torrents', '-F', 'json')).output.strip())
assert isinstance(data, list)
assert data[0]['name'] == 'The Name'
def test_list_torrents_json_reversed(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed'),
MinimalTorrentDict(
'hash2',
custom1='TEST me',
name='The Name2',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed')
]
data = json.loads(
runner.invoke(xirvik, ('rtorrent', 'list-torrents', '--reverse-order',
'--table-format', 'json')).output.strip())
assert isinstance(data, list)
assert data[0]['hash'] == 'hash2'
assert data[1]['hash'] == 'hash1'
def test_list_torrents_json_sort_finished(
runner: CliRunner, mocker: MockerFixture, tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed',
finished=datetime.now()),
MinimalTorrentDict(
'hash2',
custom1='TEST me',
name='The Name2',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed',
finished=datetime.now() - timedelta(days=7)),
]
data = json.loads(
runner.invoke(xirvik,
('rtorrent', 'list-torrents', '--sort', 'finished',
'--table-format', 'json')).output.strip())
assert isinstance(data, list)
assert data[0]['hash'] == 'hash2'
assert data[1]['hash'] == 'hash1'
def test_list_torrents_json_sort_finished_missing(
runner: CliRunner, mocker: MockerFixture, tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed'),
MinimalTorrentDict(
'hash2',
custom1='TEST me',
name='The Name2',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed',
finished=datetime.now() - timedelta(days=7)),
]
data = json.loads(
runner.invoke(xirvik,
('rtorrent', 'list-torrents', '--sort', 'finished',
'--table-format', 'json')).output.strip())
assert isinstance(data, list)
assert data[0]['hash'] == 'hash1'
assert data[1]['hash'] == 'hash2'
def test_list_torrents_json_sort_missing_attr(
runner: CliRunner, mocker: MockerFixture, tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed',
finished=datetime.now() - timedelta(days=8)),
MinimalTorrentDict(
'hash2',
name='The Name2',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed'),
]
data = json.loads(
runner.invoke(xirvik, ('rtorrent', 'list-torrents', '--sort', 'label',
'--table-format', 'json')).output.strip())
assert isinstance(data, list)
assert data[0]['hash'] == 'hash2'
assert data[1]['hash'] == 'hash1'
def test_list_torrents_json_sort_other_criteria(
runner: CliRunner, mocker: MockerFixture, tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_torrents.return_value = [
MinimalTorrentDict(
'hash1',
custom1='TEST me',
name='The Name',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed'),
MinimalTorrentDict(
'hash2',
name='AThe Name2',
is_hash_checking=False,
base_path=f'/torrents/{client_mock.return_value.name}/_completed'),
]
data = json.loads(
runner.invoke(xirvik, ('rtorrent', 'list-torrents', '--sort', 'name',
'--table-format', 'json')).output.strip())
assert isinstance(data, list)
assert data[0]['hash'] == 'hash2'
assert data[1]['hash'] == 'hash1'
def test_list_files(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_files.return_value = [
TorrentTrackedFile('file1', 512, 512, 1000, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
TorrentTrackedFile('file2', 512, 512, 1200, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
]
data = json.loads(
runner.invoke(xirvik, ('rtorrent', 'list-files', '--table-format',
'json', 'hash1')).output.strip())
assert isinstance(data, list)
assert data[0]['name'] == 'file1'
assert data[1]['name'] == 'file2'
def test_list_files_reversed(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_files.return_value = [
TorrentTrackedFile('file1', 512, 512, 1000, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
TorrentTrackedFile('file2', 512, 512, 1200, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
]
data = json.loads(
runner.invoke(xirvik,
('rtorrent', 'list-files', '--reverse-order',
'--table-format', 'json', 'hash1')).output.strip())
assert isinstance(data, list)
assert data[0]['name'] == 'file2'
assert data[1]['name'] == 'file1'
def test_list_files_sort_size(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_files.return_value = [
TorrentTrackedFile('file1', 512, 512, 1000, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
TorrentTrackedFile('file2', 512, 512, 1200, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
]
data = json.loads(
runner.invoke(
xirvik,
('rtorrent', 'list-files', '--table-format', 'json', '--sort',
'size_bytes', '--reverse-order', 'hash1')).output.strip())
assert isinstance(data, list)
assert data[0]['name'] == 'file2'
assert data[1]['name'] == 'file1'
def test_list_files_normal(runner: CliRunner, mocker: MockerFixture,
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch):
netrc = tmp_path / '.netrc'
netrc.write_text('machine machine.com login somename password pass\n')
monkeypatch.setenv('HOME', str(tmp_path))
client_mock = mocker.patch('xirvik.commands.simple.ruTorrentClient')
client_mock.return_value.list_files.return_value = [
TorrentTrackedFile('file1', 512, 512, 1000, FilePriority.NORMAL,
FileDownloadStrategy.NORMAL),
]
lines = runner.invoke(xirvik,
('rtorrent', 'list-files', '--sort', 'size_bytes',
'--reverse-order', 'hash1')).output.splitlines()
assert re.match(
r'^Name\s+Size\s+Downloaded Pieces\s+Number of Pieces\s+Priority ID',
lines[0])
assert re.match(r'file1\s+1000\s+512\s+512', lines[1])
|
|
import os
import io
import re
import pytest
from contextlib import redirect_stdout
import numpy as np
from sklearn.neighbors import KDTree
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import pickle
import joblib
import scipy
from pynndescent import NNDescent, PyNNDescentTransformer
def test_nn_descent_neighbor_accuracy(nn_data, seed):
knn_indices, _ = NNDescent(
nn_data, "euclidean", {}, 10, random_state=np.random.RandomState(seed)
)._neighbor_graph
tree = KDTree(nn_data)
true_indices = tree.query(nn_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (nn_data.shape[0] * 10)
assert percent_correct >= 0.98, (
"NN-descent did not get 98% accuracy on nearest neighbors"
)
def test_angular_nn_descent_neighbor_accuracy(nn_data, seed):
knn_indices, _ = NNDescent(
nn_data, "cosine", {}, 10, random_state=np.random.RandomState(seed)
)._neighbor_graph
angular_data = normalize(nn_data, norm="l2")
tree = KDTree(angular_data)
true_indices = tree.query(angular_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (nn_data.shape[0] * 10)
assert percent_correct >= 0.98, (
"NN-descent did not get 98% accuracy on nearest neighbors"
)
@pytest.mark.skipif(
list(map(int, scipy.version.version.split("."))) < [1, 3, 0],
reason="requires scipy >= 1.3.0",
)
def test_sparse_nn_descent_neighbor_accuracy(sparse_nn_data, seed):
knn_indices, _ = NNDescent(
sparse_nn_data, "euclidean", n_neighbors=20, random_state=None
)._neighbor_graph
tree = KDTree(sparse_nn_data.toarray())
true_indices = tree.query(sparse_nn_data.toarray(), 10, return_distance=False)
num_correct = 0.0
for i in range(sparse_nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (sparse_nn_data.shape[0] * 10)
assert percent_correct >= 0.85, (
"Sparse NN-descent did not get 85% accuracy on nearest neighbors"
)
@pytest.mark.skipif(
list(map(int, scipy.version.version.split("."))) < [1, 3, 0],
reason="requires scipy >= 1.3.0",
)
def test_sparse_angular_nn_descent_neighbor_accuracy(sparse_nn_data):
knn_indices, _ = NNDescent(
sparse_nn_data, "cosine", {}, 20, random_state=None
)._neighbor_graph
angular_data = normalize(sparse_nn_data, norm="l2").toarray()
tree = KDTree(angular_data)
true_indices = tree.query(angular_data, 10, return_distance=False)
num_correct = 0.0
for i in range(sparse_nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (sparse_nn_data.shape[0] * 10)
assert percent_correct >= 0.85, (
"Sparse angular NN-descent did not get 85% accuracy on nearest neighbors"
)
def test_nn_descent_query_accuracy(nn_data):
nnd = NNDescent(nn_data[200:], "euclidean", n_neighbors=10, random_state=None)
knn_indices, _ = nnd.query(nn_data[:200], k=10, epsilon=0.2)
tree = KDTree(nn_data[200:])
true_indices = tree.query(nn_data[:200], 10, return_distance=False)
num_correct = 0.0
for i in range(true_indices.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (true_indices.shape[0] * 10)
assert percent_correct >= 0.95, (
"NN-descent query did not get 95% " "accuracy on nearest neighbors"
)
def test_nn_descent_query_accuracy_angular(nn_data):
nnd = NNDescent(nn_data[200:], "cosine", n_neighbors=30, random_state=None)
knn_indices, _ = nnd.query(nn_data[:200], k=10, epsilon=0.32)
nn = NearestNeighbors(metric="cosine").fit(nn_data[200:])
true_indices = nn.kneighbors(nn_data[:200], n_neighbors=10, return_distance=False)
num_correct = 0.0
for i in range(true_indices.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (true_indices.shape[0] * 10)
assert percent_correct >= 0.95, (
"NN-descent query did not get 95% " "accuracy on nearest neighbors"
)
def test_sparse_nn_descent_query_accuracy(sparse_nn_data):
nnd = NNDescent(
sparse_nn_data[200:], "euclidean", n_neighbors=15, random_state=None
)
knn_indices, _ = nnd.query(sparse_nn_data[:200], k=10, epsilon=0.24)
tree = KDTree(sparse_nn_data[200:].toarray())
true_indices = tree.query(sparse_nn_data[:200].toarray(), 10, return_distance=False)
num_correct = 0.0
for i in range(true_indices.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (true_indices.shape[0] * 10)
assert percent_correct >= 0.95, (
"Sparse NN-descent query did not get 95% " "accuracy on nearest neighbors"
)
def test_sparse_nn_descent_query_accuracy_angular(sparse_nn_data):
nnd = NNDescent(sparse_nn_data[200:], "cosine", n_neighbors=50, random_state=None)
knn_indices, _ = nnd.query(sparse_nn_data[:200], k=10, epsilon=0.36)
nn = NearestNeighbors(metric="cosine").fit(sparse_nn_data[200:].toarray())
true_indices = nn.kneighbors(
sparse_nn_data[:200].toarray(), n_neighbors=10, return_distance=False
)
num_correct = 0.0
for i in range(true_indices.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (true_indices.shape[0] * 10)
assert percent_correct >= 0.95, (
"Sparse NN-descent query did not get 95% " "accuracy on nearest neighbors"
)
def test_transformer_equivalence(nn_data):
N_NEIGHBORS = 15
EPSILON = 0.15
train = nn_data[:400]
test = nn_data[:200]
# Note we shift N_NEIGHBORS to conform to sklearn's KNeighborsTransformer definition
nnd = NNDescent(
data=train, n_neighbors=N_NEIGHBORS + 1, random_state=42, compressed=False
)
indices, dists = nnd.query(test, k=N_NEIGHBORS, epsilon=EPSILON)
sort_idx = np.argsort(indices, axis=1)
indices_sorted = np.vstack(
[indices[i, sort_idx[i]] for i in range(sort_idx.shape[0])]
)
dists_sorted = np.vstack([dists[i, sort_idx[i]] for i in range(sort_idx.shape[0])])
# Note we shift N_NEIGHBORS to conform to sklearn's KNeighborsTransformer definition
transformer = PyNNDescentTransformer(
n_neighbors=N_NEIGHBORS, search_epsilon=EPSILON, random_state=42
).fit(train, compress_index=False)
Xt = transformer.transform(test).sorted_indices()
assert np.all(Xt.indices == indices_sorted.flatten())
assert np.allclose(Xt.data, dists_sorted.flat)
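# Note on the "+ 1" above: sklearn's KNeighborsTransformer convention counts each
# sample as its own neighbor, so PyNNDescentTransformer builds its index with one
# extra neighbor; the hand-built NNDescent index therefore uses N_NEIGHBORS + 1 so
# both searches are backed by graphs of the same size.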
def test_random_state_none(nn_data, spatial_data):
knn_indices, _ = NNDescent(
nn_data, "euclidean", {}, 10, random_state=None
)._neighbor_graph
tree = KDTree(nn_data)
true_indices = tree.query(nn_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
percent_correct = num_correct / (nn_data.shape[0] * 10)
assert percent_correct >= 0.99, (
"NN-descent did not get 99% " "accuracy on nearest neighbors"
)
def test_deterministic():
seed = np.random.RandomState(42)
x1 = seed.normal(0, 100, (1000, 50))
x2 = seed.normal(0, 100, (1000, 50))
index1 = NNDescent(x1, random_state=np.random.RandomState(42))
neighbors1, distances1 = index1.query(x2)
index2 = NNDescent(x1, random_state=np.random.RandomState(42))
neighbors2, distances2 = index2.query(x2)
np.testing.assert_equal(neighbors1, neighbors2)
np.testing.assert_equal(distances1, distances2)
# This tests a recursion error on cosine metric reported at:
# https://github.com/lmcinnes/umap/issues/99
# graph_data used is a cut-down version of that provided by @scharron
# It contains lots of all-zero vectors and some other duplicates
def test_rp_trees_should_not_stack_overflow_with_duplicate_data(seed, cosine_hang_data):
n_neighbors = 10
knn_indices, _ = NNDescent(
cosine_hang_data,
"cosine",
{},
n_neighbors,
random_state=np.random.RandomState(seed),
n_trees=20,
)._neighbor_graph
for i in range(cosine_hang_data.shape[0]):
assert len(knn_indices[i]) == len(
np.unique(knn_indices[i])
), "Duplicate graph_indices in knn graph"
def test_deduplicated_data_behaves_normally(seed, cosine_hang_data):
data = np.unique(cosine_hang_data, axis=0)
data = data[~np.all(data == 0, axis=1)]
data = data[:1000]
n_neighbors = 10
knn_indices, _ = NNDescent(
data,
"cosine",
{},
n_neighbors,
random_state=np.random.RandomState(seed),
n_trees=20,
)._neighbor_graph
for i in range(data.shape[0]):
assert len(knn_indices[i]) == len(
np.unique(knn_indices[i])
), "Duplicate graph_indices in knn graph"
angular_data = normalize(data, norm="l2")
tree = KDTree(angular_data)
true_indices = tree.query(angular_data, n_neighbors, return_distance=False)
num_correct = 0
for i in range(data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))
proportion_correct = num_correct / (data.shape[0] * n_neighbors)
assert proportion_correct >= 0.95, (
"NN-descent did not get 95%" " accuracy on nearest neighbors"
)
def test_output_when_verbose_is_true(spatial_data, seed):
out = io.StringIO()
with redirect_stdout(out):
_ = NNDescent(
data=spatial_data,
metric="euclidean",
metric_kwds={},
n_neighbors=4,
random_state=np.random.RandomState(seed),
n_trees=5,
n_iters=2,
verbose=True,
)
output = out.getvalue()
assert re.match("^.*5 trees", output, re.DOTALL)
assert re.match("^.*2 iterations", output, re.DOTALL)
def test_no_output_when_verbose_is_false(spatial_data, seed):
out = io.StringIO()
with redirect_stdout(out):
_ = NNDescent(
data=spatial_data,
metric="euclidean",
metric_kwds={},
n_neighbors=4,
random_state=np.random.RandomState(seed),
n_trees=5,
n_iters=2,
verbose=False,
)
output = out.getvalue().strip()
assert len(output) == 0
# Same as the previous two tests, but this time using the PyNNDescentTransformer
# interface.
def test_transformer_output_when_verbose_is_true(spatial_data, seed):
out = io.StringIO()
with redirect_stdout(out):
_ = PyNNDescentTransformer(
n_neighbors=4,
metric="euclidean",
metric_kwds={},
random_state=np.random.RandomState(seed),
n_trees=5,
n_iters=2,
verbose=True,
).fit_transform(spatial_data)
output = out.getvalue()
assert re.match("^.*5 trees", output, re.DOTALL)
assert re.match("^.*2 iterations", output, re.DOTALL)
def test_transformer_output_when_verbose_is_false(spatial_data, seed):
out = io.StringIO()
with redirect_stdout(out):
_ = PyNNDescentTransformer(
n_neighbors=4,
metric="standardised_euclidean",
metric_kwds={"sigma": np.ones(spatial_data.shape[1])},
random_state=np.random.RandomState(seed),
n_trees=5,
n_iters=2,
verbose=False,
).fit_transform(spatial_data)
output = out.getvalue().strip()
assert len(output) == 0
def test_pickle_unpickle():
seed = np.random.RandomState(42)
x1 = seed.normal(0, 100, (1000, 50))
x2 = seed.normal(0, 100, (1000, 50))
index1 = NNDescent(x1, "euclidean", {}, 10, random_state=None)
neighbors1, distances1 = index1.query(x2)
mem_temp = io.BytesIO()
pickle.dump(index1, mem_temp)
mem_temp.seek(0)
index2 = pickle.load(mem_temp)
neighbors2, distances2 = index2.query(x2)
np.testing.assert_equal(neighbors1, neighbors2)
np.testing.assert_equal(distances1, distances2)
def test_compressed_pickle_unpickle():
seed = np.random.RandomState(42)
x1 = seed.normal(0, 100, (1000, 50))
x2 = seed.normal(0, 100, (1000, 50))
index1 = NNDescent(x1, "euclidean", {}, 10, random_state=None, compressed=True)
neighbors1, distances1 = index1.query(x2)
mem_temp = io.BytesIO()
pickle.dump(index1, mem_temp)
mem_temp.seek(0)
index2 = pickle.load(mem_temp)
neighbors2, distances2 = index2.query(x2)
np.testing.assert_equal(neighbors1, neighbors2)
np.testing.assert_equal(distances1, distances2)
def test_transformer_pickle_unpickle():
seed = np.random.RandomState(42)
x1 = seed.normal(0, 100, (1000, 50))
x2 = seed.normal(0, 100, (1000, 50))
index1 = PyNNDescentTransformer(n_neighbors=10).fit(x1)
result1 = index1.transform(x2)
mem_temp = io.BytesIO()
pickle.dump(index1, mem_temp)
mem_temp.seek(0)
index2 = pickle.load(mem_temp)
result2 = index2.transform(x2)
np.testing.assert_equal(result1.indices, result2.indices)
np.testing.assert_equal(result1.data, result2.data)
def test_joblib_dump():
seed = np.random.RandomState(42)
x1 = seed.normal(0, 100, (1000, 50))
x2 = seed.normal(0, 100, (1000, 50))
index1 = NNDescent(x1, "euclidean", {}, 10, random_state=None)
neighbors1, distances1 = index1.query(x2)
mem_temp = io.BytesIO()
joblib.dump(index1, mem_temp)
mem_temp.seek(0)
index2 = joblib.load(mem_temp)
neighbors2, distances2 = index2.query(x2)
np.testing.assert_equal(neighbors1, neighbors2)
np.testing.assert_equal(distances1, distances2)
|
|
# coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Flax implementation of the MLPerf ResNet V1.5 model."""
import functools
from typing import Any, Optional, Tuple
from flax import linen as nn
from init2winit import utils
from init2winit.model_lib import base_model
from init2winit.model_lib import model_utils
from init2winit.model_lib import normalization
import jax.numpy as jnp
from ml_collections.config_dict import config_dict
FAKE_MODEL_DEFAULT_HPARAMS = config_dict.ConfigDict(dict(
num_filters=16,
num_layers=18, # Must be one of [18, 34, 50, 101, 152, 200]
layer_rescale_factors={},
lr_hparams={
'batch_size': 128,
'base_lr': 10.0,
'decay_end': -1,
'end_lr': 1e-4,
'power': 2.0,
'schedule': 'mlperf_polynomial',
'start_lr': 0.0,
'steps_per_epoch': 10009.250000000002,
'warmup_steps': 18,
},
optimizer='mlperf_lars_resnet',
opt_hparams={
'weight_decay': 2e-4,
'beta': 0.9
},
batch_size=128,
l2_decay_factor=None,
l2_decay_rank_threshold=2,
label_smoothing=.1,
use_shallue_label_smoothing=False,
model_dtype='float32',
virtual_batch_size=64,
data_format='NHWC',
))
# Used for the mlperf version of Resnet.
MLPERF_DEFAULT_HPARAMS = config_dict.ConfigDict(dict(
num_filters=16,
# We set default to 18 for faster unit tests.
num_layers=18, # Must be one of [18, 34, 50, 101, 152, 200]
layer_rescale_factors={},
lr_hparams={
'schedule': 'constant',
'base_lr': 0.2,
},
optimizer='momentum',
opt_hparams={
'momentum': 0.9,
},
batch_size=128,
bn_output_scale=0.0,
l2_decay_factor=None,
l2_decay_rank_threshold=2,
label_smoothing=None,
rng_seed=-1,
use_shallue_label_smoothing=False,
batch_norm_momentum=0.9,
batch_norm_epsilon=1e-5,
model_dtype='float32',
virtual_batch_size=64,
total_accumulated_batch_size=None,
data_format='NHWC',
))
def _constant_init(factor):
def init_fn(key, shape, dtype=jnp.float32):
del key
return jnp.ones(shape, dtype) * factor
return init_fn
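# _constant_init is used below to initialize the scale (gamma) of the final batch
# norm in each residual block; with the default bn_output_scale=0.0 each block
# initially contributes only its shortcut path, a common trick for stabilizing
# early ResNet training.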
class ResidualBlock(nn.Module):
"""Bottleneck ResNet block."""
filters: int
strides: Tuple[int, int] = (1, 1)
axis_name: Optional[str] = None
axis_index_groups: Optional[Any] = None
dtype: model_utils.Dtype = jnp.float32
batch_norm_momentum: float = 0.9
batch_norm_epsilon: float = 1e-5
bn_output_scale: float = 0.0
batch_size: Optional[int] = None
virtual_batch_size: Optional[int] = None
total_batch_size: Optional[int] = None
data_format: Optional[str] = None
@nn.compact
def __call__(self, x, train):
needs_projection = x.shape[-1] != self.filters * 4 or self.strides != (1, 1)
batch_norm = functools.partial(
normalization.VirtualBatchNorm,
momentum=self.batch_norm_momentum,
epsilon=self.batch_norm_epsilon,
axis_name=self.axis_name,
axis_index_groups=self.axis_index_groups,
dtype=self.dtype,
batch_size=self.batch_size,
virtual_batch_size=self.virtual_batch_size,
total_batch_size=self.total_batch_size,
data_format=self.data_format)
conv = functools.partial(nn.Conv, use_bias=False, dtype=self.dtype)
residual = x
if needs_projection:
residual = conv(
self.filters * 4, (1, 1), self.strides, name='proj_conv')(residual)
residual = batch_norm(name='proj_bn')(
residual, use_running_average=not train)
y = conv(self.filters, (1, 1), name='conv1')(x)
y = batch_norm(name='bn1')(y, use_running_average=not train)
y = nn.relu(y)
y = conv(self.filters, (3, 3), self.strides, name='conv2')(y)
y = batch_norm(name='bn2')(y, use_running_average=not train)
y = nn.relu(y)
y = conv(self.filters * 4, (1, 1), name='conv3')(y)
y = batch_norm(
name='bn3', scale_init=_constant_init(self.bn_output_scale))(
y, use_running_average=not train)
y = nn.relu(residual + y)
return y
class ResNet(nn.Module):
"""ResNetV1."""
num_classes: int
num_filters: int = 64
num_layers: int = 50
axis_name: Optional[str] = None
axis_index_groups: Optional[Any] = None
dtype: model_utils.Dtype = jnp.float32
batch_norm_momentum: float = 0.9
batch_norm_epsilon: float = 1e-5
bn_output_scale: float = 0.0
batch_size: Optional[int] = None
virtual_batch_size: Optional[int] = None
total_batch_size: Optional[int] = None
data_format: Optional[str] = None
@nn.compact
def __call__(self, x, train):
if self.num_layers not in _block_size_options:
raise ValueError('Please provide a valid number of layers')
block_sizes = _block_size_options[self.num_layers]
conv = functools.partial(nn.Conv, padding=[(3, 3), (3, 3)])
x = conv(self.num_filters, kernel_size=(7, 7), strides=(2, 2),
use_bias=False, dtype=self.dtype, name='conv0')(x)
x = normalization.VirtualBatchNorm(
momentum=self.batch_norm_momentum,
epsilon=self.batch_norm_epsilon,
name='init_bn',
axis_name=self.axis_name,
axis_index_groups=self.axis_index_groups,
dtype=self.dtype,
batch_size=self.batch_size,
virtual_batch_size=self.virtual_batch_size,
total_batch_size=self.total_batch_size,
data_format=self.data_format)(x, use_running_average=not train)
x = nn.relu(x) # MLPerf-required
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')
for i, block_size in enumerate(block_sizes):
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
x = ResidualBlock(
self.num_filters * 2 ** i,
strides=strides,
axis_name=self.axis_name,
axis_index_groups=self.axis_index_groups,
dtype=self.dtype,
batch_norm_momentum=self.batch_norm_momentum,
batch_norm_epsilon=self.batch_norm_epsilon,
bn_output_scale=self.bn_output_scale,
batch_size=self.batch_size,
virtual_batch_size=self.virtual_batch_size,
total_batch_size=self.total_batch_size,
data_format=self.data_format)(x, train=train)
x = jnp.mean(x, axis=(1, 2))
x = nn.Dense(self.num_classes, kernel_init=nn.initializers.normal(),
dtype=self.dtype)(x)
return x
# a dictionary mapping the number of layers in a resnet to the number of blocks
# in each stage of the model.
_block_size_options = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
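# Worked example: num_layers=50 selects [3, 4, 6, 3]; each bottleneck block has
# 3 convolutions, so the depth is 1 (input conv) + (3+4+6+3) * 3 + 1 (dense) = 50,
# which is where the "ResNet-50" name comes from.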
class FakeResNet(nn.Module):
"""Minimal NN (for debugging) with the same signature as a ResNet."""
num_classes: int
axis_name: Optional[str] = None
axis_index_groups: Optional[Any] = None
dtype: model_utils.Dtype = jnp.float32
@nn.compact
def __call__(self, x, train):
x = nn.BatchNorm(
use_running_average=not train,
momentum=0.9,
epsilon=1e-5,
name='init_bn',
axis_name=self.axis_name,
axis_index_groups=self.axis_index_groups,
dtype=self.dtype)(x)
x = jnp.mean(x, axis=(1, 2))
x = nn.Dense(self.num_classes, kernel_init=nn.initializers.normal(),
dtype=self.dtype)(x)
return x
class ResnetModelMLPerf(base_model.BaseModel):
"""MLPerf ResNet."""
def build_flax_module(self):
return ResNet(
num_classes=self.hps['output_shape'][-1],
num_filters=self.hps.num_filters,
num_layers=self.hps.num_layers,
dtype=utils.dtype_from_str(self.hps.model_dtype),
batch_norm_momentum=self.hps.batch_norm_momentum,
batch_norm_epsilon=self.hps.batch_norm_epsilon,
bn_output_scale=self.hps.bn_output_scale,
batch_size=self.hps.batch_size,
virtual_batch_size=self.hps.virtual_batch_size,
total_batch_size=self.hps.total_accumulated_batch_size,
data_format=self.hps.data_format)
class FakeModel(base_model.BaseModel):
"""Fake Model for easy debugging."""
def build_flax_module(self):
return FakeResNet(num_classes=self.hps['output_shape'][-1])
|
|
import mcpi.minecraft as minecraft
mc = minecraft.Minecraft.create()
NORTH = 2
SOUTH = 0
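# The letter functions below scale x offsets by (facing - 1): NORTH (2) gives +1,
# so glyphs extend toward +x; SOUTH (0) gives -1, mirroring the same glyphs
# toward -x.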
def fill(start_x, start_y, start_z, block=49, size=5, facing=NORTH):
for x in range(0, size):
for y in range(0, 5):
mc.setBlock(start_x + (facing-1) * x, start_y + y, start_z, block)
def a(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def b(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
# horizontal lines
for width in range(0, 4):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
# right side dots
mc.setBlock(x + (facing-1) * 4, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + 3, z, fg_block)
def c(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def d(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
if (height < 4) and (height > 0):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 4):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def e(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def f(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
if (width < 4):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def g(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
if height < 3:
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
if (width > 1):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
if (width < 4):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def h(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def i(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def j(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 3, y + height, z, fg_block)
if height < 2:
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
if width < 4:
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
if width > 1:
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def k(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
if height == 2:
mc.setBlock(x + (facing-1) * 1, y + height, z, fg_block)
    # Diagonal lines
mc.setBlock(x + (facing-1) * 2, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 2, y + 3, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 4, z, fg_block)
def l(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def m(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 4):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height > 1) and (height < 5):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
# top dots
mc.setBlock(x + (facing-1) * 1, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 4, z, fg_block)
def n(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height > 2):
mc.setBlock(x + (facing-1) * 1, y + height, z, fg_block)
if (height > 1) and (height < 4):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
if (height > 0) and (height < 3):
mc.setBlock(x + (facing-1) * 3, y + height, z, fg_block)
def o(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
if (height > 0) and (height < 4):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
if (width > 0) and (width < 4):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def p(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
if (height == 3):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 4):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def q(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
if (height > 0) and (height < 4):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
if (height > 1):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
if (width > 0) and (width < 4):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
if (width < 3):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
    # diagonal line
mc.setBlock(x + (facing-1) * 4, y, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 1, z, fg_block)
def r(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, fg_block)
if height > 2:
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 1, z, fg_block)
def s(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
if (height < 3):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height > 2):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def t(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def u(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(1, 5):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(1, 4):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def v(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
if (height > 2):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height < 3) and (height > 0):
mc.setBlock(x + (facing-1) * 1, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + height, z, fg_block)
# bottom dot
mc.setBlock(x + (facing-1) * 2, y + 0, z, fg_block)
def w(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(1, 5):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height > 0) and (height < 3):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
# bottom dots
mc.setBlock(x + (facing-1) * 1, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 0, z, fg_block)
def eks(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
if (height == 0) or (height == 4):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height == 1) or (height == 3):
mc.setBlock(x + (facing-1) * 1, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 2, y + 2, z, fg_block)
def why(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
if height == 4:
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if height == 3:
mc.setBlock(x + (facing-1) * 1, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + height, z, fg_block)
if (height < 3):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
def zee(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# horizontal lines
for width in range(0, 5):
if (width < 4):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
if (width > 0):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
# center diagonal
mc.setBlock(x + (facing-1) * 1, y + 3, z, fg_block)
mc.setBlock(x + (facing-1) * 2, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 1, z, fg_block)
def one(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
if (width > 0) and (width < 3):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def two(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
mc.setBlock(x, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + 3, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def three(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(1, 4):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
if (width < 4):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
if (width > 0):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def four(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
if (height > 1):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def five(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
    s(x, y, z, fg_block, bg_block, facing)
def six(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 0, y + height, z, fg_block)
if (height < 3):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def seven(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 2):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + 3, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def eight(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical dots
mc.setBlock(x + (facing-1) * 4, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + 3, z, fg_block)
mc.setBlock(x + (facing-1) * 0, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 0, y + 3, z, fg_block)
# horizontal lines
for width in range(1, 4):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def nine(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
    # vertical lines
for height in range(1, 4):
mc.setBlock(x + (facing-1) * 4, y + height, z, fg_block)
# vertical dots
mc.setBlock(x + (facing-1) * 0, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 0, y + 3, z, fg_block)
# horizontal lines
for width in range(1, 4):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 4, z, fg_block)
def zero(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
    o(x, y, z, fg_block, bg_block, facing)
def period(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, size=1)
mc.setBlock(x + (facing-1) * 0, y + 0, z, fg_block)
def comma(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
    fill(x, y, z, bg_block, size=2, facing=facing)
mc.setBlock(x + (facing-1) * 0, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 1, y + 1, z, fg_block)
def colon(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, size=1)
mc.setBlock(x + (facing-1) * 0, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 0, y + 3, z, fg_block)
def semicolon(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
    fill(x, y, z, bg_block, size=2, facing=facing)
mc.setBlock(x + (facing-1) * 0, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 1, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 1, y + 3, z, fg_block)
def plus(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
for height in range(0, 5):
mc.setBlock(x + (facing-1) * 2, y + height, z, fg_block)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def minus(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 2, z, fg_block)
def equals(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * width, y + 3, z, fg_block)
def times(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
    eks(x, y, z, fg_block, bg_block, facing)
def divide(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# vertical lines
mc.setBlock(x + (facing-1) * 0, y + 0, z, fg_block)
mc.setBlock(x + (facing-1) * 1, y + 1, z, fg_block)
mc.setBlock(x + (facing-1) * 2, y + 2, z, fg_block)
mc.setBlock(x + (facing-1) * 3, y + 3, z, fg_block)
mc.setBlock(x + (facing-1) * 4, y + 4, z, fg_block)
def underscore(x, y, z, fg_block=57, bg_block=49, facing=NORTH):
fill(x, y, z, bg_block, facing=facing)
# horizontal lines
for width in range(0, 5):
mc.setBlock(x + (facing-1) * width, y + 0, z, fg_block)
def letter_space(x, y, z, block=49):
# vertical lines
for height in range(0, 5):
mc.setBlock(x, y + height, z, block)
def draw_str(x, y, z, text, fg_block=57, bg_block=49, border=False, facing=NORTH):
    lines = text.split("\n")
cur_y = ((len(lines) - 1) * 6) + y
for j, line in enumerate(lines):
last = len(line) - 1
cur_x = x
line_width = 0
for i, letter in enumerate(line):
if letter == ' ':
alphabet[letter](cur_x, cur_y, z, bg_block, facing=facing)
letter_width = 5
elif (letter == ','):
alphabet[letter](cur_x, cur_y, z, fg_block, bg_block, facing=facing)
letter_width = 2
elif (letter == ':') or (letter == '.'):
alphabet[letter](cur_x, cur_y, z, fg_block, bg_block, facing=facing)
letter_width = 1
else:
alphabet[letter](cur_x, cur_y, z, fg_block, bg_block, facing=facing)
letter_width = 5
if i != last:
letter_space(cur_x + (facing-1) * letter_width, cur_y, z, bg_block)
cur_x += (facing-1) * (letter_width + 1)
line_width += letter_width + 1
if border:
# border on top
for width in range(0, line_width - 1):
mc.setBlock(x + (facing-1) * width, cur_y + 5, z, bg_block)
# left border
for height in range(-1, 6):
mc.setBlock(x - (facing-1) * 1, cur_y + height, z, bg_block)
# right border
for height in range(-1, 6):
mc.setBlock(x + (facing-1) * (line_width - 1), cur_y + height, z, bg_block)
# fill empty line
for width in range(0, line_width - 1):
mc.setBlock(x + (facing-1) * width, cur_y - 1, z, bg_block)
cur_y -= 6
alphabet = {'a': a, 'b': b, 'c': c, 'd': d, 'e': e,
'f': f, 'g': g, 'h': h, 'i': i, 'j': j,
'k': k, 'l': l, 'm': m, 'n': n, 'o': o,
'p': p, 'q': q, 'r': r, 's': s, 't': t,
'u': u, 'v': v, 'w': w, 'x': eks,
'y': why, 'z': zee, ' ': fill,
'1': one, '2': two, '3': three, '4': four,
'5': five, '6': six, '7': seven, '8': eight,
'9': nine, '0': zero,
'.': period, ',': comma, ':': colon,
'+': plus, '-': minus, '=': equals,
'*': times, '/': divide, '_': underscore}
if __name__ == "__main__":
pos = mc.player.getPos()
draw_str(pos.x, pos.y + 1, pos.z - 1, "hello\nworld", fg_block=74, bg_block=49, border=True, facing=SOUTH)
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train ml rnn model."""
import argparse
import math
import os
import sys
import time
import numpy as np
import tensorflow as tf
from clusterfuzz._internal.bot.fuzzers.ml.rnn import constants
from clusterfuzz._internal.bot.fuzzers.ml.rnn import utils
# Training suggestions
#
# Training only:
# Leave all the parameters as they are in constants.py.
# Disable validation to run a bit faster (set validation=False).
# You can follow progress in Tensorboard: tensorboard --logdir=log
#
# Training and experimenting (default):
# Keep validation enabled.
# You can now play with the parameters and follow the effects in
# Tensorboard.
# A good choice of parameters ensures that the testing and validation
# curves stay close. To see the curves drift apart ("overfitting") try
# to use an insufficient amount of training data.
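#
# Example invocation (hypothetical paths; the script name is assumed, and the
# flags are defined in parse_args() below):
#
#   python train.py --input-dir=/path/to/corpus --model-dir=/path/to/models \
#       --log-dir=/path/to/logs --validation --debug
#
# Progress can then be followed with: tensorboard --logdir=/path/to/logs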
@tf.function
def train_step(model, optimizer, input_data, expected_data, train=False):
"""Train the model for one step.
Args:
model: RNN model to train/predict.
optimize: optimizer to use to train the model.
input_data: input sequence to the model.
expected_data: expected output of the model.
Returns:
Tuple containing the sequential loss between the expected output and the
real output, the batch loss between the two, the accuracy metric value as
well as the most likely predicted output.
"""
with tf.GradientTape() as tape:
predicted_data = model(input_data)
loss = tf.keras.losses.sparse_categorical_crossentropy(
expected_data, predicted_data, from_logits=True)
seq_loss = tf.reduce_mean(input_tensor=loss, axis=1)
batch_loss = tf.reduce_mean(input_tensor=seq_loss)
output_bytes = tf.cast(
tf.argmax(predicted_data, axis=-1), expected_data.dtype)
accuracy = tf.reduce_mean(
tf.cast(tf.equal(expected_data, output_bytes), tf.float32))
if train:
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return seq_loss, batch_loss, accuracy, output_bytes
def main(args):
"""Main function to train the model.
Args:
args: Parsed arguments.
Returns:
Execution status defined by `constants.ExitCode`.
"""
# Validate paths.
if not validate_paths(args):
return constants.ExitCode.INVALID_PATH
# Extract paths.
input_dir = args.input_dir
model_dir = args.model_dir
log_dir = args.log_dir
existing_model = args.existing_model
# Extract model parameters.
batch_size = args.batch_size
dropout_pkeep = args.dropout_pkeep
hidden_state_size = args.hidden_state_size
hidden_layer_size = args.hidden_layer_size
learning_rate = args.learning_rate
# Extract additional flags.
debug = args.debug
validation = args.validation
# Split corpus for training and validation.
# validation_text will be empty if validation is False.
code_text, validation_text, input_ranges = utils.read_data_files(
input_dir, validation=validation)
# Bail out if we don't have enough corpus for training.
if len(code_text) < batch_size * constants.TRAINING_SEQLEN + 1:
return constants.ExitCode.CORPUS_TOO_SMALL
# Get corpus files info. Will be used in debug mode to generate sample text.
files_info_list = []
if debug:
files_info_list = utils.get_files_info(input_dir)
assert files_info_list
# Calculate validation batch size. It will be 0 if we choose not to validate.
validation_batch_size = len(validation_text) // constants.VALIDATION_SEQLEN
# Display some stats on the data.
epoch_size = len(code_text) // (batch_size * constants.TRAINING_SEQLEN)
utils.print_data_stats(len(code_text), len(validation_text), epoch_size)
# Set global random seed, so any random sequence generated is repeatable.
# It could also be removed.
tf.random.set_seed(0)
# Build the RNN model.
model = utils.build_model(hidden_layer_size * hidden_state_size,
dropout_pkeep, batch_size, debug)
# Choose Adam optimizer to compute gradients.
optimizer = tf.keras.optimizers.Adam(learning_rate)
# Init Tensorboard stuff.
# This will save Tensorboard information in folder specified in command line.
# Two sets of data are saved so that you can compare training and
# validation curves visually in Tensorboard.
timestamp = str(math.trunc(time.time()))
summary_writer = tf.summary.create_file_writer(
os.path.join(log_dir, timestamp + '-training'))
validation_writer = tf.summary.create_file_writer(
os.path.join(log_dir, timestamp + '-validation'))
# For display: init the progress bar.
step_size = batch_size * constants.TRAINING_SEQLEN
frequency = constants.DISPLAY_FREQ * step_size
progress = utils.Progress(
constants.DISPLAY_FREQ,
size=constants.DISPLAY_LEN,
msg='Training on next {} batches'.format(constants.DISPLAY_FREQ))
# We continue training on existing model, or start with a new model.
if existing_model:
print('Continue training on existing model: {}'.format(existing_model))
try:
model.load_weights(existing_model)
  except Exception:
print(
('Failed to restore existing model since model '
'parameters do not match.'),
file=sys.stderr)
return constants.ExitCode.TENSORFLOW_ERROR
else:
print('No existing model provided. Start training with a new model.')
# Num of bytes we have trained so far.
steps = 0
# Training loop.
for input_batch, expected_batch, epoch in utils.rnn_minibatch_sequencer(
code_text,
batch_size,
constants.TRAINING_SEQLEN,
nb_epochs=constants.EPOCHS):
# Train on one mini-batch.
seq_loss, batch_loss, accuracy, output_bytes = train_step(
model, optimizer, input_batch, expected_batch, train=True)
# Log training data for Tensorboard display a mini-batch of sequences
# every `frequency` batches.
if debug and steps % frequency == 0:
utils.print_learning_learned_comparison(
input_batch, output_bytes, seq_loss, input_ranges, batch_loss,
accuracy, epoch_size, steps, epoch)
with summary_writer.as_default(): # pylint: disable=not-context-manager
tf.summary.scalar('batch_loss', batch_loss, step=steps)
tf.summary.scalar('batch_accuracy', accuracy, step=steps)
summary_writer.flush()
# Run a validation step every `frequency` batches.
# The validation text should be a single sequence but that's too slow.
# We cut it up and batch the pieces (slightly inaccurate).
if validation and steps % frequency == 0 and validation_batch_size:
utils.print_validation_header(len(code_text), input_ranges)
validation_x, validation_y, _ = next(
utils.rnn_minibatch_sequencer(validation_text, validation_batch_size,
constants.VALIDATION_SEQLEN, 1))
validation_model = utils.build_model(
hidden_layer_size * hidden_state_size, dropout_pkeep,
validation_batch_size, False)
last_weights = tf.train.latest_checkpoint(model_dir)
if last_weights:
validation_model.load_weights(tf.train.latest_checkpoint(model_dir))
validation_model.build(tf.TensorShape([validation_batch_size, None]))
validation_model.reset_states()
# Run one single inference step
_, batch_loss, accuracy, _ = train_step(
validation_model, optimizer, validation_x, validation_y, train=False)
utils.print_validation_stats(batch_loss, accuracy)
# Save validation data for Tensorboard.
with validation_writer.as_default(): # pylint: disable=not-context-manager
tf.summary.scalar('batch_loss', batch_loss, step=steps)
tf.summary.scalar('batch_accuracy', accuracy, step=steps)
validation_writer.flush()
# Display a short text generated with the current weights and biases.
# If enabled, there will be a large output.
if debug and steps // 4 % frequency == 0:
utils.print_text_generation_header()
file_info = utils.random_element_from_list(files_info_list)
first_byte, file_size = file_info['first_byte'], file_info['file_size']
ry = np.array([[first_byte]])
sample = [first_byte]
generation_model = utils.build_model(
hidden_layer_size * hidden_state_size, dropout_pkeep, 1, False)
last_weights = tf.train.latest_checkpoint(model_dir)
if last_weights:
generation_model.load_weights(tf.train.latest_checkpoint(model_dir))
generation_model.build(tf.TensorShape([1, None]))
generation_model.reset_states()
for _ in range(file_size - 1):
prediction = generation_model(ry)
prediction = tf.squeeze(prediction, 0).numpy()
rc = utils.sample_from_probabilities(
prediction, topn=10 if epoch <= 1 else 2)
sample.append(rc)
ry = np.array([[rc]])
print(repr(utils.decode_to_text(sample)))
utils.print_text_generation_footer()
# Save a checkpoint every `10 * frequency` batches. Each checkpoint is
    # a version of the model.
if steps // 10 % frequency == 0:
saved_model_name = constants.RNN_MODEL_NAME + '_' + timestamp
saved_model_path = os.path.join(model_dir, saved_model_name)
model.save_weights(saved_model_path)
print('Saved model: {}'.format(saved_model_path))
# Display progress bar.
if debug:
progress.step(reset=steps % frequency == 0)
# Update state.
steps += step_size
# Save the model after training is done.
saved_model_name = constants.RNN_MODEL_NAME + '_' + timestamp
saved_model_path = os.path.join(model_dir, saved_model_name)
model.save_weights(saved_model_path)
print('Saved model: {}'.format(saved_model_path))
return constants.ExitCode.SUCCESS
def validate_paths(args):
"""Validate paths.
Args:
args: Parsed arguments.
Returns:
True if all paths are valid, False otherwise.
"""
if not os.path.exists(args.input_dir):
print(
'Input directory {} does not exist'.format(args.input_dir),
file=sys.stderr)
return False
if not os.path.exists(args.model_dir):
os.mkdir(args.model_dir)
if not os.path.exists(args.log_dir):
os.mkdir(args.log_dir)
if args.existing_model and not utils.validate_model_path(args.existing_model):
print(
'Existing model {} does not exist'.format(args.existing_model),
file=sys.stderr)
return False
return True
def parse_args():
"""Parse command line arguments.
Returns:
    Parsed argument object.
"""
parser = argparse.ArgumentParser('Training RNN model on existing testcases')
parser.add_argument('--input-dir', help='Input folder path', required=True)
parser.add_argument('--log-dir', help='Log folder path', required=True)
parser.add_argument('--model-dir', help='Path to save models', required=True)
# Optional arguments: model parameters and additional flags.
parser.add_argument(
'--batch-size', help='Batch size', type=int, default=constants.BATCH_SIZE)
parser.add_argument(
'--debug', help='Print training progress', action='store_true')
parser.add_argument(
'--dropout-pkeep',
help='Dropout probability (keep rate)',
type=float,
default=constants.DROPOUT_PKEEP)
parser.add_argument(
'--existing-model', help='Continue training on existing model')
parser.add_argument(
'--hidden-state-size',
help='Hidden state size of LSTM cell',
type=int,
default=constants.HIDDEN_STATE_SIZE)
parser.add_argument(
'--hidden-layer-size',
help='Hidden layer size of LSTM model',
type=int,
default=constants.HIDDEN_LAYER_SIZE)
parser.add_argument(
'--learning-rate',
help='Learning rate',
type=float,
default=constants.LEARNING_RATE)
parser.add_argument(
'--validation',
help='Print validation stats during training',
action='store_true')
return parser.parse_args()
if __name__ == '__main__':
parsed_args = parse_args()
sys.exit(main(parsed_args))
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Sample training with distributed collection using a variable container.
See README for launch instructions.
"""
import os
from typing import Callable, Text
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow.compat.v2 as tf
from tf_agents.agents import tf_agent
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import sac_agent
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import py_environment
from tf_agents.environments import suite_mujoco
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.networks import actor_distribution_network
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.system import system_multiprocessing as multiprocessing
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import strategy_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('env_name', None, 'Name of the environment')
flags.DEFINE_string('replay_buffer_server_address', None,
'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')
FLAGS = flags.FLAGS
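# Example launch of this trainer job (hypothetical script name and addresses;
# the collect job and reverb server are started separately, see the README):
#
#   python sac_train.py --root_dir=/tmp/sac \
#       --env_name=HalfCheetah-v2 \
#       --replay_buffer_server_address=localhost:8008 \
#       --variable_container_server_address=localhost:8008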
def _create_agent(train_step: tf.Variable,
observation_tensor_spec: types.NestedTensorSpec,
action_tensor_spec: types.NestedTensorSpec,
time_step_tensor_spec: ts.TimeStep,
learning_rate: float) -> tf_agent.TFAgent:
"""Creates an agent."""
critic_net = critic_network.CriticNetwork(
(observation_tensor_spec, action_tensor_spec),
observation_fc_layer_params=None,
action_fc_layer_params=None,
joint_fc_layer_params=(256, 256),
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_tensor_spec,
action_tensor_spec,
fc_layer_params=(256, 256),
continuous_projection_net=tanh_normal_projection_network
.TanhNormalProjectionNetwork)
return sac_agent.SacAgent(
time_step_tensor_spec,
action_tensor_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
critic_optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
alpha_optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
target_update_tau=0.005,
target_update_period=1,
td_errors_loss_fn=tf.math.squared_difference,
gamma=0.99,
reward_scale_factor=0.1,
gradient_clipping=None,
train_step_counter=train_step)
@gin.configurable
def train(
root_dir: Text,
environment_name: Text,
strategy: tf.distribute.Strategy,
replay_buffer_server_address: Text,
variable_container_server_address: Text,
suite_load_fn: Callable[[Text],
py_environment.PyEnvironment] = suite_mujoco.load,
# Training params
learning_rate: float = 3e-4,
batch_size: int = 256,
num_iterations: int = 2000000,
learner_iterations_per_call: int = 1) -> None:
"""Trains a SAC agent."""
# Get the specs from the environment.
logging.info('Training SAC with learning rate: %f', learning_rate)
env = suite_load_fn(environment_name)
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
# Create the agent.
with strategy.scope():
train_step = train_utils.create_train_step()
agent = _create_agent(
train_step=train_step,
observation_tensor_spec=observation_tensor_spec,
action_tensor_spec=action_tensor_spec,
time_step_tensor_spec=time_step_tensor_spec,
learning_rate=learning_rate)
    # Create the policy saver, which saves the initial model now and then
    # periodically checkpoints the policy weights.
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
save_model_trigger = triggers.PolicySavedModelTrigger(
saved_model_dir, agent, train_step, interval=1000)
# Create the variable container.
variables = {
reverb_variable_container.POLICY_KEY: agent.collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.push(variables)
# Create the replay buffer.
reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(
agent.collect_data_spec,
sequence_length=2,
table_name=reverb_replay_buffer.DEFAULT_TABLE,
server_address=replay_buffer_server_address)
# Initialize the dataset.
def experience_dataset_fn():
with strategy.scope():
return reverb_replay.as_dataset(
sample_batch_size=batch_size, num_steps=2).prefetch(3)
# Create the learner.
learning_triggers = [
save_model_trigger,
triggers.StepPerSecondLogTrigger(train_step, interval=1000)
]
sac_learner = learner.Learner(
root_dir,
train_step,
agent,
experience_dataset_fn,
triggers=learning_triggers,
strategy=strategy)
# Run the training loop.
while train_step.numpy() < num_iterations:
sac_learner.run(iterations=learner_iterations_per_call)
variable_container.push(variables)
def main(_):
logging.set_verbosity(logging.INFO)
tf.enable_v2_behavior()
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
strategy = strategy_utils.get_strategy(FLAGS.tpu, FLAGS.use_gpu)
train(
root_dir=FLAGS.root_dir,
environment_name=FLAGS.env_name,
strategy=strategy,
replay_buffer_server_address=FLAGS.replay_buffer_server_address,
variable_container_server_address=FLAGS.variable_container_server_address)
if __name__ == '__main__':
flags.mark_flags_as_required([
'root_dir', 'env_name', 'replay_buffer_server_address',
'variable_container_server_address'
])
multiprocessing.handle_main(lambda _: app.run(main))
|
|
import importlib
import os
import shlex
import traceback
from xml.dom import minidom
from .util import make_dirs, remove_tree
def get_rerun_targets(xml_file: str):
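    """Collect "classname.name" rerun targets for all failed or skipped test cases in a xunit xml."""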
test_targets = []
doc = minidom.parse(xml_file)
if doc.documentElement.nodeName == "testsuites":
root = doc.documentElement
else:
root = doc
for test_suite_node in root.getElementsByTagName("testsuite"):
for test_case_node in test_suite_node.getElementsByTagName("testcase"):
if test_case_node.getElementsByTagName("failure") or test_case_node.getElementsByTagName("skipped"):
test_target = "%s.%s" % (test_case_node.getAttribute("classname"), test_case_node.getAttribute("name"))
test_targets.append(test_target)
return test_targets
def merge_xunit_xmls(xml_files: list, to_file: str):
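    """Merge multiple xunit result xmls into a single xml written to to_file.

    When the same test case appears in more than one input xml, the result with
    the lowest status code wins (passed < failed < skipped), so a pass in any
    run overrides an earlier failure or skip.
    """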
from .plogger import pconsole
from .test_suite import default_test_suite
pconsole.write_line("Start to merge xunit result xmls...")
test_case_results = {}
for xml_file in xml_files:
doc = minidom.parse(xml_file)
if doc.documentElement.nodeName == "testsuites":
root = doc.documentElement
else:
root = doc
for test_suite_node in root.getElementsByTagName("testsuite"):
for test_case_node in test_suite_node.getElementsByTagName("testcase"):
test_case_name = "%s.%s" % (test_case_node.getAttribute("classname"), test_case_node.getAttribute("name"))
test_case_status = 0 # passed
if test_case_node.getElementsByTagName("failure"):
test_case_status = 1 # failed
elif test_case_node.getElementsByTagName("skipped"):
test_case_status = 2 # skipped
if test_case_name not in test_case_results or test_case_status < test_case_results[test_case_name]["status"]:
test_case_results[test_case_name] = {"status": test_case_status, "node": test_case_node}
doc = minidom.Document()
test_suite_ele = doc.createElement("testsuite")
doc.appendChild(test_suite_ele)
test_suite_ele.setAttribute("name", default_test_suite.name)
test_suite_ele.setAttribute("tests", str(len(test_case_results)))
test_suite_ele.setAttribute("failures", str(len([result for result in test_case_results.values() if result["status"] == 1])))
test_suite_ele.setAttribute("skips", str(len([result for result in test_case_results.values() if result["status"] == 2])))
test_suite_ele.setAttribute("errors", "0")
for test_case_result in test_case_results.values():
test_suite_ele.appendChild(test_case_result["node"])
if os.path.exists(to_file):
pconsole.write_line("Cleaning old merged xunit result xml...")
os.remove(to_file)
else:
make_dirs(os.path.dirname(to_file))
f = open(to_file, mode="w", encoding="utf-8")
try:
doc.writexml(f, "\t", "\t", "\n", "utf-8")
pconsole.write_line("Merged xunit xml is generated at %s" % to_file)
except Exception:
pconsole.write_line("Failed to generate merged xunit xml.\n%s" % traceback.format_exc())
finally:
f.close()
def main(args=None):
import sys
from . import config
# load arguments
if args is None:
args = sys.argv[1:]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
sys.stderr.write("ERROR: args <%s> is not a string or argument list." % args)
return
args = shlex.split(args)
config.load(args)
# merge xunit result xmls
xunit_xmls = config.get_option("merge_xunit_xmls")
if xunit_xmls is not None:
merge_xunit_xmls(xunit_xmls, config.get_option("to"))
return
# run test
from .test_filter import TestFilterGroup, TestIncludeTagsFilter, TestExcludeTagsFilter, TestIncludeGroupsFilter
from . import test_executor, reporter, plistener
from .test_finder import TestFinder
from .test_suite import default_test_suite
from .plogger import pconsole
pconsole.write_line("Starting ptest...")
# add workspace to python path
workspace = config.get_option("workspace")
sys.path.insert(0, workspace)
pconsole.write_line("Workspace:")
pconsole.write_line(" %s" % workspace)
# add python_paths to python path
python_paths = config.get_option("python_paths")
if python_paths is not None:
pconsole.write_line("Python paths:")
for python_path in python_paths:
sys.path.append(python_path)
pconsole.write_line(" %s" % python_path)
# test filter group
test_filter_group = TestFilterGroup()
include_tags = config.get_option("include_tags")
if include_tags is not None:
test_filter_group.append_filter(TestIncludeTagsFilter(include_tags))
exclude_tags = config.get_option("exclude_tags")
if exclude_tags is not None:
test_filter_group.append_filter(TestExcludeTagsFilter(exclude_tags))
include_groups = config.get_option("include_groups")
if include_groups is not None:
test_filter_group.append_filter(TestIncludeGroupsFilter(include_groups))
filter_path = config.get_option("test_filter")
if filter_path is not None:
splitted_filter_path = filter_path.split(".")
filter_module = importlib.import_module(".".join(splitted_filter_path[:-1]))
filter_class = getattr(filter_module, splitted_filter_path[-1])
test_filter_group.append_filter(filter_class())
if test_filter_group:
pconsole.write_line("Test filters:")
for test_filter in test_filter_group:
pconsole.write_line(" %s" % test_filter)
# get test targets
test_targets = config.get_option("test_targets")
if test_targets is not None:
pconsole.write_line("Test targets:")
for test_target in test_targets:
test_finder = TestFinder(test_target, test_filter_group, default_test_suite)
test_finder.find_tests()
if test_finder.repeated_test_count:
pconsole.write_line(
" %s (%s tests found, %s repeated)" % (test_target, test_finder.found_test_count, test_finder.repeated_test_count))
else:
pconsole.write_line(" %s (%s tests found)" % (test_target, test_finder.found_test_count))
else:
# rerun failed/skipped test cases
pconsole.write_line("Run failed/skipped tests in xunit xml:")
xunit_xml = config.get_option("run_failed")
test_targets = get_rerun_targets(xunit_xml)
found_test_count = 0
for test_target in test_targets:
test_finder = TestFinder(test_target, test_filter_group, default_test_suite)
test_finder.find_tests()
found_test_count += test_finder.found_test_count
pconsole.write_line(" %s (%s tests found)" % (xunit_xml, found_test_count))
# add test listeners
listener_paths = config.get_option("test_listeners")
if listener_paths is not None:
pconsole.write_line("Test listeners:")
for listener_path in listener_paths:
pconsole.write_line(" %s" % listener_path)
splitted_listener_path = listener_path.split(".")
listener_module = importlib.import_module(".".join(splitted_listener_path[:-1]))
listener_class = getattr(listener_module, splitted_listener_path[-1])
plistener.test_listeners.append(listener_class())
# init test suite
default_test_suite.init()
test_cases = default_test_suite.test_cases
# exit if no tests found
if len(test_cases) == 0:
pconsole.write_line("=" * 100)
pconsole.write_line("No tests found. Please check your command line options.")
return
    # add webdriver instances to the test executor to support capturing screenshots with webdriver
try:
from selenium.webdriver.remote.webdriver import WebDriver
except ImportError as ie:
pass
else:
def add_web_driver(executor, web_driver):
web_drivers = executor.get_property("web_drivers")
if web_drivers is None:
web_drivers = []
executor.update_properties({"web_drivers": web_drivers})
web_drivers.append(web_driver)
def new_start_client(self):
try:
current_executor = test_executor.current_executor()
add_web_driver(current_executor, self)
add_web_driver(current_executor.parent_test_executor, self)
add_web_driver(current_executor.parent_test_executor.parent_test_executor, self)
except AttributeError as ae:
pass
def remove_web_driver(executor, web_driver):
web_drivers = executor.get_property("web_drivers")
if web_drivers:
web_drivers.remove(web_driver)
def new_stop_client(self):
try:
current_executor = test_executor.current_executor()
remove_web_driver(current_executor, self)
remove_web_driver(current_executor.parent_test_executor, self)
remove_web_driver(current_executor.parent_test_executor.parent_test_executor, self)
except AttributeError as ae:
pass
WebDriver.start_client = new_start_client
WebDriver.stop_client = new_stop_client
# print test names
pconsole.write_line("=" * 100)
pconsole.write_line("Start to run following %s tests:" % len(test_cases))
pconsole.write_line("-" * 30)
for test_case in test_cases:
pconsole.write_line(" %s" % test_case.full_name)
pconsole.write_line("=" * 100)
# clean and create temp dir
temp_dir = config.get_option("temp")
if os.path.exists(temp_dir):
remove_tree(temp_dir, remove_root=False)
else:
make_dirs(temp_dir)
# run test cases
test_executor.TestSuiteExecutor(default_test_suite, int(config.get_option("test_executor_number"))).start_and_join()
# log the test results
status_count = default_test_suite.status_count
pconsole.write_line("")
pconsole.write_line("=" * 100)
pconsole.write_line("Test finished in %.2fs." % default_test_suite.elapsed_time)
pconsole.write_line("Total: %s, passed: %s, failed: %s, skipped: %s. Pass rate: %.1f%%." % (
status_count.total, status_count.passed, status_count.failed, status_count.skipped, default_test_suite.pass_rate))
# generate the test report
pconsole.write_line("")
pconsole.write_line("=" * 100)
reporter.generate_xunit_xml(config.get_option("xunit_xml"))
reporter.generate_html_report(config.get_option("report_dir"))
# clean temp dir
remove_tree(temp_dir)
|
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sublime
from .const import AUTO_FORMAT_FILE_EXTENSIONS
from .const import IS_ST3
from .const import PLUGIN_NAME
from .const import PRETTIER_OPTIONS_KEY
from .const import PROJECT_SETTINGS_KEY
from .const import SETTINGS_FILENAME
from .util import ensure_file_has_ext
from .util import generate_dirs
from .util import is_bool_str
from .util import is_str_none_or_empty
from .util import is_windows
from .util import to_str
from .util import which
def st_status_message(msg):
sublime.set_timeout(lambda: sublime.status_message('{0}: {1}'.format(PLUGIN_NAME, msg)), 0)
def get_setting(view, key, default_value=None):
settings = view.settings().get(PLUGIN_NAME)
if settings is None or settings.get(key) is None:
settings = sublime.load_settings(SETTINGS_FILENAME)
value = settings.get(key, default_value)
# check for project-level overrides:
project_value = _get_project_setting(key)
if project_value is None:
return value
return project_value
def get_sub_setting(view, key=None):
settings = view.settings().get(PLUGIN_NAME)
if settings is None or settings.get(PRETTIER_OPTIONS_KEY).get(key) is None:
settings = sublime.load_settings(SETTINGS_FILENAME)
value = settings.get(PRETTIER_OPTIONS_KEY).get(key)
# check for project-level overrides:
project_value = _get_project_sub_setting(key)
if project_value is None:
return value
return project_value
def _get_project_setting(key):
"""Get a project setting.
JsPrettier project settings are stored in the sublime project file
as a dictionary, e.g.:
"settings":
{
"js_prettier": { "key": "value", ... }
}
:param key: The project setting key.
:return: The project setting value.
:rtype: str
"""
project_settings = sublime.active_window().active_view().settings()
if not project_settings:
return None
js_prettier_settings = project_settings.get(PROJECT_SETTINGS_KEY)
if js_prettier_settings and key in js_prettier_settings:
return js_prettier_settings[key]
return None
def _get_project_sub_setting(option):
project_settings = sublime.active_window().active_view().settings()
js_prettier_settings = project_settings.get(PROJECT_SETTINGS_KEY, None)
if not js_prettier_settings:
return None
prettier_options = js_prettier_settings.get(PRETTIER_OPTIONS_KEY, None)
if prettier_options and option in prettier_options:
return prettier_options.get(option, None)
return None
def is_file_auto_formattable(view):
filename = view.file_name()
if not filename:
return False
file_ext = filename.rpartition('.')[-1]
if file_ext == filename:
return False
if file_ext in AUTO_FORMAT_FILE_EXTENSIONS:
return True
if file_ext in set(get_setting(view, 'custom_file_extensions', [])):
return True
return False
def get_st_project_path():
"""Get the active Sublime Text project path.
Original: https://gist.github.com/astronaughts/9678368
:rtype: object
:return: The active Sublime Text project path.
"""
window = sublime.active_window()
folders = window.folders()
if len(folders) == 1:
return folders[0]
else:
active_view = window.active_view()
if active_view:
active_file_name = active_view.file_name()
else:
active_file_name = None
if not active_file_name:
return folders[0] if len(folders) else os.path.expanduser('~')
for folder in folders:
if active_file_name.startswith(folder):
return folder
return os.path.dirname(active_file_name)
def scroll_view_to(view, row_no, col_no):
# error positions are offset by -1
# prettier -> sublime text
row_no -= 1
col_no -= 1
textpoint = view.text_point(row_no, col_no)
view.sel().clear()
view.sel().add(sublime.Region(textpoint))
view.show_at_center(textpoint)
def has_selection(view):
for sel in view.sel():
if not sel.empty():
return True
return False
def resolve_prettier_cli_path(view, plugin_path, st_project_path):
"""The prettier cli path.
When the `prettier_cli_path` setting is empty (""),
the path is resolved by searching locations in the following order,
returning the first match of the prettier cli path...
1. prettier installed relative to the view's active file: i.e.
walk up the hierarchy from the current view's file and look for
'node_modules/.bin/prettier'.
2. Locally installed prettier, relative to a Sublime Text Project
       file's root directory, e.g. 'node_modules/.bin/prettier' and 'node_modules/prettier/bin-prettier.js';
3. User's $HOME/node_modules directory.
4. Look in the JsPrettier Sublime Text plug-in directory for `node_modules/.bin/prettier`.
5. Finally, check if prettier is installed globally.
:return: The prettier cli path.
"""
def make_local_prettier_path(somepath):
return os.path.join(somepath, 'node_modules', '.bin', 'prettier')
custom_prettier_cli_path = expand_var(view.window(), get_setting(view, 'prettier_cli_path', ''))
if is_str_none_or_empty(custom_prettier_cli_path):
#
# 1. check for prettier installed relative to active view
active_view_parents = generate_dirs(os.path.dirname(view.file_name()), limit=500)
for parent in active_view_parents:
closest_to_view_prettier = make_local_prettier_path(parent)
if os.path.exists(closest_to_view_prettier):
return closest_to_view_prettier
#
# 2. check locally installed prettier
project_prettier_path = make_local_prettier_path(st_project_path)
if os.path.exists(project_prettier_path):
return project_prettier_path
plugin_prettier_path = make_local_prettier_path(plugin_path)
if os.path.exists(plugin_prettier_path):
return plugin_prettier_path
#
# 3. check locally installed '--no-bin-links' prettier (see #146)
project_prettier_path_nbl = os.path.join(st_project_path, 'node_modules', 'prettier', 'bin-prettier.js')
plugin_prettier_path_nbl = os.path.join(plugin_path, 'node_modules', 'prettier', 'bin-prettier.js')
if os.path.exists(project_prettier_path_nbl):
return project_prettier_path_nbl
if os.path.exists(plugin_prettier_path_nbl):
return plugin_prettier_path_nbl
#
        # 4. check globally installed prettier
prettier_cmd = 'prettier'
if is_windows():
prettier_cmd = ensure_file_has_ext(prettier_cmd, ".cmd")
return which(prettier_cmd)
# handle cases when the user specifies a prettier cli path that is
# relative to the working file or project:
if not os.path.isabs(custom_prettier_cli_path):
custom_prettier_cli_path = os.path.join(st_project_path, custom_prettier_cli_path)
return os.path.normpath(custom_prettier_cli_path)
# noinspection PyUnusedLocal
def resolve_node_path(source_file):
node_cmd = 'node'
if is_windows():
node_cmd = ensure_file_has_ext(node_cmd, ".exe")
return which(node_cmd)
def expand_var(window, var_to_expand):
if not is_str_none_or_empty(var_to_expand):
expanded = os.path.expanduser(var_to_expand)
expanded = os.path.expandvars(expanded)
if IS_ST3 and window:
window_variables = window.extract_variables()
expanded = sublime.expand_variables(expanded, window_variables)
return expanded
return var_to_expand
def parse_additional_cli_args(window, additional_cli_args_setting=None):
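    """Flatten the additional_cli_args settings dict into a flat list of cli arguments.

    For example (hypothetical values), {"--single-quote": "true", "--tab-width": "4"}
    becomes ["--single-quote", "true", "--tab-width", "4"]; keys whose values are
    empty are appended as bare flags.
    """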
listofargs = []
if additional_cli_args_setting is None:
additional_cli_args_setting = {}
if additional_cli_args_setting and len(additional_cli_args_setting) > 0 \
and isinstance(additional_cli_args_setting, dict):
for arg_key, arg_value in additional_cli_args_setting.items():
arg_key = to_str(arg_key).strip()
if len(arg_key) == 0:
# arg key cannot be empty
log_warn("Empty argument detected in 'additional_cli_args'. Did you forget to add something?", True)
continue
listofargs.append(arg_key)
arg_value = to_str(arg_value).strip()
if len(arg_value) == 0:
# arg value can be empty... just don't append it
continue
if is_bool_str(arg_value):
arg_value = arg_value.lower()
else:
arg_value = expand_var(window, arg_value)
listofargs.append(arg_value)
return listofargs
def debug_enabled(view):
return bool(get_setting(view, 'debug', False))
def _print_log(kind, msg, insert_leading_line_break=False):
"""Print a log message to the Sublime Text Console.
:param kind: The kind of message to log. e.g.: 'DEBUG', 'INFO', 'WARN' or 'ERROR'.
:param msg: The message to log.
:param insert_leading_line_break: Whether or not to insert a leading line break before the log message.
"""
leading_line_break = ''
if insert_leading_line_break:
leading_line_break = '\n'
print("{0}[{1} {2}]: {3}".format(leading_line_break, PLUGIN_NAME, kind, msg))
def log_debug(view, msg, insert_leading_line_break=False):
if debug_enabled(view):
_print_log('DEBUG', msg, insert_leading_line_break)
def log_warn(msg, insert_leading_line_break=False):
_print_log('WARN', msg, insert_leading_line_break)
def log_error(msg, insert_leading_line_break=False):
_print_log('ERROR', msg, insert_leading_line_break)
|
|
#!/usr/bin/env python
###############################################################################
#
# genomeCoverage.py version 1.0
#
# Calculates windowed coverage across a genome and finds mate
# matches and positions for each genome scaffold using a SAM file
#
# Calculates GC content across genome and gaps of unknown bases
# for each genome scaffold using an optional FASTA file
#
# Copyright (C) 2014 Matthew Neave & Evan Denmark
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""
NOTES ABOUT THIS PROGRAM
The following files are produced when only a SAM file is provided:
The coverage file produced gives the scaffold name, each subunit within each scaffold, and how many
times a read maps to that subunit. If no reads are mapped to the subunit, it will be given a value
of zero. In most cases (but not all) when there are consecutive subunits within a scaffold of
coverage zero, there is a gap spanning these subunits.
The ends file produced gives the original scaffold with the original mate's start and end position on
that scaffold, as well as the mate's scaffold and position. For this version of the
program, mates are 100 base pairs long because the program used to generate the mates (HiSeq) creates
mates of this length.
The karyotype file produced gives the scaffold name, its length, and the color in which the scaffold
will appear when it is run through a visualization software.
The following files are produced when the required SAM file and optional FASTA file are provided:
The GC file produced gives the scaffold name, each subunit within each scaffold, and the GC content of
each subunit. In most cases when the GC content of consecutive subunits is zero, it is due to a gap
spanning these subunits. In addition, if a windowSize is not specified on the command line, the default
is 1000. Therefore, one would expect every subunit to have a GC content percentage to no more than one
decimal place (ex. 541 GC out of 1000 base pairs results in a 54.1% GC content). However, in many cases,
the GC content goes far beyond 1 decimal place because of gaps within the subunit. Some gaps may be only
a single nucleotide. Because this program does not count gaps as nucleotides when calculating GC content,
this results in a fraction with a denominator other than 1000, giving a percentage with many decimals.
Of course, this only applies when the default 1000 is used as the windowSize.
The gap file produced gives the scaffold in which the gap resides and the start and end position of the
gap. This program defines a gap as unknown nucleotides, given as "N" in the FASTA file.
In addition to the files produced, the following will be displayed to the user:
- The number of reads in the SAM file that do not match to any scaffolds
- Warnings if any scaffolds did not have any reads matched to them
- Warnings if the scaffolds provided in your SAM file differ from those in the FASTA file (possibly because your scaffolds were recombined with outside software)
"""
import argparse
parser = argparse.ArgumentParser(description = 'Please provide a SAM file (required), a windowSize (optional, default 1000), an end_size (optional, default 500), and a FASTA file (optional). ')
parser.add_argument('samfile',help= 'a sam file with your data')
parser.add_argument('-windowSize',default = 1000, type=int, help= 'window size for coverage subunits')
parser.add_argument('-end_size', default = 500, type=int, help = 'distance from end to calculate links')
parser.add_argument('-fasta', default = None, help= 'a fasta file to calculate gc content and gaps within the scaffolds')
samfile = parser.parse_args().samfile
windowSize = parser.parse_args().windowSize
end_size = parser.parse_args().end_size
fasta= parser.parse_args().fasta
# use argparse module to get input SAM file and size of windows
def genomeCoverage(samfile, windowSize, end_size, fasta):
"""
The first object this function returns is a dictionary within a dictionary. The larger dictionary has the scaffold as a
key and a dictionary as the value. The smaller dictionary has the subunit (within each scaffold) as the key and coverage
value (an integer) as the value.
The dictionary (scaffold_dict) is in the form {scaffold: {subunit:coverage}}.
The second object this function returns is a dictionary with the scaffold as a key and
a list containing (in this order) the map position of the original read and a list of the mate's
scaffold and the mate's position on that scaffold.
The dictionary (end_piece_dict) is in the form {scaffold:[original read position, [mate scaffold, mate position]]}
The third object this function returns is a dictionary (len_dict) that simply shows the lengths for each scaffold.
The dictionary (len_dict) is in the form {scaffold:length}
The fourth object this function returns is a dictionary (gc_dict) that shows the GC content of each subunit of
size windowSize of each scaffold.
The dictionary is in the form {scaffold:{subunit:gc_content}}
The fifth object this function returns is a dictionary (gap_dict) that shows the gaps of unknown base in each scaffold.
The dictionary is in the form {scaffold: [[gap_start, gap_end], [gap_start, gap_end]]}
"""
# open SAM file
# initialize our dictionaries that we will use
the_file = open(samfile)
# the coverage dictionary
scaffold_dict = {}
# scaffold length dictionary
len_dict = {}
# runs near the edge dictionary
end_piece_dict = {}
didNotMap = 0
largest_coverage = {}
for each_line in the_file:
"""
The first few lines of the sam file do not include information about the reads, but rather
information about the scaffold
"""
each_line = each_line.split()
if each_line[0] == '@SQ':
scaffold_name = each_line[1].split(':')
name = str(scaffold_name[1]).lstrip('[').rstrip(']')
name = name.strip()
line_len = each_line[2].split(':')
length = str(line_len[1]).lstrip('[').rstrip(']')
length = length.strip()
len_dict[name] = length
if each_line[0][0] == '@':
continue
else:
scaffold = each_line[2]
mapPosition = int(each_line[3])
possible_equal = each_line[6]
mate_map = int(each_line[7])
include = True
"""
if the original read is at the end of a scaffold and it has a mate on another scaffold, we want to remember
its position and mate, so we create the end_piece dictionary which, for each scaffold key, stores a
list of smaller lists, one per read. Within each smaller list, the first item is the map position
of the original read and the second item is a list of the mate's scaffold name and the mate's position.
End_piece_dict dictionary is in the form {scaffold: [[mapPosition,[mate scaffold, mate position]], [mapPosition,[mate scaffold, mate position]] etc...]
"""
if scaffold == '*':
didNotMap += 1
continue
else:
if possible_equal != '=':
# the following ensures that the reciprocal end link is not included
if possible_equal in end_piece_dict:
for each_mate in end_piece_dict[possible_equal]:
if each_mate[0] == mate_map:
include = False
if include == True:
if end_piece(mapPosition,int(len_dict[scaffold]), possible_equal, end_size) and is_mate_at_end(int(len_dict[possible_equal]),mate_map, end_size):
if scaffold not in end_piece_dict:
end_piece_dict[scaffold] = []
read_list = [mapPosition, [possible_equal, mate_map]]
end_piece_dict[scaffold].append(read_list)
# coveragePosition is the name we will give each window of size windowSize (ex. if there are 10,000 bp and we have windowSize 1000, there will be 10 coverage positions of names 0-9)
coveragePosition = mapPosition / windowSize
coveragePosition = int(coveragePosition)
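# Worked example: with windowSize = 1000, a read mapped at position 2534 falls
# into coverage window 2 (bases 2000-2999), while a read at position 999 falls
# into window 0.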
if scaffold not in largest_coverage or coveragePosition > largest_coverage[scaffold]:
largest_coverage[scaffold] = coveragePosition
if scaffold not in scaffold_dict:
scaffold_dict[scaffold] = {}
if coveragePosition in scaffold_dict[scaffold]:
scaffold_dict[scaffold][coveragePosition] += 1
else:
scaffold_dict[scaffold][coveragePosition] = 1
for each_scaffold in largest_coverage:
for i in xrange(largest_coverage[each_scaffold]):
if i not in scaffold_dict[each_scaffold]:
scaffold_dict[each_scaffold][i] =0
print
print 'For reference,', didNotMap, 'reads did not map to any scaffold.'
print
for each_scaffold in len_dict:
if each_scaffold not in scaffold_dict:
print
print "WARNING! No reads mapped to", each_scaffold, "and therefore it will not be in your coverage file."
print
# A fasta file must be provided in order to make gc_content dictionary and gap dictionary
if fasta != None:
fasta= open(fasta)
gc_dict = {}
gap_dict = {}
for each_line in fasta:
if each_line[0] == '>':
#name line
each_line = each_line.lstrip('>')
each_line = each_line.rstrip('\n')
# HERE 'each_line' MUST BE THE NAME OF A SCAFFOLD
# Some fasta files may differ in the way the original program gives the '>' line a name, so it may require alteration
gc_dict[each_line] = {}
name_line = each_line
if each_line in scaffold_dict:
if name_line not in gap_dict:
gap_dict[name_line] = []
count = 0
num_gc = 0.0
num_actg = 0.0
current_gap = False
gc_content = 0.0
else:
#sequence line
each_line = each_line.rstrip('\n')
for each_base in each_line:
count +=1
each_base = str(each_base)
if current_gap == True:
if each_base.upper() == 'N':
gap_end+=1
else:
the_gap = [gap_start, gap_end]
gap_dict[name_line].append(the_gap)
current_gap = False
elif each_base.upper() == 'N':
gap_start = count
gap_end = gap_start
current_gap = True
else:
if each_base.upper() == 'C' or each_base.upper() == 'G':
num_gc +=1
num_actg +=1
gc_content = (float(num_gc)/float(num_actg))*100.0
if each_base.upper() == 'A' or each_base.upper() == 'T':
num_actg+=1
gc_content = (float(num_gc)/float(num_actg))*100.0
if count%windowSize == 0:
current_window=((int(count/windowSize))-1)
gc_dict[name_line][current_window] = gc_content
gc_content = 0.0
num_gc = 0.0
num_actg = 0.0
elif count == int(len_dict[name_line]):
gc_dict[name_line][current_window+1] = gc_content
gc_content = 0.0
num_gc = 0.0
num_actg = 0.0
for each_scaffold in scaffold_dict:
if int((int(len_dict[each_scaffold]))/windowSize)+1 != len(gc_dict[each_scaffold]):
print
print "WARNING! The scaffolds in the SAM file have different lengths than the scaffolds in the FASTA file."
print
break
return scaffold_dict, end_piece_dict, len_dict, gc_dict, gap_dict
else:
return scaffold_dict, end_piece_dict, len_dict
def end_piece(start_pos, length, possible_equal, end_size):
"""
Determines if your read is at the end of a scaffold.
"""
if (((length - start_pos) < end_size) or (start_pos < end_size)) and possible_equal != '=':
#it is at the end and mate is on another scaffold
return True
else:
#either the read is not near the end or the mate is on the same scaffold or both
return False
def is_mate_at_end(length_mate_scaffold, position_of_mate, end_size):
"""
Determines if the mate of your original read is near the end of its scaffold
"""
if ((length_mate_scaffold - position_of_mate) < end_size) or (position_of_mate < end_size):
#the mate is near the end of its scaffold
return True
else:
return False
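# Illustration (hypothetical numbers): with end_size = 500, a read mapped at
# position 120 of a 10,000 bp scaffold counts as being near the end (120 < 500),
# as does one at position 9,700 (10,000 - 9,700 = 300 < 500); a read at
# position 5,000 does not.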
def make_files(samfile, windowSize, end_size, fasta):
"""
Takes the organized data generated by genomeCoverage() and creates 3 files: a coverage file,
a karyotype file, and an end file.
"""
genome = genomeCoverage(samfile, windowSize, end_size, fasta)
if fasta != None:
gap_dict = genome[4]
gc_dict = genome[3]
len_dict = genome[2]
end_piece_dict = genome[1]
scaffold_dict = genome[0]
scaff_list = []
len_list = []
end_list = []
gc_list = []
gap_list = []
for scaffold in scaffold_dict:
scaff_list.append(scaffold)
scaff_list = sorted(scaff_list)
for each_scaffold in len_dict:
len_list.append(each_scaffold)
len_list = sorted(len_list)
for the_scaffold in end_piece_dict:
end_list.append(the_scaffold)
end_list = sorted(end_list)
if fasta != None:
for scaffolds in gc_dict:
gc_list.append(scaffolds)
gc_list= sorted(gc_list)
for a_scaffold in gap_dict:
gap_list.append(a_scaffold)
gap_list = sorted(gap_list)
# MAKE THE COVERAGE FILE
new_file = open('output.coverage.txt', 'w')
end=0
for scaffold in scaff_list:
length = int(len_dict[scaffold])
for subunit in scaffold_dict[scaffold]:
start = subunit*windowSize
end = start + windowSize
if end >= length:
end = length
coverage = scaffold_dict[scaffold][subunit]
string = str(str(scaffold)+'\t' + str(start)+ '\t' +str(end)+ '\t' +str(coverage)+'\n')
new_file.write(string)
new_file.close()
# MAKE THE KARYOTYPE FILE
new_file2 = open('output.karyotype.txt', 'w')
n=0
for scaffold in len_list:
if n == 7:
n=0
length = int(len_dict[scaffold])
color = ['set1-7-qual-1','set1-7-qual-2','set1-7-qual-3','set1-7-qual-4','set1-7-qual-5','set1-7-qual-6','set1-7-qual-7']
line = ('chr -' + '\t' + str(scaffold)+ '\t' +str(scaffold) +'\t' + str(0) + '\t' + str(length) + '\t' +str(color[n]) + '\n')
new_file2.write(line)
n+=1
new_file2.close()
# MAKE THE ENDS FILE
new_file3 = open('output.ends.txt', 'w')
for scaffold in end_list:
for each_end in end_piece_dict[scaffold]:
original = str(scaffold)
original_start = str(each_end[0])
original_end = int(original_start) + 100
if original_end > int(len_dict[scaffold]):
original_end = int(len_dict[scaffold])
original_end = str(original_end)
mate = str(each_end[1][0])
mate_start = str(each_end[1][1])
mate_end = str(int(mate_start)+100)
if int(mate_end) > int(len_dict[mate]):
mate_end = str(len_dict[mate])
output = str(original + '\t' + original_start + '\t' + original_end + '\t' + mate + '\t' + mate_start + '\t' +mate_end + '\n')
new_file3.write(output)
new_file3.close()
if fasta != None:
# MAKE THE GC FILE
new_file4 = open('output.gc.txt', 'w')
for scaffold in gc_list:
the_scaffold = str(scaffold)
length = int(len_dict[scaffold])
for subunit in gc_dict[scaffold]:
start = int(subunit)*windowSize
end = start+windowSize
if end > length:
end = length
end = str(end)
start= str(start)
gc_content = str(gc_dict[scaffold][subunit])
output= str(the_scaffold + '\t' + start+ '\t'+ end + '\t' + gc_content + '\n')
new_file4.write(output)
new_file4.close()
# MAKE THE GAP FILE
new_file5 = open('output.gaps.txt', 'w')
for scaffold in gap_list:
the_scaffold = str(scaffold)
for gap in gap_dict[scaffold]:
gap_start=str(gap[0])
gap_end=str(gap[1])
output= str(the_scaffold + '\t' + gap_start + '\t' + gap_end + '\n')
new_file5.write(output)
new_file5.close()
make_files(samfile, windowSize, end_size, fasta)
|
|
from collections import Iterable
from common.exceptions import LogicError
from ledger.ledger import Ledger
from plenum.common.constants import AUDIT_LEDGER_ID, TXN_VERSION, AUDIT_TXN_VIEW_NO, AUDIT_TXN_PP_SEQ_NO, \
AUDIT_TXN_LEDGERS_SIZE, AUDIT_TXN_LEDGER_ROOT, AUDIT_TXN_STATE_ROOT, AUDIT_TXN_PRIMARIES, AUDIT_TXN_DIGEST, \
AUDIT_TXN_NODE_REG, CURRENT_TXN_PAYLOAD_VERSIONS, AUDIT, CURRENT_TXN_VERSION
from plenum.common.ledger_uncommitted_tracker import LedgerUncommittedTracker
from plenum.common.transactions import PlenumTransactions
from plenum.common.txn_util import init_empty_txn, set_payload_data, get_payload_data, get_seq_no
from plenum.server.batch_handlers.batch_request_handler import BatchRequestHandler
from plenum.server.batch_handlers.three_pc_batch import ThreePcBatch
from plenum.server.database_manager import DatabaseManager
from stp_core.common.log import getlogger
logger = getlogger()
class AuditBatchHandler(BatchRequestHandler):
def __init__(self, database_manager: DatabaseManager, ):
super().__init__(database_manager, AUDIT_LEDGER_ID)
# TODO: move it to BatchRequestHandler
self.tracker = LedgerUncommittedTracker(None, self.ledger.uncommitted_root_hash, self.ledger.size)
def post_batch_applied(self, three_pc_batch: ThreePcBatch, prev_handler_result=None):
txn = self._add_to_ledger(three_pc_batch)
self.tracker.apply_batch(None, self.ledger.uncommitted_root_hash, self.ledger.uncommitted_size)
logger.debug("applied audit txn {}; uncommitted root hash is {}; uncommitted size is {}".
format(str(txn), self.ledger.uncommitted_root_hash, self.ledger.uncommitted_size))
def post_batch_rejected(self, ledger_id, prev_handler_result=None):
_, _, txn_count = self.tracker.reject_batch()
self.ledger.discardTxns(txn_count)
logger.debug("rejected {} audit txns; uncommitted root hash is {}; uncommitted size is {}".
format(txn_count, self.ledger.uncommitted_root_hash, self.ledger.uncommitted_size))
def commit_batch(self, three_pc_batch, prev_handler_result=None):
_, _, txns_count = self.tracker.commit_batch()
_, committedTxns = self.ledger.commitTxns(txns_count)
logger.debug("committed {} audit txns; uncommitted root hash is {}; uncommitted size is {}".
format(txns_count, self.ledger.uncommitted_root_hash, self.ledger.uncommitted_size))
return committedTxns
def on_catchup_finished(self):
self.tracker.set_last_committed(state_root=None,
txn_root=self.ledger.uncommitted_root_hash,
ledger_size=self.ledger.size)
@staticmethod
def transform_txn_for_ledger(txn):
'''
Makes sure that we have integers as keys after possible deserialization from JSON
:param txn: txn to be transformed
:return: transformed txn
'''
txn_data = get_payload_data(txn)
txn_data[AUDIT_TXN_LEDGERS_SIZE] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGERS_SIZE].items()}
txn_data[AUDIT_TXN_LEDGER_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGER_ROOT].items()}
txn_data[AUDIT_TXN_STATE_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_STATE_ROOT].items()}
return txn
def _add_to_ledger(self, three_pc_batch: ThreePcBatch):
# if PRE-PREPARE doesn't have audit txn (probably old code) - do nothing
# TODO: remove this check after all nodes support audit ledger
if not three_pc_batch.has_audit_txn:
logger.info("Has 3PC batch without audit ledger: {}".format(str(three_pc_batch)))
return
# 1. prepare AUDIT txn
txn_data = self._create_audit_txn_data(three_pc_batch, self.ledger.get_last_txn())
txn = init_empty_txn(txn_type=PlenumTransactions.AUDIT.value)
txn = set_payload_data(txn, txn_data)
# 2. Append txn metadata
self.ledger.append_txns_metadata([txn], three_pc_batch.pp_time)
# 3. Add to the Ledger
self.ledger.appendTxns([txn])
return txn
def _create_audit_txn_data(self, three_pc_batch, last_audit_txn):
# 1. general format and (view_no, pp_seq_no)
view_no = three_pc_batch.original_view_no if three_pc_batch.original_view_no is not None else three_pc_batch.view_no
txn = {
TXN_VERSION: CURRENT_TXN_PAYLOAD_VERSIONS[AUDIT],
AUDIT_TXN_VIEW_NO: view_no,
AUDIT_TXN_PP_SEQ_NO: three_pc_batch.pp_seq_no,
AUDIT_TXN_LEDGERS_SIZE: {},
AUDIT_TXN_LEDGER_ROOT: {},
AUDIT_TXN_STATE_ROOT: {},
AUDIT_TXN_PRIMARIES: None,
AUDIT_TXN_DIGEST: three_pc_batch.pp_digest
}
for lid, ledger in self.database_manager.ledgers.items():
if lid == AUDIT_LEDGER_ID:
continue
# 2. ledger size
txn[AUDIT_TXN_LEDGERS_SIZE][lid] = ledger.uncommitted_size
# 3. ledger root (either root_hash or seq_no to last changed)
# TODO: support setting for multiple ledgers
self._fill_ledger_root_hash(txn, lid, ledger, last_audit_txn, three_pc_batch)
# 5. set primaries field
self._fill_primaries(txn, three_pc_batch, last_audit_txn)
# 6. set nodeReg field
self._fill_node_reg(txn, three_pc_batch, last_audit_txn)
return txn
def _fill_ledger_root_hash(self, txn, lid, ledger, last_audit_txn, three_pc_batch):
last_audit_txn_data = get_payload_data(last_audit_txn) if last_audit_txn is not None else None
if lid == three_pc_batch.ledger_id:
txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)
# 1. it is the first batch and we have something
elif last_audit_txn_data is None and ledger.uncommitted_size:
txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)
# 1.1. Rare case -- we have previous audit txns but don't have this ledger i.e. new plugins
elif last_audit_txn_data is not None and last_audit_txn_data[AUDIT_TXN_LEDGERS_SIZE].get(lid, None) is None and \
len(ledger.uncommittedTxns):
txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)
# 2. Usual case -- this ledger was updated since the last audit txn
elif last_audit_txn_data is not None and last_audit_txn_data[AUDIT_TXN_LEDGERS_SIZE].get(lid,
None) is not None and \
ledger.uncommitted_size > last_audit_txn_data[AUDIT_TXN_LEDGERS_SIZE][lid]:
txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)
# 3. This ledger is never audited, so do not add the key
elif last_audit_txn_data is None or lid not in last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT]:
return
# 4. ledger is not changed in last batch => delta = delta + 1
elif isinstance(last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][lid], int):
txn[AUDIT_TXN_LEDGER_ROOT][lid] = last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][lid] + 1
# 5. ledger is changed in last batch but not changed now => delta = 1
elif last_audit_txn_data:
txn[AUDIT_TXN_LEDGER_ROOT][lid] = 1
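# Illustration (hypothetical): if a ledger was last changed three audit txns
# ago, its AUDIT_TXN_LEDGER_ROOT entry holds the integer 3 rather than a root
# hash; as soon as the ledger changes again, the actual ledger root hash (and
# state root) is written instead.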
def _fill_primaries(self, txn, three_pc_batch, last_audit_txn):
last_audit_txn_data = get_payload_data(last_audit_txn) if last_audit_txn is not None else None
last_txn_value = last_audit_txn_data[AUDIT_TXN_PRIMARIES] if last_audit_txn_data else None
current_primaries = three_pc_batch.primaries
# 1. First audit txn
if last_audit_txn_data is None:
txn[AUDIT_TXN_PRIMARIES] = current_primaries
# 2. Previous primaries field contains primary list
# If primaries did not change, we will store the seq_no delta
# between current txn and last persisted primaries, i.e.
# we can find seq_no of last actual primaries, like:
# last_audit_txn_seq_no - last_audit_txn[AUDIT_TXN_PRIMARIES]
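# Illustration (hypothetical numbers): if the audit txn at seq_no 100 stores
# AUDIT_TXN_PRIMARIES = 3, the last full primaries list was persisted in the
# audit txn at seq_no 100 - 3 = 97.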
elif isinstance(last_txn_value, Iterable):
if last_txn_value == current_primaries:
txn[AUDIT_TXN_PRIMARIES] = 1
else:
txn[AUDIT_TXN_PRIMARIES] = current_primaries
# 3. Previous primaries field is delta
elif isinstance(last_txn_value, int) and last_txn_value < self.ledger.uncommitted_size:
last_primaries_seq_no = get_seq_no(last_audit_txn) - last_txn_value
last_primaries = get_payload_data(
self.ledger.get_by_seq_no_uncommitted(last_primaries_seq_no))[AUDIT_TXN_PRIMARIES]
if isinstance(last_primaries, Iterable):
if last_primaries == current_primaries:
txn[AUDIT_TXN_PRIMARIES] = last_txn_value + 1
else:
txn[AUDIT_TXN_PRIMARIES] = current_primaries
else:
raise LogicError('Value mentioned in the primaries field must be a '
'seq_no of a txn with primaries')
# 4. That cannot be
else:
raise LogicError('Incorrect primaries field in audit ledger (seq_no: {}. value: {})'.format(
get_seq_no(last_audit_txn), last_txn_value))
def _fill_node_reg(self, txn, three_pc_batch, last_audit_txn):
last_audit_txn_data = get_payload_data(last_audit_txn) if last_audit_txn is not None else None
last_audit_node_reg = last_audit_txn_data.get(AUDIT_TXN_NODE_REG) if last_audit_txn_data else None
current_node_reg = three_pc_batch.node_reg
if current_node_reg is None:
return
# 1. First audit txn with node reg
if last_audit_node_reg is None:
txn[AUDIT_TXN_NODE_REG] = current_node_reg
# 2. Previous nodeReg field contains nodeReg list
# If nodeReg did not change, we will store the seq_no delta
# between current txn and last persisted nodeReg, i.e.
# we can find seq_no of last actual nodeReg, like:
# last_audit_txn_seq_no - last_audit_txn[AUDIT_TXN_NODE_REG]
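# Illustration (hypothetical numbers): AUDIT_TXN_NODE_REG = 2 in the audit txn
# at seq_no 50 means the last full node registry was persisted at seq_no 48.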
elif isinstance(last_audit_node_reg, Iterable):
if last_audit_node_reg == current_node_reg:
txn[AUDIT_TXN_NODE_REG] = 1
else:
txn[AUDIT_TXN_NODE_REG] = current_node_reg
# 3. Previous nodeReg field is delta
elif isinstance(last_audit_node_reg, int) and last_audit_node_reg < self.ledger.uncommitted_size:
last_node_reg_seq_no = get_seq_no(last_audit_txn) - last_audit_node_reg
last_node_reg = get_payload_data(
self.ledger.get_by_seq_no_uncommitted(last_node_reg_seq_no))[AUDIT_TXN_NODE_REG]
if isinstance(last_node_reg, Iterable):
if last_node_reg == current_node_reg:
txn[AUDIT_TXN_NODE_REG] = last_audit_node_reg + 1
else:
txn[AUDIT_TXN_NODE_REG] = current_node_reg
else:
raise LogicError('Value mentioned in the nodeReg field must be a '
'seq_no of a txn with nodeReg')
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-off jobs for explorations."""
import ast
import logging
from constants import constants
from core import jobs
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.platform import models
import feconf
import utils
(base_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration])
_COMMIT_TYPE_REVERT = 'revert'
class ExpSummariesCreationOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that calculates summaries of explorations. For every
ExplorationModel entity, create an ExpSummaryModel entity containing
information described in ExpSummariesAggregator.
The summaries store the following information:
title, category, objective, language_code, tags, last_updated,
created_on, status (private, public), community_owned, owner_ids,
editor_ids, viewer_ids, version.
Note: the contributor_ids field is populated by
ExpSummariesContributorsOneOffJob.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(exploration_model):
if not exploration_model.deleted:
exp_services.create_exploration_summary(
exploration_model.id, None)
@staticmethod
def reduce(exp_id, list_of_exps):
pass
class ExpSummariesContributorsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""One-off job that finds the user ids of the contributors
(defined as any human who has made a 'positive' -- i.e.
non-revert -- commit) for each exploration.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationSnapshotMetadataModel]
@staticmethod
def map(item):
if (item.commit_type != _COMMIT_TYPE_REVERT and
item.committer_id not in feconf.SYSTEM_USER_IDS):
exp_id = item.get_unversioned_instance_id()
yield (exp_id, item.committer_id)
@staticmethod
def reduce(exp_id, committer_id_list):
exp_summary_model = exp_models.ExpSummaryModel.get_by_id(exp_id)
if exp_summary_model is None:
return
exp_summary_model.contributor_ids = list(set(committer_id_list))
exp_summary_model.put()
class ExplorationContributorsSummaryOneOffJob(
jobs.BaseMapReduceOneOffJobManager):
"""One-off job that computes the number of commits
done by contributors for each Exploration
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.deleted:
return
summary = exp_services.get_exploration_summary_by_id(item.id)
summary.contributors_summary = (
exp_services.compute_exploration_contributors_summary(item.id))
exp_services.save_exploration_summary(summary)
@staticmethod
def reduce(key, values):
pass
class ExplorationFirstPublishedOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""One-off job that finds first published time in milliseconds for all
explorations.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationRightsSnapshotContentModel]
@staticmethod
def map(item):
if item.content['status'] == rights_manager.ACTIVITY_STATUS_PUBLIC:
yield (
item.get_unversioned_instance_id(),
utils.get_time_in_millisecs(item.created_on))
@staticmethod
def reduce(exp_id, stringified_commit_times_msecs):
exploration_rights = rights_manager.get_exploration_rights(
exp_id, strict=False)
if exploration_rights is None:
return
commit_times_msecs = [
ast.literal_eval(commit_time_string) for
commit_time_string in stringified_commit_times_msecs]
first_published_msec = min(commit_times_msecs)
rights_manager.update_activity_first_published_msec(
constants.ACTIVITY_TYPE_EXPLORATION, exp_id,
first_published_msec)
class ExplorationValidityJobManager(jobs.BaseMapReduceOneOffJobManager):
"""Job that checks that all explorations have appropriate validation
statuses.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.deleted:
return
exploration = exp_services.get_exploration_from_model(item)
exp_rights = rights_manager.get_exploration_rights(item.id)
try:
if exp_rights.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
exploration.validate()
else:
exploration.validate(strict=True)
except utils.ValidationError as e:
yield (item.id, unicode(e).encode('utf-8'))
@staticmethod
def reduce(key, values):
yield (key, values)
class ExplorationMigrationJobManager(jobs.BaseMapReduceOneOffJobManager):
"""A reusable one-time job that may be used to migrate exploration schema
versions. This job will load all existing explorations from the data store
and immediately store them back into the data store. The loading process of
an exploration in exp_services automatically performs schema updating. This
job persists that conversion work, keeping explorations up-to-date and
improving the load time of new explorations.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.deleted:
return
# Do not upgrade explorations that fail non-strict validation.
old_exploration = exp_services.get_exploration_by_id(item.id)
try:
old_exploration.validate()
except Exception as e:
logging.error(
'Exploration %s failed non-strict validation: %s' %
(item.id, e))
return
# If the exploration model being stored in the datastore is not the
# most up-to-date states schema version, then update it.
if (item.states_schema_version !=
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
# Note: update_exploration does not need to apply a change list in
# order to perform a migration. See the related comment in
# exp_services.apply_change_list for more information.
commit_cmds = [{
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': str(item.states_schema_version),
'to_version': str(
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
}]
exp_services.update_exploration(
feconf.MIGRATION_BOT_USERNAME, item.id, commit_cmds,
'Update exploration states from schema version %d to %d.' % (
item.states_schema_version,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION))
@staticmethod
def reduce(key, values):
yield (key, values)
class InteractionAuditOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that produces a list of (exploration, state) pairs, grouped by the
interaction they use.
This job is for demonstration purposes. It is not enabled by default in the
jobs registry.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.deleted:
return
exploration = exp_services.get_exploration_from_model(item)
for state_name, state in exploration.states.iteritems():
exp_and_state_key = '%s %s' % (item.id, state_name)
yield (state.interaction.id, exp_and_state_key)
@staticmethod
def reduce(key, values):
yield (key, values)
class ItemSelectionInteractionOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that produces a list of (exploration, state) pairs that use the item
selection interaction and that have rules that do not match the answer
choices. These probably need to be fixed manually.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.deleted:
return
exploration = exp_services.get_exploration_from_model(item)
for state_name, state in exploration.states.iteritems():
if state.interaction.id == 'ItemSelectionInput':
choices = (
state.interaction.customization_args['choices']['value'])
for group in state.interaction.answer_groups:
for rule_spec in group.rule_specs:
for rule_item in rule_spec.inputs['x']:
if rule_item not in choices:
yield (
item.id,
'%s: %s' % (
state_name.encode('utf-8'),
rule_item.encode('utf-8')))
@staticmethod
def reduce(key, values):
yield (key, values)
class ViewableExplorationsAuditJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that outputs a list of private explorations which are viewable."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
if item.deleted:
return
exploration_rights = rights_manager.get_exploration_rights(
item.id, strict=False)
if exploration_rights is None:
return
if (exploration_rights.status == feconf.ACTIVITY_STATUS_PRIVATE
and exploration_rights.viewable_if_private):
yield (item.id, item.title.encode('utf-8'))
@staticmethod
def reduce(key, values):
yield (key, values)
class ExplorationConversionErrorIdentificationJob(
jobs.BaseMapReduceOneOffJobManager):
"""Job that outputs the list of explorations that currently consist of
redundant features and result in an ExplorationConversionError when
retrieved.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
try:
exploration = exp_services.get_exploration_by_id(item.id)
# Handle case where the exploration is deleted.
except Exception as e:
return
latest_exp_version = exploration.version
version_numbers = range(1, latest_exp_version + 1)
try:
exp_services.get_multiple_explorations_by_version(
item.id, version_numbers)
except Exception as e:
yield (item.id, e)
return
@staticmethod
def reduce(key, values):
yield (key, values)
|
|
import unittest
import re
from django.conf import settings
from django.contrib.auth.models import User
from tango_comments import signals
from tango_comments.models import Comment
from . import CommentTestCase
from tests.testapp.models import Article, Book
post_redirect_re = re.compile(r'^/posted/\?c=(?P<pk>\d+$)')
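# e.g. matches "/posted/?c=42" and captures pk == "42".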
class CommentViewTests(CommentTestCase):
def testPostCommentHTTPMethods(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.get("/post/", data)
self.assertEqual(response.status_code, 405)
self.assertEqual(response["Allow"], "POST")
def testPostCommentMissingCtype(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
del data["content_type"]
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentBadCtype(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["content_type"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentMissingObjectPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
del data["object_pk"]
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentBadObjectPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["object_pk"] = "14"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostInvalidIntegerPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["comment"] = "This is another comment"
data["object_pk"] = '\ufffd'
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostInvalidDecimalPK(self):
b = Book.objects.get(pk='12.34')
data = self.getValidData(b)
data["comment"] = "This is another comment"
data["object_pk"] = 'cookies'
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testHashTampering(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["security_hash"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testDebugCommentErrors(self):
"""The debug error template should be shown only if DEBUG is True"""
olddebug = settings.DEBUG
settings.DEBUG = True
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["security_hash"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertTemplateUsed(response, "comments/400-debug.html")
settings.DEBUG = False
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertTemplateNotUsed(response, "comments/400-debug.html")
settings.DEBUG = olddebug
@unittest.skip('post not working')
def testPostAsAuthenticatedUser(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data['name'] = data['email'] = ''
self.client.login(username="normaluser", password="normaluser")
self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
self.assertEqual(c.ip_address, "1.2.3.4")
u = User.objects.get(username='normaluser')
self.assertEqual(c.user, u)
self.assertEqual(c.user_name, u.get_full_name())
self.assertEqual(c.user_email, u.email)
@unittest.skip('post not working')
def testPreventDuplicateComments(self):
"""Prevent posting the exact same comment twice"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.client.post("/post/", data)
self.client.post("/post/", data)
self.assertEqual(Comment.objects.count(), 1)
# This should not trigger the duplicate prevention
self.client.post("/post/", dict(data, comment="My second comment."))
self.assertEqual(Comment.objects.count(), 2)
def testWillBePostedSignal(self):
"""
Test that the comment_will_be_posted signal can prevent the comment from
actually getting saved
"""
def receive(sender, **kwargs): return False
signals.comment_will_be_posted.connect(receive, dispatch_uid="comment-test")
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Comment.objects.count(), 0)
signals.comment_will_be_posted.disconnect(dispatch_uid="comment-test")
@unittest.skip("Location not in response")
def testCommentNext(self):
"""Test the different "next" actions the comment view can take"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
data["next"] = "/somewhere/else/"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^/somewhere/else/\?c=\d+$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
data["next"] = "http://badserver/somewhere/else/"
data["comment"] = "This is another comment with an unsafe next url"
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertTrue(match != None, "Unsafe redirection to: %s" % location)
@unittest.skip("Key error for location. Maybe due to bad post")
def testCommentDoneView(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
pk = int(match.group('pk'))
response = self.client.get(location)
self.assertTemplateUsed(response, "comments/posted.html")
self.assertEqual(response.context[0]["comment"], Comment.objects.get(pk=pk))
@unittest.skip("Key error for location. Maybe due to bad post")
def testCommentNextWithQueryString(self):
"""
The `next` key needs to handle already having a query string (#10585)
"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/?foo=bar"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^/somewhere/else/\?foo=bar&c=\d+$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
@unittest.skip("Key error for location. Maybe due to bad post")
def testCommentPostRedirectWithInvalidIntegerPK(self):
"""
Tests that retrieving the location specified in the post redirect,
after appending some invalid data to the query string it ends with,
does not cause a server error.
"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
broken_location = location + "\ufffd"
response = self.client.get(broken_location)
self.assertEqual(response.status_code, 200)
@unittest.skip("Key error for location. Maybe due to bad post")
def testCommentNextWithQueryStringAndAnchor(self):
"""
The `next` key needs to handle already having an anchor. Refs #13411.
"""
# With a query string also.
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/?foo=bar#baz"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^/somewhere/else/\?foo=bar&c=\d+#baz$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
# Without a query string
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/#baz"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^/somewhere/else/\?c=\d+#baz$", location)
self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import MagicMock, patch
import pytest
import requests
import requests_mock
from airflow.exceptions import AirflowException
from airflow.providers.apache.druid.hooks.druid import DruidDbApiHook, DruidHook
class TestDruidHook(unittest.TestCase):
def setUp(self):
super().setUp()
session = requests.Session()
adapter = requests_mock.Adapter()
session.mount('mock', adapter)
class TestDRuidhook(DruidHook):
def get_conn_url(self):
return 'http://druid-overlord:8081/druid/indexer/v1/task'
self.db_hook = TestDRuidhook()
@requests_mock.mock()
def test_submit_gone_wrong(self, m):
task_post = m.post(
'http://druid-overlord:8081/druid/indexer/v1/task',
text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
)
status_check = m.get(
'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
text='{"status":{"status": "FAILED"}}',
)
# The job failed for some reason
with pytest.raises(AirflowException):
self.db_hook.submit_indexing_job('Long json file')
assert task_post.called_once
assert status_check.called_once
@requests_mock.mock()
def test_submit_ok(self, m):
task_post = m.post(
'http://druid-overlord:8081/druid/indexer/v1/task',
text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
)
status_check = m.get(
'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
text='{"status":{"status": "SUCCESS"}}',
)
# Exists just as it should
self.db_hook.submit_indexing_job('Long json file')
assert task_post.called_once
assert status_check.called_once
@requests_mock.mock()
def test_submit_correct_json_body(self, m):
task_post = m.post(
'http://druid-overlord:8081/druid/indexer/v1/task',
text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
)
status_check = m.get(
'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
text='{"status":{"status": "SUCCESS"}}',
)
json_ingestion_string = """
{
"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"
}
"""
self.db_hook.submit_indexing_job(json_ingestion_string)
assert task_post.called_once
assert status_check.called_once
if task_post.called_once:
req_body = task_post.request_history[0].json()
assert req_body['task'] == "9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"
@requests_mock.mock()
def test_submit_unknown_response(self, m):
task_post = m.post(
'http://druid-overlord:8081/druid/indexer/v1/task',
text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
)
status_check = m.get(
'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
text='{"status":{"status": "UNKNOWN"}}',
)
# An unknown error code
with pytest.raises(AirflowException):
self.db_hook.submit_indexing_job('Long json file')
assert task_post.called_once
assert status_check.called_once
@requests_mock.mock()
def test_submit_timeout(self, m):
self.db_hook.timeout = 1
self.db_hook.max_ingestion_time = 5
task_post = m.post(
'http://druid-overlord:8081/druid/indexer/v1/task',
text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
)
status_check = m.get(
'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
text='{"status":{"status": "RUNNING"}}',
)
shutdown_post = m.post(
'http://druid-overlord:8081/druid/indexer/v1/task/'
'9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/shutdown',
text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
)
# Because the job keeps running
with pytest.raises(AirflowException):
self.db_hook.submit_indexing_job('Long json file')
assert task_post.called_once
assert status_check.called
assert shutdown_post.called_once
@patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
def test_get_conn_url(self, mock_get_connection):
get_conn_value = MagicMock()
get_conn_value.host = 'test_host'
get_conn_value.conn_type = 'https'
get_conn_value.port = '1'
get_conn_value.extra_dejson = {'endpoint': 'ingest'}
mock_get_connection.return_value = get_conn_value
hook = DruidHook(timeout=1, max_ingestion_time=5)
assert hook.get_conn_url() == 'https://test_host:1/ingest'
@patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
def test_get_auth(self, mock_get_connection):
get_conn_value = MagicMock()
get_conn_value.login = 'airflow'
get_conn_value.password = 'password'
mock_get_connection.return_value = get_conn_value
expected = requests.auth.HTTPBasicAuth('airflow', 'password')
assert self.db_hook.get_auth() == expected
@patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
def test_get_auth_with_no_user(self, mock_get_connection):
get_conn_value = MagicMock()
get_conn_value.login = None
get_conn_value.password = 'password'
mock_get_connection.return_value = get_conn_value
assert self.db_hook.get_auth() is None
@patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
def test_get_auth_with_no_password(self, mock_get_connection):
get_conn_value = MagicMock()
get_conn_value.login = 'airflow'
get_conn_value.password = None
mock_get_connection.return_value = get_conn_value
assert self.db_hook.get_auth() is None
@patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
def test_get_auth_with_no_user_and_password(self, mock_get_connection):
get_conn_value = MagicMock()
get_conn_value.login = None
get_conn_value.password = None
mock_get_connection.return_value = get_conn_value
assert self.db_hook.get_auth() is None
class TestDruidDbApiHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = MagicMock(rowcount=0)
self.conn = conn = MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.conn_type = 'druid'
self.conn.extra_dejson = {'endpoint': 'druid/v2/sql'}
self.conn.cursor.return_value = self.cur
class TestDruidDBApiHook(DruidDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestDruidDBApiHook
def test_get_uri(self):
db_hook = self.db_hook()
assert 'druid://host:1000/druid/v2/sql' == db_hook.get_uri()
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook().get_first(statement)
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook().get_records(statement)
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook().get_pandas_df(statement)
assert column == df.columns[0]
for i, item in enumerate(result_sets):
assert item[0] == df.values.tolist()[i][0]
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
self.cur.execute.assert_called_once_with(statement)
|
|
#!/usr/bin/env python
"""Standard actions that happen on the client."""
import cStringIO as StringIO
import ctypes
import gzip
import hashlib
import os
import platform
import socket
import sys
import time
import zlib
import psutil
import logging
from grr.client import actions
from grr.client import client_utils_common
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import crypto
# We do not send larger buffers than this:
MAX_BUFFER_SIZE = 640*1024
class ReadBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to a server callback."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
fd.Seek(args.offset)
offset = fd.Tell()
data = fd.Read(args.length)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
# Now return the data to the server
self.SendReply(offset=offset, data=data,
length=len(data), pathspec=fd.pathspec)
HASH_CACHE = utils.FastStore(100)
class TransferBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length,
progress_callback=self.Progress)
result = rdfvalue.DataBlob(
data=zlib.compress(data),
compression=rdfvalue.DataBlob.CompressionType.ZCOMPRESSION)
digest = hashlib.sha256(data).digest()
# Ensure that the buffer is counted against this response. Check network
# send limit.
self.ChargeBytesToSession(len(data))
# Now return the data to the server into the special TransferStore well
# known flow.
self.grr_worker.SendReply(
result, session_id=rdfvalue.SessionID("aff4:/flows/W:TransferStore"))
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class HashBuffer(actions.ActionPlugin):
"""Hash a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length)
digest = hashlib.sha256(data).digest()
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class CopyPathToFile(actions.ActionPlugin):
"""Copy contents of a pathspec to a file on disk."""
in_rdfvalue = rdfvalue.CopyPathToFileRequest
out_rdfvalue = rdfvalue.CopyPathToFileRequest
BLOCK_SIZE = 10 * 1024 * 1024
def _Copy(self, dest_fd):
"""Copy from VFS to file until no more data or self.length is reached.
Args:
dest_fd: file object to write to
Returns:
self.written: bytes written
"""
while self.written < self.length:
to_read = min(self.length - self.written, self.BLOCK_SIZE)
data = self.src_fd.read(to_read)
if not data:
break
dest_fd.write(data)
self.written += len(data)
# Send heartbeats for long files.
self.Progress()
return self.written
def Run(self, args):
"""Read from a VFS file and write to a GRRTempFile on disk.
If file writing doesn't complete, files won't be cleaned up.
Args:
args: see CopyPathToFile in jobs.proto
"""
self.src_fd = vfs.VFSOpen(args.src_path, progress_callback=self.Progress)
self.src_fd.Seek(args.offset)
offset = self.src_fd.Tell()
self.length = args.length or (1024 ** 4) # 1 TB
self.written = 0
suffix = ".gz" if args.gzip_output else ""
self.dest_fd = tempfiles.CreateGRRTempFile(directory=args.dest_dir,
lifetime=args.lifetime,
suffix=suffix)
self.dest_file = self.dest_fd.name
with self.dest_fd:
if args.gzip_output:
gzip_fd = gzip.GzipFile(self.dest_file, "wb", 9, self.dest_fd)
# Gzip filehandle needs its own close method called
with gzip_fd:
self._Copy(gzip_fd)
else:
self._Copy(self.dest_fd)
pathspec_out = rdfvalue.PathSpec(
path=self.dest_file, pathtype=rdfvalue.PathSpec.PathType.OS)
self.SendReply(offset=offset, length=self.written, src_path=args.src_path,
dest_dir=args.dest_dir, dest_path=pathspec_out,
gzip_output=args.gzip_output)
class ListDirectory(ReadBuffer):
"""Lists all the files in a directory."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Lists a directory."""
try:
directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(directory.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
for response in files:
self.SendReply(response)
class IteratedListDirectory(actions.IteratedAction):
"""Lists a directory as an iterator."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self, request, client_state):
"""Restores its way through the directory using an Iterator."""
try:
fd = vfs.VFSOpen(request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(fd.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
index = client_state.get("index", 0)
length = request.iterator.number
for response in files[index:index+length]:
self.SendReply(response)
# Update the state
client_state["index"] = index + length
class SuspendableListDirectory(actions.SuspendableAction):
"""Lists a directory as a suspendable client action."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self):
try:
fd = vfs.VFSOpen(self.request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
length = self.request.iterator.number
for group in utils.Grouper(fd.ListFiles(), length):
for response in group:
self.SendReply(response)
self.Suspend()
class StatFile(ListDirectory):
"""Sends a StatResponse for a single file."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Sends a StatResponse for a single file."""
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
res = fd.Stat()
self.SendReply(res)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
class ExecuteCommand(actions.ActionPlugin):
"""Executes one of the predefined commands."""
in_rdfvalue = rdfvalue.ExecuteRequest
out_rdfvalue = rdfvalue.ExecuteResponse
def Run(self, command):
"""Run."""
cmd = command.cmd
args = command.args
time_limit = command.time_limit
res = client_utils_common.Execute(cmd, args, time_limit)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteResponse(
request=command,
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecuteBinaryCommand(actions.ActionPlugin):
"""Executes a command from a passed in binary.
Obviously this is a dangerous function; it provides for arbitrary code execution by
the server running as root/SYSTEM.
This is protected by the CONFIG[PrivateKeys.executable_signing_private_key],
which should be stored offline and well protected.
This method can be utilized as part of an autoupdate mechanism if necessary.
NOTE: If the binary is too large to fit inside a single request, the request
will have the more_data flag enabled, indicating more data is coming.
"""
in_rdfvalue = rdfvalue.ExecuteBinaryRequest
out_rdfvalue = rdfvalue.ExecuteBinaryResponse
suffix = ""
def WriteBlobToFile(self, request, suffix=""):
"""Writes the blob to a file and returns its path."""
lifetime = 0
    # Only set the lifetime on the last chunk written.
if not request.more_data:
lifetime = request.time_limit
# Keep the file for at least 5 seconds after execution.
if lifetime > 0:
lifetime += 5
# First chunk truncates the file, later chunks append.
if request.offset == 0:
mode = "w+b"
else:
mode = "r+b"
temp_file = tempfiles.CreateGRRTempFile(filename=request.write_path,
suffix=suffix, mode=mode)
with temp_file:
path = temp_file.name
temp_file.seek(0, 2)
if temp_file.tell() != request.offset:
raise IOError("Chunks out of order Error.")
# Write the new chunk.
temp_file.write(request.executable.data)
return path
def CleanUp(self, path):
"""Removes the temp file."""
try:
if os.path.exists(path):
os.remove(path)
    except (OSError, IOError) as e:
logging.info("Failed to remove temporary file %s. Err: %s", path, e)
def Run(self, args):
"""Run."""
# Verify the executable blob.
args.executable.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
path = self.WriteBlobToFile(args, self.suffix)
# Only actually run the file on the last chunk.
if not args.more_data:
self.ProcessFile(path, args)
self.CleanUp(path)
def ProcessFile(self, path, args):
res = client_utils_common.Execute(path, args.args, args.time_limit,
bypass_whitelist=True)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteBinaryResponse(
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecutePython(actions.ActionPlugin):
"""Executes python code with exec.
  Obviously this is a dangerous function; it provides for arbitrary code
  execution by the server, running as root/SYSTEM.
This is protected by CONFIG[PrivateKeys.executable_signing_private_key], which
should be stored offline and well protected.
"""
in_rdfvalue = rdfvalue.ExecutePythonRequest
out_rdfvalue = rdfvalue.ExecutePythonResponse
def Run(self, args):
"""Run."""
time_start = time.time()
class StdOutHook(object):
def __init__(self, buf):
self.buf = buf
def write(self, text):
self.buf.write(text)
args.python_code.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
    logging.debug("exec for python code %s", args.python_code.data[0:100])
    context = globals().copy()
    context["py_args"] = args.py_args.ToDict()
    # The execed code can assign to this variable if it wants to return data.
    context["magic_return_str"] = ""
# Export the Progress function to allow python hacks to call it.
context["Progress"] = self.Progress
stdout = StringIO.StringIO()
with utils.Stubber(sys, "stdout", StdOutHook(stdout)):
exec(args.python_code.data, context) # pylint: disable=exec-used
stdout_output = stdout.getvalue()
magic_str_output = context.get("magic_return_str")
if stdout_output and magic_str_output:
output = "Stdout: %s\nMagic Str:%s\n" % (stdout_output, magic_str_output)
else:
output = stdout_output or magic_str_output
time_used = time.time() - time_start
# We have to return microseconds.
result = rdfvalue.ExecutePythonResponse(
time_used=int(1e6 * time_used),
return_val=utils.SmartStr(output))
self.SendReply(result)
class Segfault(actions.ActionPlugin):
"""This action is just for debugging. It induces a segfault."""
in_rdfvalue = None
out_rdfvalue = None
def Run(self, unused_args):
"""Does the segfaulting."""
if flags.FLAGS.debug:
logging.warning("Segfault action requested :(")
print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents
else:
logging.warning("Segfault requested but not running in debug mode.")
class ListProcesses(actions.ActionPlugin):
"""This action lists all the processes running on a machine."""
in_rdfvalue = None
out_rdfvalue = rdfvalue.Process
def Run(self, unused_arg):
# psutil will cause an active loop on Windows 2000
if platform.system() == "Windows" and platform.version().startswith("5.0"):
raise RuntimeError("ListProcesses not supported on Windows 2000")
for proc in psutil.process_iter():
response = rdfvalue.Process()
process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"]
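      # Fetch each field defensively: newer psutil releases expose these as
      # methods while older ones expose attributes (hence the callable() check),
      # and processes may vanish or deny access mid-iteration.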
for field in process_fields:
try:
value = getattr(proc, field)
if value is None:
continue
if callable(value):
value = value()
if not isinstance(value, (int, long)):
value = utils.SmartUnicode(value)
setattr(response, field, value)
except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
pass
try:
for arg in proc.cmdline():
response.cmdline.append(utils.SmartUnicode(arg))
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.nice = proc.nice()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on Windows.
if hasattr(proc, "uids"):
(response.real_uid, response.effective_uid,
response.saved_uid) = proc.uids()
(response.real_gid, response.effective_gid,
response.saved_gid) = proc.gids()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.ctime = long(proc.create_time() * 1e6)
response.status = str(proc.status())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on OSX.
if hasattr(proc, "cwd"):
response.cwd = utils.SmartUnicode(proc.cwd())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.num_threads = proc.num_threads()
except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError):
pass
try:
(response.user_cpu_time,
response.system_cpu_time) = proc.cpu_times()
# This is very time consuming so we do not collect cpu_percent here.
# response.cpu_percent = proc.get_cpu_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.RSS_size, response.VMS_size = proc.memory_info()
response.memory_percent = proc.memory_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
# Due to a bug in psutil, this function is disabled for now
# (https://github.com/giampaolo/psutil/issues/340)
# try:
# for f in proc.open_files():
# response.open_files.append(utils.SmartUnicode(f.path))
# except (psutil.NoSuchProcess, psutil.AccessDenied):
# pass
try:
for c in proc.connections():
conn = response.connections.Append(family=c.family,
type=c.type,
pid=proc.pid)
try:
conn.state = c.status
except ValueError:
logging.info("Encountered unknown connection status (%s).",
c.status)
try:
conn.local_address.ip, conn.local_address.port = c.laddr
# Could be in state LISTEN.
if c.raddr:
conn.remote_address.ip, conn.remote_address.port = c.raddr
except AttributeError:
conn.local_address.ip, conn.local_address.port = c.local_address
# Could be in state LISTEN.
if c.remote_address:
(conn.remote_address.ip,
conn.remote_address.port) = c.remote_address
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
self.SendReply(response)
# Reading information here is slow so we heartbeat between processes.
self.Progress()
class SendFile(actions.ActionPlugin):
"""This action encrypts and sends a file to a remote listener."""
in_rdfvalue = rdfvalue.SendFileRequest
out_rdfvalue = rdfvalue.StatEntry
BLOCK_SIZE = 1024 * 1024 * 10 # 10 MB
def Send(self, sock, msg):
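    # socket.send() may transmit fewer bytes than requested, so loop until the
    # whole buffer has been written.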
totalsent = 0
n = len(msg)
while totalsent < n:
sent = sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent += sent
def Run(self, args):
"""Run."""
# Open the file.
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
if args.address_family == rdfvalue.NetworkAddress.Family.INET:
family = socket.AF_INET
elif args.address_family == rdfvalue.NetworkAddress.Family.INET6:
family = socket.AF_INET6
else:
raise RuntimeError("Socket address family not supported.")
s = socket.socket(family, socket.SOCK_STREAM)
try:
s.connect((args.host, args.port))
except socket.error as e:
raise RuntimeError(str(e))
cipher = crypto.AES128CBCCipher(args.key, args.iv,
crypto.Cipher.OP_ENCRYPT)
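    # Encrypt and send the file in BLOCK_SIZE chunks; Final() below flushes the
    # cipher's remaining output.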
while True:
data = fd.read(self.BLOCK_SIZE)
if not data:
break
self.Send(s, cipher.Update(data))
# Send heartbeats for long files.
self.Progress()
self.Send(s, cipher.Final())
s.close()
self.SendReply(fd.Stat())
class StatFS(actions.ActionPlugin):
"""Call os.statvfs for a given list of paths. OS X and Linux only.
Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.
"""
in_rdfvalue = rdfvalue.StatFSRequest
out_rdfvalue = rdfvalue.Volume
def Run(self, args):
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
for path in args.path_list:
try:
fd = vfs.VFSOpen(rdfvalue.PathSpec(path=path, pathtype=args.pathtype),
progress_callback=self.Progress)
st = fd.StatFS()
mount_point = fd.GetMountPoint()
      except (IOError, OSError) as e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
continue
unix = rdfvalue.UnixVolume(mount_point=mount_point)
      # On Linux, pre-2.6 kernels don't have frsize, so we fall back to bsize.
# The actual_available_allocation_units attribute is set to blocks
# available to the unprivileged user, root may have some additional
# reserved space.
result = rdfvalue.Volume(bytes_per_sector=(st.f_frsize or st.f_bsize),
sectors_per_allocation_unit=1,
total_allocation_units=st.f_blocks,
actual_available_allocation_units=st.f_bavail,
unix=unix)
self.SendReply(result)
|
|
"""Support for a Hue API to control Home Assistant."""
import logging
from aiohttp import web
from homeassistant import core
from homeassistant.components import (
climate, cover, fan, light, media_player, scene, script)
from homeassistant.components.climate.const import (
SERVICE_SET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION, ATTR_POSITION, SERVICE_SET_COVER_POSITION,
SUPPORT_SET_POSITION)
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF,
SUPPORT_SET_SPEED)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_REAL_IP
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_VOLUME_LEVEL, SUPPORT_VOLUME_SET)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, ATTR_TEMPERATURE,
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, SERVICE_CLOSE_COVER, SERVICE_OPEN_COVER,
SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_VOLUME_SET, STATE_OFF, STATE_ON)
from homeassistant.util.network import is_local
_LOGGER = logging.getLogger(__name__)
HUE_API_STATE_ON = 'on'
HUE_API_STATE_BRI = 'bri'
HUE_API_STATE_HUE = 'hue'
HUE_API_STATE_SAT = 'sat'
HUE_API_STATE_HUE_MAX = 65535.0
HUE_API_STATE_SAT_MAX = 254.0
HUE_API_STATE_BRI_MAX = 255.0
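# Value ranges used by the Hue API: hue 0-65535, saturation 0-254, brightness 0-255.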
STATE_BRIGHTNESS = HUE_API_STATE_BRI
STATE_HUE = HUE_API_STATE_HUE
STATE_SATURATION = HUE_API_STATE_SAT
class HueUsernameView(HomeAssistantView):
"""Handle requests to create a username for the emulated hue bridge."""
url = '/api'
name = 'emulated_hue:api:create_username'
extra_urls = ['/api/']
requires_auth = False
async def post(self, request):
"""Handle a POST request."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
if 'devicetype' not in data:
return self.json_message('devicetype not specified',
HTTP_BAD_REQUEST)
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
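        # The emulated bridge does no real pairing; any local client is handed
        # a fixed username.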
return self.json([{'success': {'username': '12345678901234567890'}}])
class HueAllGroupsStateView(HomeAssistantView):
"""Group handler."""
url = '/api/{username}/groups'
name = 'emulated_hue:all_groups:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username):
"""Process a request to make the Brilliant Lightpad work."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
return self.json({
})
class HueGroupView(HomeAssistantView):
"""Group handler to get Logitech Pop working."""
url = '/api/{username}/groups/0/action'
name = 'emulated_hue:groups:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def put(self, request, username):
"""Process a request to make the Logitech Pop working."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
return self.json([{
'error': {
'address': '/groups/0/action/scene',
'type': 7,
'description': 'invalid value, dummy for parameter, scene'
}
}])
class HueAllLightsStateView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights'
name = 'emulated_hue:lights:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username):
"""Process a request to get the list of available lights."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
hass = request.app['hass']
json_response = {}
for entity in hass.states.async_all():
if self.config.is_entity_exposed(entity):
state = get_entity_state(self.config, entity)
number = self.config.entity_id_to_number(entity.entity_id)
json_response[number] = entity_to_json(self.config,
entity, state)
return self.json(json_response)
class HueOneLightStateView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_id}'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username, entity_id):
"""Process a request to get the state of an individual light."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
hass = request.app['hass']
entity_id = self.config.number_to_entity_id(entity_id)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return web.Response(text="Entity not found", status=404)
if not self.config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
state = get_entity_state(self.config, entity)
json_response = entity_to_json(self.config, entity, state)
return self.json(json_response)
class HueOneLightChangeView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_number}/state'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
async def put(self, request, username, entity_number):
"""Process a request to set the state of an individual light."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
config = self.config
hass = request.app['hass']
entity_id = config.number_to_entity_id(entity_number)
if entity_id is None:
_LOGGER.error('Unknown entity number: %s', entity_number)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
if not config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
try:
request_json = await request.json()
except ValueError:
_LOGGER.error('Received invalid json')
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
# Parse the request into requested "on" status and brightness
parsed = parse_hue_api_put_light_body(request_json, entity)
if parsed is None:
_LOGGER.error('Unable to parse data: %s', request_json)
return web.Response(text="Bad request", status=400)
# Choose general HA domain
domain = core.DOMAIN
# Entity needs separate call to turn on
turn_on_needed = False
# Convert the resulting "on" status into the service we need to call
service = SERVICE_TURN_ON if parsed[STATE_ON] else SERVICE_TURN_OFF
# Construct what we need to send to the service
data = {ATTR_ENTITY_ID: entity_id}
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == light.DOMAIN:
if parsed[STATE_ON]:
if entity_features & SUPPORT_BRIGHTNESS:
if parsed[STATE_BRIGHTNESS] is not None:
data[ATTR_BRIGHTNESS] = parsed[STATE_BRIGHTNESS]
if entity_features & SUPPORT_COLOR:
if parsed[STATE_HUE] is not None:
if parsed[STATE_SATURATION]:
sat = parsed[STATE_SATURATION]
else:
sat = 0
hue = parsed[STATE_HUE]
# Convert hs values to hass hs values
sat = int((sat / HUE_API_STATE_SAT_MAX) * 100)
hue = int((hue / HUE_API_STATE_HUE_MAX) * 360)
data[ATTR_HS_COLOR] = (hue, sat)
# If the requested entity is a script add some variables
elif entity.domain == script.DOMAIN:
data['variables'] = {
'requested_state': STATE_ON if parsed[STATE_ON] else STATE_OFF
}
if parsed[STATE_BRIGHTNESS] is not None:
data['variables']['requested_level'] = parsed[STATE_BRIGHTNESS]
# If the requested entity is a climate, set the temperature
elif entity.domain == climate.DOMAIN:
# We don't support turning climate devices on or off,
# only setting the temperature
service = None
if entity_features & SUPPORT_TARGET_TEMPERATURE:
if parsed[STATE_BRIGHTNESS] is not None:
domain = entity.domain
service = SERVICE_SET_TEMPERATURE
data[ATTR_TEMPERATURE] = parsed[STATE_BRIGHTNESS]
# If the requested entity is a media player, convert to volume
elif entity.domain == media_player.DOMAIN:
if entity_features & SUPPORT_VOLUME_SET:
if parsed[STATE_BRIGHTNESS] is not None:
turn_on_needed = True
domain = entity.domain
service = SERVICE_VOLUME_SET
# Convert 0-100 to 0.0-1.0
data[ATTR_MEDIA_VOLUME_LEVEL] = \
parsed[STATE_BRIGHTNESS] / 100.0
# If the requested entity is a cover, convert to open_cover/close_cover
elif entity.domain == cover.DOMAIN:
domain = entity.domain
if service == SERVICE_TURN_ON:
service = SERVICE_OPEN_COVER
else:
service = SERVICE_CLOSE_COVER
if entity_features & SUPPORT_SET_POSITION:
if parsed[STATE_BRIGHTNESS] is not None:
domain = entity.domain
service = SERVICE_SET_COVER_POSITION
data[ATTR_POSITION] = parsed[STATE_BRIGHTNESS]
# If the requested entity is a fan, convert to speed
elif entity.domain == fan.DOMAIN:
if entity_features & SUPPORT_SET_SPEED:
if parsed[STATE_BRIGHTNESS] is not None:
domain = entity.domain
# Convert 0-100 to a fan speed
brightness = parsed[STATE_BRIGHTNESS]
if brightness == 0:
data[ATTR_SPEED] = SPEED_OFF
elif 0 < brightness <= 33.3:
data[ATTR_SPEED] = SPEED_LOW
elif 33.3 < brightness <= 66.6:
data[ATTR_SPEED] = SPEED_MEDIUM
elif 66.6 < brightness <= 100:
data[ATTR_SPEED] = SPEED_HIGH
if entity.domain in config.off_maps_to_on_domains:
# Map the off command to on
service = SERVICE_TURN_ON
# Caching is required because things like scripts and scenes won't
# report as "off" to Alexa if an "off" command is received, because
# they'll map to "on". Thus, instead of reporting its actual
# status, we report what Alexa will want to see, which is the same
# as the actual requested command.
config.cached_states[entity_id] = parsed
# Separate call to turn on needed
if turn_on_needed:
hass.async_create_task(hass.services.async_call(
core.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id},
blocking=True))
if service is not None:
hass.async_create_task(hass.services.async_call(
domain, service, data, blocking=True))
json_response = \
[create_hue_success_response(
entity_id, HUE_API_STATE_ON, parsed[STATE_ON])]
if parsed[STATE_BRIGHTNESS] is not None:
json_response.append(create_hue_success_response(
entity_id, HUE_API_STATE_BRI, parsed[STATE_BRIGHTNESS]))
if parsed[STATE_HUE] is not None:
json_response.append(create_hue_success_response(
entity_id, HUE_API_STATE_HUE, parsed[STATE_HUE]))
if parsed[STATE_SATURATION] is not None:
json_response.append(create_hue_success_response(
entity_id, HUE_API_STATE_SAT, parsed[STATE_SATURATION]))
return self.json(json_response)
def parse_hue_api_put_light_body(request_json, entity):
"""Parse the body of a request to change the state of a light."""
data = {
STATE_BRIGHTNESS: None,
STATE_HUE: None,
STATE_ON: False,
STATE_SATURATION: None,
}
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if HUE_API_STATE_ON in request_json:
if not isinstance(request_json[HUE_API_STATE_ON], bool):
return None
if request_json[HUE_API_STATE_ON]:
# Echo requested device be turned on
data[STATE_BRIGHTNESS] = None
data[STATE_ON] = True
else:
# Echo requested device be turned off
data[STATE_BRIGHTNESS] = None
data[STATE_ON] = False
if HUE_API_STATE_HUE in request_json:
try:
            # Clamp hue from 0 to 65535
data[STATE_HUE] = \
max(0, min(int(request_json[HUE_API_STATE_HUE]),
HUE_API_STATE_HUE_MAX))
except ValueError:
return None
if HUE_API_STATE_SAT in request_json:
try:
# Clamp saturation from 0 to 254
data[STATE_SATURATION] = \
max(0, min(int(request_json[HUE_API_STATE_SAT]),
HUE_API_STATE_SAT_MAX))
except ValueError:
return None
if HUE_API_STATE_BRI in request_json:
try:
# Clamp brightness from 0 to 255
data[STATE_BRIGHTNESS] = \
max(0, min(int(request_json[HUE_API_STATE_BRI]),
HUE_API_STATE_BRI_MAX))
except ValueError:
return None
if entity.domain == light.DOMAIN:
data[STATE_ON] = (data[STATE_BRIGHTNESS] > 0)
if not entity_features & SUPPORT_BRIGHTNESS:
data[STATE_BRIGHTNESS] = None
elif entity.domain == scene.DOMAIN:
data[STATE_BRIGHTNESS] = None
data[STATE_ON] = True
elif entity.domain in [
script.DOMAIN, media_player.DOMAIN,
fan.DOMAIN, cover.DOMAIN, climate.DOMAIN]:
# Convert 0-255 to 0-100
level = (data[STATE_BRIGHTNESS] / HUE_API_STATE_BRI_MAX) * 100
data[STATE_BRIGHTNESS] = round(level)
data[STATE_ON] = True
return data
def get_entity_state(config, entity):
"""Retrieve and convert state and brightness values for an entity."""
cached_state = config.cached_states.get(entity.entity_id, None)
data = {
STATE_BRIGHTNESS: None,
STATE_HUE: None,
STATE_ON: False,
STATE_SATURATION: None
}
if cached_state is None:
data[STATE_ON] = entity.state != STATE_OFF
if data[STATE_ON]:
data[STATE_BRIGHTNESS] = entity.attributes.get(ATTR_BRIGHTNESS)
hue_sat = entity.attributes.get(ATTR_HS_COLOR, None)
if hue_sat is not None:
hue = hue_sat[0]
sat = hue_sat[1]
# convert hass hs values back to hue hs values
data[STATE_HUE] = int((hue / 360.0) * HUE_API_STATE_HUE_MAX)
data[STATE_SATURATION] = \
int((sat / 100.0) * HUE_API_STATE_SAT_MAX)
else:
data[STATE_BRIGHTNESS] = 0
data[STATE_HUE] = 0
data[STATE_SATURATION] = 0
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == light.DOMAIN:
if entity_features & SUPPORT_BRIGHTNESS:
pass
elif entity.domain == climate.DOMAIN:
temperature = entity.attributes.get(ATTR_TEMPERATURE, 0)
# Convert 0-100 to 0-255
data[STATE_BRIGHTNESS] = round(temperature * 255 / 100)
elif entity.domain == media_player.DOMAIN:
level = entity.attributes.get(
ATTR_MEDIA_VOLUME_LEVEL, 1.0 if data[STATE_ON] else 0.0)
# Convert 0.0-1.0 to 0-255
data[STATE_BRIGHTNESS] = \
round(min(1.0, level) * HUE_API_STATE_BRI_MAX)
elif entity.domain == fan.DOMAIN:
speed = entity.attributes.get(ATTR_SPEED, 0)
            # Map the fan speed (off/low/medium/high) to the 0-255 range
data[STATE_BRIGHTNESS] = 0
if speed == SPEED_LOW:
data[STATE_BRIGHTNESS] = 85
elif speed == SPEED_MEDIUM:
data[STATE_BRIGHTNESS] = 170
elif speed == SPEED_HIGH:
data[STATE_BRIGHTNESS] = 255
elif entity.domain == cover.DOMAIN:
level = entity.attributes.get(ATTR_CURRENT_POSITION, 0)
data[STATE_BRIGHTNESS] = round(level / 100 * HUE_API_STATE_BRI_MAX)
else:
data = cached_state
# Make sure brightness is valid
if data[STATE_BRIGHTNESS] is None:
data[STATE_BRIGHTNESS] = 255 if data[STATE_ON] else 0
# Make sure hue/saturation are valid
if (data[STATE_HUE] is None) or (data[STATE_SATURATION] is None):
data[STATE_HUE] = 0
data[STATE_SATURATION] = 0
# If the light is off, set the color to off
if data[STATE_BRIGHTNESS] == 0:
data[STATE_HUE] = 0
data[STATE_SATURATION] = 0
return data
def entity_to_json(config, entity, state):
"""Convert an entity to its Hue bridge JSON representation."""
return {
'state':
{
HUE_API_STATE_ON: state[STATE_ON],
HUE_API_STATE_BRI: state[STATE_BRIGHTNESS],
HUE_API_STATE_HUE: state[STATE_HUE],
HUE_API_STATE_SAT: state[STATE_SATURATION],
'reachable': True
},
'type': 'Dimmable light',
'name': config.get_entity_name(entity),
'modelid': 'HASS123',
'uniqueid': entity.entity_id,
'swversion': '123'
}
def create_hue_success_response(entity_id, attr, value):
"""Create a success response for an attribute set on a light."""
success_key = '/lights/{}/state/{}'.format(entity_id, attr)
return {'success': {success_key: value}}
|
|
# pylint:skip-file
from collections import namedtuple
import mxnet as mx
from stt_layer_batchnorm import batchnorm
LSTMState = namedtuple("LSTMState", ["c", "h"])
LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias",
"h2h_weight", "h2h_bias",
"ph2h_weight",
"c2i_bias", "c2f_bias", "c2o_bias"])
LSTMModel = namedtuple("LSTMModel", ["rnn_exec", "symbol",
"init_states", "last_states",
"seq_data", "seq_labels", "seq_outputs",
"param_blocks"])
def vanilla_lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, is_batchnorm=False, gamma=None, beta=None):
"""LSTM Cell symbol"""
i2h = mx.sym.FullyConnected(data=indata,
weight=param.i2h_weight,
bias=param.i2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_i2h" % (seqidx, layeridx))
if is_batchnorm:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.h2h_weight,
bias=param.h2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
name="t%d_l%d_slice" % (seqidx, layeridx))
in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
return LSTMState(c=next_c, h=next_h)
def lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0., num_hidden_proj=0, is_batchnorm=False,
gamma=None, beta=None):
"""LSTM Cell symbol"""
# dropout input
if dropout > 0.:
indata = mx.sym.Dropout(data=indata, p=dropout)
i2h = mx.sym.FullyConnected(data=indata,
weight=param.i2h_weight,
bias=param.i2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_i2h" % (seqidx, layeridx))
if is_batchnorm:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.h2h_weight,
# bias=param.h2h_bias,
no_bias=True,
num_hidden=num_hidden * 4,
name="t%d_l%d_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
name="t%d_l%d_slice" % (seqidx, layeridx))
Wcidc = mx.sym.broadcast_mul(param.c2i_bias, prev_state.c) + slice_gates[0]
in_gate = mx.sym.Activation(Wcidc, act_type="sigmoid")
in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
Wcfdc = mx.sym.broadcast_mul(param.c2f_bias, prev_state.c) + slice_gates[2]
forget_gate = mx.sym.Activation(Wcfdc, act_type="sigmoid")
next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
Wcoct = mx.sym.broadcast_mul(param.c2o_bias, next_c) + slice_gates[3]
out_gate = mx.sym.Activation(Wcoct, act_type="sigmoid")
next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
if num_hidden_proj > 0:
proj_next_h = mx.sym.FullyConnected(data=next_h,
weight=param.ph2h_weight,
no_bias=True,
num_hidden=num_hidden_proj,
name="t%d_l%d_ph2h" % (seqidx, layeridx))
return LSTMState(c=next_c, h=proj_next_h)
else:
return LSTMState(c=next_c, h=next_h)
def lstm_unroll(net, num_lstm_layer, seq_len, num_hidden_lstm_list, dropout=0., num_hidden_proj=0,
lstm_type='fc_lstm', is_batchnorm=False, prefix="", direction="forward"):
if num_lstm_layer > 0:
param_cells = []
last_states = []
for i in range(num_lstm_layer):
param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(prefix + "l%d_i2h_weight" % i),
i2h_bias=mx.sym.Variable(prefix + "l%d_i2h_bias" % i),
h2h_weight=mx.sym.Variable(prefix + "l%d_h2h_weight" % i),
h2h_bias=mx.sym.Variable(prefix + "l%d_h2h_bias" % i),
ph2h_weight=mx.sym.Variable(prefix + "l%d_ph2h_weight" % i),
c2i_bias=mx.sym.Variable(prefix + "l%d_c2i_bias" % i,
shape=(1, num_hidden_lstm_list[i])),
c2f_bias=mx.sym.Variable(prefix + "l%d_c2f_bias" % i,
shape=(1, num_hidden_lstm_list[i])),
c2o_bias=mx.sym.Variable(prefix + "l%d_c2o_bias" % i,
shape=(1, num_hidden_lstm_list[i]))
))
state = LSTMState(c=mx.sym.Variable(prefix + "l%d_init_c" % i),
h=mx.sym.Variable(prefix + "l%d_init_h" % i))
last_states.append(state)
assert (len(last_states) == num_lstm_layer)
# declare batchnorm param(gamma,beta) in timestep wise
if is_batchnorm:
batchnorm_gamma = []
batchnorm_beta = []
for seqidx in range(seq_len):
batchnorm_gamma.append(mx.sym.Variable(prefix + "t%d_i2h_gamma" % seqidx))
batchnorm_beta.append(mx.sym.Variable(prefix + "t%d_i2h_beta" % seqidx))
hidden_all = []
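        # Unroll over time; the backward direction walks the sequence in reverse
        # and re-reverses the outputs when collecting hidden_all below.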
for seqidx in range(seq_len):
if direction == "forward":
k = seqidx
hidden = net[k]
elif direction == "backward":
k = seq_len - seqidx - 1
hidden = net[k]
else:
raise Exception("direction should be whether forward or backward")
# stack LSTM
for i in range(num_lstm_layer):
if i == 0:
dp = 0.
else:
dp = dropout
if lstm_type == 'fc_lstm':
if is_batchnorm:
next_state = lstm(num_hidden_lstm_list[i],
indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k,
layeridx=i,
dropout=dp,
num_hidden_proj=num_hidden_proj,
is_batchnorm=is_batchnorm,
gamma=batchnorm_gamma[k],
beta=batchnorm_beta[k]
)
else:
next_state = lstm(num_hidden_lstm_list[i],
indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k,
layeridx=i,
dropout=dp,
num_hidden_proj=num_hidden_proj,
is_batchnorm=is_batchnorm
)
elif lstm_type == 'vanilla_lstm':
if is_batchnorm:
next_state = vanilla_lstm(num_hidden_lstm_list[i], indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k, layeridx=i,
is_batchnorm=is_batchnorm,
gamma=batchnorm_gamma[k],
beta=batchnorm_beta[k]
)
else:
next_state = vanilla_lstm(num_hidden_lstm_list[i], indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=k, layeridx=i,
is_batchnorm=is_batchnorm
)
else:
raise Exception("lstm type %s error" % lstm_type)
hidden = next_state.h
last_states[i] = next_state
# decoder
if dropout > 0.:
hidden = mx.sym.Dropout(data=hidden, p=dropout)
if direction == "forward":
hidden_all.append(hidden)
elif direction == "backward":
hidden_all.insert(0, hidden)
else:
raise Exception("direction should be whether forward or backward")
net = hidden_all
return net
def bi_lstm_unroll(net, num_lstm_layer, seq_len, num_hidden_lstm_list, dropout=0., num_hidden_proj=0,
lstm_type='fc_lstm', is_batchnorm=False):
if num_lstm_layer > 0:
net_forward = lstm_unroll(net=net,
num_lstm_layer=num_lstm_layer,
seq_len=seq_len,
num_hidden_lstm_list=num_hidden_lstm_list,
dropout=dropout,
num_hidden_proj=num_hidden_proj,
lstm_type=lstm_type,
is_batchnorm=is_batchnorm,
prefix="forward_",
direction="forward")
net_backward = lstm_unroll(net=net,
num_lstm_layer=num_lstm_layer,
seq_len=seq_len,
num_hidden_lstm_list=num_hidden_lstm_list,
dropout=dropout,
num_hidden_proj=num_hidden_proj,
lstm_type=lstm_type,
is_batchnorm=is_batchnorm,
prefix="backward_",
direction="backward")
hidden_all = []
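        # Concatenate forward and backward hidden states at each timestep along
        # the feature axis.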
for i in range(seq_len):
hidden_all.append(mx.sym.Concat(*[net_forward[i], net_backward[i]], dim=1))
net = hidden_all
return net
# bilstm_2to1
def bi_lstm_unroll_two_input_two_output(net1, net2, num_lstm_layer, seq_len, num_hidden_lstm_list, dropout=0.,
num_hidden_proj=0,
lstm_type='fc_lstm', is_batchnorm=False):
if num_lstm_layer > 0:
net_forward = lstm_unroll(net=net1,
num_lstm_layer=num_lstm_layer,
seq_len=seq_len,
num_hidden_lstm_list=num_hidden_lstm_list,
dropout=dropout,
num_hidden_proj=num_hidden_proj,
lstm_type=lstm_type,
is_batchnorm=is_batchnorm,
prefix="forward_",
direction="forward")
net_backward = lstm_unroll(net=net2,
num_lstm_layer=num_lstm_layer,
seq_len=seq_len,
num_hidden_lstm_list=num_hidden_lstm_list,
dropout=dropout,
num_hidden_proj=num_hidden_proj,
lstm_type=lstm_type,
is_batchnorm=is_batchnorm,
prefix="backward_",
direction="backward")
return net_forward, net_backward
else:
return net1, net2
|
|
from __future__ import print_function
from itertools import chain
import pytest
pd = pytest.importorskip('pandas')
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import ExtraTreesRegressor
from eli5 import (
format_as_dataframes, format_as_dataframe,
explain_weights_df, explain_weights_dfs,
explain_prediction_df, explain_prediction_dfs,
format_as_text, explain_weights, explain_prediction,
)
from eli5.base import (
Explanation, TargetExplanation, FeatureWeight, FeatureWeights,
FeatureImportances, TransitionFeatureWeights,
)
def test_explain_weights(boston_train):
X, y, feature_names = boston_train
reg = LinearRegression()
reg.fit(X, y)
expl = explain_weights(reg)
df = format_as_dataframe(expl)
check_targets_dataframe(df, expl)
check_targets_dataframe(explain_weights_df(reg), expl)
df_dict = explain_weights_dfs(reg)
assert set(df_dict.keys()) == {'targets'}
check_targets_dataframe(df_dict['targets'], expl)
def check_targets_dataframe(df, expl):
assert list(df.columns) == ['target', 'feature', 'weight']
df_indexed = df.groupby(['target', 'feature']).agg(lambda x: x)
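    # The identity aggregation turns the frame into a (target, feature)-indexed
    # lookup of weights.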
for target in expl.targets:
feature_weights = target.feature_weights
for fw in chain(feature_weights.pos, feature_weights.neg):
weight = df_indexed.loc[target.target, fw.feature]['weight']
assert weight == fw.weight
def test_explain_weights_fi(boston_train):
X, y, feature_names = boston_train
reg = ExtraTreesRegressor()
reg.fit(X, y)
expl = explain_weights(reg)
df = format_as_dataframe(expl)
assert list(df.columns) == ['feature', 'weight', 'std']
df_indexed = df.groupby('feature').agg(lambda x: x)
for fw in expl.feature_importances.importances:
df_fw = df_indexed.loc[fw.feature]
assert np.isclose(df_fw['weight'], fw.weight)
assert np.isclose(df_fw['std'], fw.std)
def test_explain_prediction(boston_train):
X, y, feature_names = boston_train
reg = LinearRegression()
reg.fit(X, y)
expl = explain_prediction(reg, X[0])
df = format_as_dataframe(expl)
check_prediction_df(df, expl)
check_prediction_df(explain_prediction_df(reg, X[0]), expl)
df_dict = explain_prediction_dfs(reg, X[0])
assert set(df_dict.keys()) == {'targets'}
check_prediction_df(df_dict['targets'], expl)
def check_prediction_df(df, expl):
assert list(df.columns) == ['target', 'feature', 'weight', 'value']
target = expl.targets[0].target
feature_weights = expl.targets[0].feature_weights
df_indexed = df.groupby(['target', 'feature']).agg(lambda x: x)
for fw in chain(feature_weights.pos, feature_weights.neg):
df_fw = df_indexed.loc[target, fw.feature]
assert df_fw['weight'] == fw.weight
assert df_fw['value'] == fw.value
@pytest.mark.parametrize(
['with_std', 'with_value'],
[[False, False], [True, False], [False, True]])
def test_targets(with_std, with_value):
expl = Explanation(
estimator='some estimator',
targets=[
TargetExplanation(
'y', feature_weights=FeatureWeights(
pos=[FeatureWeight('a', 13,
std=0.13 if with_std else None,
value=2 if with_value else None),
FeatureWeight('b', 5,
std=0.5 if with_std else None,
value=1 if with_value else None)],
neg=[FeatureWeight('neg1', -10,
std=0.2 if with_std else None,
value=5 if with_value else None),
FeatureWeight('neg2', -1,
std=0.3 if with_std else None,
value=4 if with_value else None)],
)),
TargetExplanation(
'y2', feature_weights=FeatureWeights(
pos=[FeatureWeight('f', 1)],
neg=[],
)),
],
)
df_dict = format_as_dataframes(expl)
assert isinstance(df_dict, dict)
assert list(df_dict) == ['targets']
df = df_dict['targets']
expected_df = pd.DataFrame(
{'target': ['y', 'y', 'y', 'y', 'y2'],
'feature': ['a', 'b', 'neg2', 'neg1', 'f'],
'weight': [13, 5, -1, -10, 1]},
columns=['target', 'feature', 'weight'])
if with_std:
expected_df['std'] = [0.13, 0.5, 0.3, 0.2, None]
if with_value:
expected_df['value'] = [2, 1, 4, 5, None]
print(df, expected_df, sep='\n')
assert expected_df.equals(df)
single_df = format_as_dataframe(expl)
assert expected_df.equals(single_df)
def test_bad_list():
with pytest.raises(ValueError):
format_as_dataframe([1])
@pytest.mark.parametrize(
['with_std', 'with_value'],
[[False, False], [True, False], [False, True]])
def test_feature_importances(with_std, with_value):
expl = Explanation(
estimator='some estimator',
feature_importances=FeatureImportances(
importances=[
FeatureWeight('a', 1,
std=0.1 if with_std else None,
value=1 if with_value else None),
FeatureWeight('b', 2,
std=0.2 if with_std else None,
value=3 if with_value else None),
],
remaining=10,
)
)
df_dict = format_as_dataframes(expl)
assert isinstance(df_dict, dict)
assert list(df_dict) == ['feature_importances']
df = df_dict['feature_importances']
expected_df = pd.DataFrame(
{'feature': ['a', 'b'], 'weight': [1, 2]},
columns=['feature', 'weight'])
if with_std:
expected_df['std'] = [0.1, 0.2]
if with_value:
expected_df['value'] = [1, 3]
print(df, expected_df, sep='\n')
assert expected_df.equals(df)
single_df = format_as_dataframe(expl)
assert expected_df.equals(single_df)
def test_transition_features():
expl = Explanation(
estimator='some estimator',
targets=[
TargetExplanation(
'class1', feature_weights=FeatureWeights(
pos=[FeatureWeight('pos', 13, value=1)],
neg=[],
)),
TargetExplanation(
'class2', feature_weights=FeatureWeights(
pos=[FeatureWeight('pos', 13, value=1)],
neg=[],
)),
],
transition_features=TransitionFeatureWeights(
class_names=['class2', 'class1'], # reverse on purpose
coef=np.array([[1.5, 2.5], [3.5, 4.5]]),
)
)
df_dict = format_as_dataframes(expl)
assert isinstance(df_dict, dict)
assert set(df_dict) == {'targets', 'transition_features'}
assert df_dict['targets'].equals(format_as_dataframe(expl.targets))
df = df_dict['transition_features']
print(df)
print(format_as_text(expl))
expected = pd.DataFrame([
{'from': 'class2', 'to': 'class2', 'coef': 1.5},
{'from': 'class2', 'to': 'class1', 'coef': 2.5},
{'from': 'class1', 'to': 'class2', 'coef': 3.5},
{'from': 'class1', 'to': 'class1', 'coef': 4.5},
], columns=['from', 'to', 'coef'])
assert df.equals(expected)
with pytest.warns(UserWarning):
single_df = format_as_dataframe(expl)
assert single_df.equals(df)
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
DEFAULT_HTTP_TIMEOUT,
MANAGEMENT_HOST,
_str,
_validate_not_none,
)
from azure.servicemanagement import (
_ServiceBusManagementXmlSerializer,
QueueDescription,
TopicDescription,
NotificationHubDescription,
RelayDescription,
MetricProperties,
MetricValues,
MetricRollups,
_MinidomXmlToObject,
)
from azure.servicemanagement.servicemanagementclient import (
_ServiceManagementClient,
)
from functools import partial
X_MS_VERSION = '2012-03-01'
class ServiceBusManagementService(_ServiceManagementClient):
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST, request_session=None,
timeout=DEFAULT_HTTP_TIMEOUT):
'''
Initializes the service bus management service.
subscription_id:
Subscription to manage.
cert_file:
Path to .pem certificate file (httplib), or location of the
certificate in your Personal certificate store (winhttp) in the
CURRENT_USER\my\CertificateName format.
If a request_session is specified, then this is unused.
host:
Live ServiceClient URL. Defaults to Azure public cloud.
request_session:
Session object to use for http requests. If this is specified, it
replaces the default use of httplib or winhttp. Also, the cert_file
parameter is unused when a session is passed in.
The session object handles authentication, and as such can support
multiple types of authentication: .pem certificate, oauth.
For example, you can pass in a Session instance from the requests
library. To use .pem certificate authentication with requests
library, set the path to the .pem file on the session.cert
attribute.
timeout:
Optional. Timeout for the http request, in seconds.
'''
super(ServiceBusManagementService, self).__init__(
subscription_id, cert_file, host, request_session, timeout)
self.x_ms_version = X_MS_VERSION
# Operations for service bus ----------------------------------------
def get_regions(self):
'''
Get list of available service bus regions.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Regions/', None),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_region)
def list_namespaces(self):
'''
List the service bus namespaces defined on the account.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Namespaces/', None),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_namespace)
def get_namespace(self, name):
'''
Get details about a specific namespace.
name:
Name of the service bus namespace.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Namespaces', name),
None)
return _ServiceBusManagementXmlSerializer.xml_to_namespace(
response.body)
def create_namespace(self, name, region):
'''
Create a new service bus namespace.
name:
Name of the service bus namespace to create.
region:
Region to create the namespace in.
'''
_validate_not_none('name', name)
return self._perform_put(
self._get_path('services/serviceBus/Namespaces', name),
_ServiceBusManagementXmlSerializer.namespace_to_xml(region))
def delete_namespace(self, name):
'''
Delete a service bus namespace.
name:
Name of the service bus namespace to delete.
'''
_validate_not_none('name', name)
return self._perform_delete(
self._get_path('services/serviceBus/Namespaces', name),
None)
def check_namespace_availability(self, name):
'''
Checks to see if the specified service bus namespace is available, or
if it has already been taken.
name:
Name of the service bus namespace to validate.
'''
_validate_not_none('name', name)
response = self._perform_get(
self._get_path('services/serviceBus/CheckNamespaceAvailability',
None) + '/?namespace=' + _str(name), None)
return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(
response.body)
def list_queues(self, name):
'''
Enumerates the queues in the service namespace.
name:
Name of the service bus namespace.
'''
_validate_not_none('name', name)
response = self._perform_get(
self._get_list_queues_path(name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_MinidomXmlToObject.convert_xml_to_azure_object,
azure_type=QueueDescription
)
)
def list_topics(self, name):
'''
Retrieves the topics in the service namespace.
name:
Name of the service bus namespace.
'''
response = self._perform_get(
self._get_list_topics_path(name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_MinidomXmlToObject.convert_xml_to_azure_object,
azure_type=TopicDescription
)
)
def list_notification_hubs(self, name):
'''
Retrieves the notification hubs in the service namespace.
name:
Name of the service bus namespace.
'''
response = self._perform_get(
self._get_list_notification_hubs_path(name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_MinidomXmlToObject.convert_xml_to_azure_object,
azure_type=NotificationHubDescription
)
)
def list_relays(self, name):
'''
Retrieves the relays in the service namespace.
name:
Name of the service bus namespace.
'''
response = self._perform_get(
self._get_list_relays_path(name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_MinidomXmlToObject.convert_xml_to_azure_object,
azure_type=RelayDescription
)
)
def get_supported_metrics_queue(self, name, queue_name):
'''
Retrieves the list of supported metrics for this namespace and queue
name:
Name of the service bus namespace.
queue_name:
Name of the service bus queue in this namespace.
'''
response = self._perform_get(
self._get_get_supported_metrics_queue_path(name, queue_name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricProperties
)
)
def get_supported_metrics_topic(self, name, topic_name):
'''
Retrieves the list of supported metrics for this namespace and topic
name:
Name of the service bus namespace.
topic_name:
            Name of the service bus topic in this namespace.
'''
response = self._perform_get(
self._get_get_supported_metrics_topic_path(name, topic_name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricProperties
)
)
def get_supported_metrics_notification_hub(self, name, hub_name):
'''
        Retrieves the list of supported metrics for this namespace and notification hub
name:
Name of the service bus namespace.
hub_name:
Name of the service bus notification hub in this namespace.
'''
response = self._perform_get(
self._get_get_supported_metrics_hub_path(name, hub_name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricProperties
)
)
def get_supported_metrics_relay(self, name, relay_name):
'''
Retrieves the list of supported metrics for this namespace and relay
name:
Name of the service bus namespace.
relay_name:
Name of the service bus relay in this namespace.
'''
response = self._perform_get(
self._get_get_supported_metrics_relay_path(name, relay_name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricProperties
)
)
def get_metrics_data_queue(self, name, queue_name, metric, rollup, filter_expresssion):
'''
        Retrieves metric data for this namespace and queue
name:
Name of the service bus namespace.
queue_name:
Name of the service bus queue in this namespace.
metric:
name of a supported metric
rollup:
name of a supported rollup
filter_expression:
filter, for instance "$filter=Timestamp gt datetime'2014-10-01T00:00:00Z'"
'''
response = self._perform_get(
self._get_get_metrics_data_queue_path(name, queue_name, metric, rollup, filter_expresssion),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricValues
)
)
def get_metrics_data_topic(self, name, topic_name, metric, rollup, filter_expresssion):
'''
        Retrieves metric data for this namespace and topic
name:
Name of the service bus namespace.
topic_name:
            Name of the service bus topic in this namespace.
metric:
name of a supported metric
rollup:
name of a supported rollup
filter_expression:
filter, for instance "$filter=Timestamp gt datetime'2014-10-01T00:00:00Z'"
'''
response = self._perform_get(
self._get_get_metrics_data_topic_path(name, topic_name, metric, rollup, filter_expresssion),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricValues
)
)
def get_metrics_data_notification_hub(self, name, hub_name, metric, rollup, filter_expresssion):
'''
        Retrieves metric data for this namespace and notification hub
name:
Name of the service bus namespace.
hub_name:
Name of the service bus notification hub in this namespace.
metric:
name of a supported metric
rollup:
name of a supported rollup
filter_expression:
filter, for instance "$filter=Timestamp gt datetime'2014-10-01T00:00:00Z'"
'''
response = self._perform_get(
self._get_get_metrics_data_hub_path(name, hub_name, metric, rollup, filter_expresssion),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricValues
)
)
def get_metrics_data_relay(self, name, relay_name, metric, rollup, filter_expresssion):
'''
        Retrieves metric data for this namespace and relay
name:
Name of the service bus namespace.
relay_name:
Name of the service bus relay in this namespace.
metric:
name of a supported metric
rollup:
name of a supported rollup
filter_expression:
filter, for instance "$filter=Timestamp gt datetime'2014-10-01T00:00:00Z'"
'''
response = self._perform_get(
self._get_get_metrics_data_relay_path(name, relay_name, metric, rollup, filter_expresssion),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricValues
)
)
def get_metrics_rollups_queue(self, name, queue_name, metric):
'''
        This operation gets rollup data for a Service Bus queue metric.
Rollup data includes the time granularity for the telemetry aggregation as well as
the retention settings for each time granularity.
name:
Name of the service bus namespace.
queue_name:
Name of the service bus queue in this namespace.
metric:
name of a supported metric
'''
response = self._perform_get(
self._get_get_metrics_rollup_queue_path(name, queue_name, metric),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricRollups
)
)
def get_metrics_rollups_topic(self, name, topic_name, metric):
'''
        This operation gets rollup data for a Service Bus topic metric.
Rollup data includes the time granularity for the telemetry aggregation as well as
the retention settings for each time granularity.
name:
Name of the service bus namespace.
topic_name:
            Name of the service bus topic in this namespace.
metric:
name of a supported metric
'''
response = self._perform_get(
self._get_get_metrics_rollup_topic_path(name, topic_name, metric),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricRollups
)
)
def get_metrics_rollups_notification_hub(self, name, hub_name, metric):
'''
        This operation gets rollup data for a Service Bus notification hub metric.
Rollup data includes the time granularity for the telemetry aggregation as well as
the retention settings for each time granularity.
name:
Name of the service bus namespace.
hub_name:
Name of the service bus notification hub in this namespace.
metric:
name of a supported metric
'''
response = self._perform_get(
self._get_get_metrics_rollup_hub_path(name, hub_name, metric),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricRollups
)
)
def get_metrics_rollups_relay(self, name, relay_name, metric):
'''
        This operation gets rollup data for a Service Bus relay metric.
Rollup data includes the time granularity for the telemetry aggregation as well as
the retention settings for each time granularity.
name:
Name of the service bus namespace.
relay_name:
Name of the service bus relay in this namespace.
metric:
name of a supported metric
'''
response = self._perform_get(
self._get_get_metrics_rollup_relay_path(name, relay_name, metric),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricRollups
)
)
# Helper functions --------------------------------------------------
def _get_list_queues_path(self, namespace_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/Queues'
def _get_list_topics_path(self, namespace_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/Topics'
def _get_list_notification_hubs_path(self, namespace_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/NotificationHubs'
def _get_list_relays_path(self, namespace_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/Relays'
def _get_get_supported_metrics_queue_path(self, namespace_name, queue_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/Queues/' + _str(queue_name) + '/Metrics'
def _get_get_supported_metrics_topic_path(self, namespace_name, topic_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/Topics/' + _str(topic_name) + '/Metrics'
def _get_get_supported_metrics_hub_path(self, namespace_name, hub_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/NotificationHubs/' + _str(hub_name) + '/Metrics'
def _get_get_supported_metrics_relay_path(self, namespace_name, queue_name):
return self._get_path('services/serviceBus/Namespaces/',
namespace_name) + '/Relays/' + _str(queue_name) + '/Metrics'
def _get_get_metrics_data_queue_path(self, namespace_name, queue_name, metric, rollup, filter_expr):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/Queues/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups/',
_str(rollup),
'/Values?',
filter_expr
])
def _get_get_metrics_data_topic_path(self, namespace_name, queue_name, metric, rollup, filter_expr):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/Topics/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups/',
_str(rollup),
'/Values?',
filter_expr
])
def _get_get_metrics_data_hub_path(self, namespace_name, queue_name, metric, rollup, filter_expr):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/NotificationHubs/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups/',
_str(rollup),
'/Values?',
filter_expr
])
def _get_get_metrics_data_relay_path(self, namespace_name, queue_name, metric, rollup, filter_expr):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/Relays/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups/',
_str(rollup),
'/Values?',
filter_expr
])
def _get_get_metrics_rollup_queue_path(self, namespace_name, queue_name, metric):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/Queues/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups',
])
def _get_get_metrics_rollup_topic_path(self, namespace_name, queue_name, metric):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/Topics/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups',
])
def _get_get_metrics_rollup_hub_path(self, namespace_name, queue_name, metric):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/NotificationHubs/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups',
])
def _get_get_metrics_rollup_relay_path(self, namespace_name, queue_name, metric):
return "".join([
self._get_path('services/serviceBus/Namespaces/', namespace_name),
'/Relays/',
_str(queue_name),
'/Metrics/',
_str(metric),
'/Rollups',
])
|
|
# To make forward fast
import os
os.environ["CHAINER_TYPE_CHECK"] = "0"
import six
import itertools
import numpy as np
import multiprocessing
import chainer
from chainer import serializers
from chainer import cuda
import nutszebra_log2
import nutszebra_utility
import nutszebra_sampling
import nutszebra_download_cifar10
import nutszebra_data_augmentation_picture
import nutszebra_data_augmentation
import nutszebra_basic_print
try:
from cupy.cuda import nccl
_available = True
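    # Enlarging the workspace lets cuDNN pick faster convolution algorithms.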
chainer.cuda.set_max_workspace_size(chainer.cuda.get_max_workspace_size() * 4)
except ImportError:
_available = False
Da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
sampling = nutszebra_sampling.Sampling
utility = nutszebra_utility.Utility()
"""
I referred to the implementation of MultiprocessParallelUpdater for multiprocessing and nccl.
https://github.com/chainer/chainer/blob/master/chainer/training/updaters/multiprocess_parallel_updater.py
"""
class _Worker(multiprocessing.Process):
def __init__(self, process_id, pipe, model, gpus, da, batch, master, sampling=sampling()):
super(_Worker, self).__init__()
self.process_id = process_id
self.pipe = pipe
self.model = model
self.da = da
self.device = gpus[process_id]
self.number_of_devices = len(gpus)
self.batch = batch
self.master = master
self.train_x = master.train_x
self.train_y = master.train_y
self.train_batch_divide = master.train_batch_divide
self.picture_number_at_each_categories = master.picture_number_at_each_categories
self.parallel = master.parallel_train
self.sampling = sampling
def get(self, name):
return self.__dict__[name]
def setup(self):
_, communication_id = self.pipe.recv()
self.communication = nccl.NcclCommunicator(self.number_of_devices,
communication_id,
self.process_id)
self.model.to_gpu(self.device)
def run(self):
dev = cuda.Device(self.device)
dev.use()
# build communication via nccl
self.setup()
gp = None
da_args = [self.da() for _ in six.moves.range(self.batch)]
p = multiprocessing.Pool(self.parallel)
batch_of_batch = int(float(self.batch) / self.train_batch_divide)
while True:
job, data = self.pipe.recv()
if job == 'finalize':
dev.synchronize()
break
if job == 'update':
# for reducing memory
self.model.zerograds()
indices = list(self.sampling.yield_random_batch_samples(1, self.batch, len(self.train_x), sort=False))[0]
for ii in six.moves.range(0, len(indices), batch_of_batch):
x = self.train_x[indices[ii:ii + batch_of_batch]]
t = self.train_y[indices[ii:ii + batch_of_batch]]
args = list(six.moves.zip(x, t, da_args))
processed = p.starmap(process_train, args)
tmp_x, tmp_t = list(zip(*processed))
train = True
x = self.model.prepare_input(tmp_x, dtype=np.float32, volatile=not train, gpu=self.device)
t = self.model.prepare_input(tmp_t, dtype=np.int32, volatile=not train, gpu=self.device)
y = self.model(x, train=train)
loss = self.model.calc_loss(y, t) / self.number_of_devices / self.train_batch_divide
loss.backward()
del x
del t
del y
del loss
# send gradients of self.model
gg = gather_grads(self.model)
null_stream = cuda.Stream.null
self.communication.reduce(gg.data.ptr,
gg.data.ptr,
gg.size,
nccl.NCCL_FLOAT,
nccl.NCCL_SUM,
0,
null_stream.ptr)
del gg
self.model.zerograds()
# send parameters of self.model
gp = gather_params(self.model)
self.communication.bcast(gp.data.ptr,
gp.size,
nccl.NCCL_FLOAT,
0,
null_stream.ptr)
scatter_params(self.model, gp)
gp = None
def size_num_grads(link):
"""Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
"""
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
def _batch_memcpy():
return cuda.cupy.ElementwiseKernel(
'raw T ptrs, raw X info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
float *src = (float *)(ptrs[id]);
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (src != NULL) {
dst[i_dst] = src[i_src];
}
id_pre = id;
''',
'batch_memcpy',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
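# The kernel above copies many small source arrays into one flat destination array:
# for each destination index i it binary-searches the cumulative-size table 'info'
# to find which source pointer in 'ptrs' owns that index, then copies the element
# (or writes 0 when the source pointer is NULL, i.e. the gradient is missing).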
def gather_grads(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
size, num = size_num_grads(link)
ptrs = np.empty(num, dtype=np.uint64)
info = np.empty(num + 1, dtype=np.int32)
info[0] = 0
i = 0
for param in link.params():
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
if param.grad is not None:
ptrs[i] = param.grad.data.ptr
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs, stream=cuda.Stream.null)
info = cuda.to_gpu(info, stream=cuda.Stream.null)
return _batch_memcpy()(ptrs, info, size=size)
def gather_params(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
size, num = size_num_grads(link)
ptrs = np.empty(num, dtype=np.uint64)
info = np.empty(num + 1, dtype=np.int32)
info[0] = 0
i = 0
for param in link.params():
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
if param.data is not None:
ptrs[i] = param.data.data.ptr
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs, stream=cuda.Stream.null)
info = cuda.to_gpu(info, stream=cuda.Stream.null)
return _batch_memcpy()(ptrs, info, size=size)
def scatter_grads(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
"""
offset = 0
for param in link.params():
next_offset = offset + param.size
param.grad = array[offset:next_offset].reshape(param.data.shape)
offset = next_offset
def scatter_params(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
"""
offset = 0
for param in link.params():
next_offset = offset + param.size
param.data = array[offset:next_offset].reshape(param.data.shape)
offset = next_offset
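# Per update step the helpers above are combined as follows (see update_core below):
# gather_grads() flattens this process' gradients, an NCCL reduce sums them onto
# device 0, scatter_grads() writes the summed gradients back, the optimizer updates
# the master model, and gather_params()/bcast/scatter_params() push the new
# parameters out to the worker processes.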
class TrainCifar10WithMultiGpus(object):
def __init__(self, model=None, optimizer=None, load_model=None, load_optimizer=None, load_log=None, load_data=None, da=nutszebra_data_augmentation.DataAugmentationNormalizeBigger, save_path='./', epoch=200, batch=128, gpus=(0, 1, 2, 3), start_epoch=1, train_batch_divide=1, test_batch_divide=1, parallel_train=2, parallel_test=16):
self.model = model
self.optimizer = optimizer
self.load_model = load_model
self.load_optimizer = load_optimizer
self.load_log = load_log
self.load_data = load_data
self.da = da
self._da = da
self.save_path = save_path
self.epoch = epoch
self.batch = batch
self.gpus = gpus
self.start_epoch = start_epoch
self.train_batch_divide = train_batch_divide
self.test_batch_divide = test_batch_divide
self.parallel_train = parallel_train
self.parallel_test = parallel_test
# Generate dataset
self.train_x, self.train_y, self.test_x, self.test_y, self.picture_number_at_each_categories, self.categories = self.data_init()
# Log module
self.log = self.log_init()
# initializing
self.model_init(model, load_model)
# create directory
self.save_path = save_path if save_path[-1] == '/' else save_path + '/'
utility.make_dir('{}model'.format(self.save_path))
self.sampling = sampling()
self._initialized = False
self._pipes = []
self._workers = []
self.communication = None
self.da_args = [self.da() for _ in six.moves.range(self.batch)]
self.p_train = multiprocessing.Pool(self.parallel_train)
self.p_test = multiprocessing.Pool(self.parallel_test)
def data_init(self):
dl = nutszebra_download_cifar10.Cifar10()
data = dl.load_cifar10_data()
train_x = data['train_x']
train_y = data['train_y']
test_x = data['test_x']
test_y = data['test_y']
meta = data['meta']
categories = list(set(train_y.tolist()))
return (train_x, train_y, test_x, test_y, meta, categories)
def log_init(self):
load_log = self.load_log
log = nutszebra_log2.Log2()
if load_log is not None:
log.load(load_log)
else:
log({'are': self.categories}, 'categories')
log({'parameter': len(self.train_x)}, 'train_parameter')
log({'parameter': len(self.test_x)}, 'test_parameter')
for i in six.moves.range(len(self.categories)):
log({'parameter': float((np.array(self.test_y) == i).sum())}, 'test_parameter_{}'.format(i))
log({'model': str(self.model)}, 'model')
return log
@staticmethod
def model_init(model, load_model):
if load_model is None:
print('Weight initialization')
model.weight_initialization()
else:
print('loading {}'.format(load_model))
serializers.load_npz(load_model, model)
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
# work only once
if self._initialized:
return
self._initialized = True
self.model.zerograds()
for i in six.moves.range(1, len(self.gpus)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self.model, self.gpus, self.da, int(self.batch / len(self.gpus) / self.train_batch_divide), self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with cuda.Device(self.gpus[0]):
self.model.to_gpu(self.gpus[0])
if len(self.gpus) > 1:
communication_id = nccl.get_unique_id()
self._send_message(("set comm_id", communication_id))
self.communication = nccl.NcclCommunicator(len(self.gpus),
communication_id,
0)
def update_core(self, x, t):
self._send_message(('update', None))
with cuda.Device(self.gpus[0]):
self.model.zerograds()
# tmp_x = []
# tmp_t = []
# for i in six.moves.range(len(x)):
# img, info = self.da.train(x[i])
# if img is not None:
# tmp_x.append(img)
# tmp_t.append(t[i])
args = list(six.moves.zip(x, t, self.da_args))
processed = self.p_train.starmap(process_train, args)
tmp_x, tmp_t = list(zip(*processed))
train = True
data_length = len(tmp_x)
x = self.model.prepare_input(tmp_x, dtype=np.float32, volatile=not train, gpu=self.gpus[0])
t = self.model.prepare_input(tmp_t, dtype=np.int32, volatile=not train, gpu=self.gpus[0])
y = self.model(x, train=train)
loss = self.model.calc_loss(y, t) / len(self.gpus)
loss.backward()
loss.to_cpu()
loss = float(loss.data) * data_length
del x
del t
del y
# NCCL: reduce grads
null_stream = cuda.Stream.null
if self.communication is not None:
# send grads
gg = gather_grads(self.model)
self.communication.reduce(gg.data.ptr,
gg.data.ptr,
gg.size,
nccl.NCCL_FLOAT,
nccl.NCCL_SUM,
0,
null_stream.ptr)
# copy grads, gg, to self.model
scatter_grads(self.model, gg)
del gg
self.optimizer.update()
if self.communication is not None:
gp = gather_params(self.model)
self.communication.bcast(gp.data.ptr,
gp.size,
nccl.NCCL_FLOAT,
0,
null_stream.ptr)
return loss
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
def train_one_epoch(self):
self.setup_workers()
# initialization
batch_of_batch = int(float(self.batch) / len(self.gpus) / self.train_batch_divide)
sum_loss = 0
yielder = self.sampling.yield_random_batch_samples(int(len(self.train_x) / self.batch), int(float(self.batch) / len(self.gpus)), len(self.train_x), sort=False)
progressbar = utility.create_progressbar(int(len(self.train_x) / self.batch), desc='train', stride=1)
# train start
for _, indices in six.moves.zip(progressbar, yielder):
for ii in six.moves.range(0, len(indices), batch_of_batch):
x = self.train_x[indices[ii:ii + batch_of_batch]]
t = self.train_y[indices[ii:ii + batch_of_batch]]
sum_loss += self.update_core(x, t) * len(self.gpus)
self.log({'loss': float(sum_loss)}, 'train_loss')
print(self.log.train_loss())
def test_one_epoch(self):
self.setup_workers()
batch_of_batch = int(self.batch / self.test_batch_divide)
sum_loss = 0
sum_accuracy = {}
sum_5_accuracy = {}
false_accuracy = {}
for ii in six.moves.range(len(self.categories)):
sum_accuracy[ii] = 0
sum_5_accuracy[ii] = 0
elements = six.moves.range(len(self.categories))
for ii, iii in itertools.product(elements, elements):
false_accuracy[(ii, iii)] = 0
da = [self._da() for _ in six.moves.range(self.batch)]
progressbar = utility.create_progressbar(len(self.test_x), desc='test', stride=batch_of_batch)
for i in progressbar:
x = self.test_x[i:i + batch_of_batch]
t = self.test_y[i:i + batch_of_batch]
tmp_x = []
tmp_t = []
args = list(zip(x, t, da))
processed = self.p_test.starmap(process, args)
tmp_x, tmp_t = list(zip(*processed))
data_length = len(tmp_x)
train = False
x = self.model.prepare_input(tmp_x, dtype=np.float32, volatile=not train, gpu=self.gpus[0])
t = self.model.prepare_input(tmp_t, dtype=np.int32, volatile=not train, gpu=self.gpus[0])
y = self.model(x, train=train)
# accuracy
tmp_accuracy, tmp_false_accuracy = self.model.accuracy(y, t)
for key in tmp_accuracy:
sum_accuracy[key] += tmp_accuracy[key]
for key in tmp_false_accuracy:
false_accuracy[key] += tmp_false_accuracy[key]
# loss
loss = self.model.calc_loss(y, t)
loss.to_cpu()
sum_loss += float(loss.data) * data_length
# sum_loss
self.log({'loss': float(sum_loss)}, 'test_loss')
# sum_accuracy
num = 0
for key in sum_accuracy:
value = sum_accuracy[key]
self.log({'accuracy': int(value)}, 'test_accuracy_{}'.format(key))
num += value
self.log({'accuracy': int(num)}, 'test_accuracy')
# false_accuracy
for key in false_accuracy:
if key[0] == key[1]:
pass
else:
value = false_accuracy[key]
self.log({'accuracy': int(value)}, 'test_accuracy_{}_{}'.format(key[0], key[1]))
# show logs
sen = [self.log.test_loss(), self.log.test_accuracy(max_flag=True)]
print('\n'.join(sen))
def run(self):
log = self.log
model = self.model
optimizer = self.optimizer
epoch = self.epoch
start_epoch = self.start_epoch
save_path = self.save_path
epoch_progressbar = utility.create_progressbar(epoch + 1, desc='epoch', stride=1, start=start_epoch)
for i in epoch_progressbar:
self.train_one_epoch()
# save model
model.save_model('{}model/{}_{}.model'.format(save_path, model.name, i))
optimizer(i)
self.test_one_epoch()
log.generate_loss_figure('{}loss.jpg'.format(save_path))
log.generate_accuracy_figure('{}accuracy.jpg'.format(save_path))
log.save(save_path + 'log.json')
def process(x, t, da):
x, info = da.test(x)
return (x, t)
def process_train(x, t, da):
x, info = da.train(x)
return (x, t)
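# Minimal usage sketch (illustrative only; YourModel and YourOptimizer are
# placeholders, not defined in this module):
#
#   model = YourModel()
#   optimizer = YourOptimizer(model)
#   trainer = TrainCifar10WithMultiGpus(model=model, optimizer=optimizer,
#                                       gpus=(0, 1), batch=128, epoch=200)
#   trainer.run()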
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.boolean_mask."""
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedBooleanMaskOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
# Define short constants for true & false, so the data & mask can be lined
# up in the examples below. This makes it easier to read the examples, to
# see which values should be kept vs. masked.
T = True
F = False
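# For example, with this notation the first docstring case below reads:
#   data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], mask=[[T, F, T], [F, F, F], [T, F, F]]
#   boolean_mask(data, mask) -> [[1, 3], [], [7]]   (a ragged result)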
@parameterized.parameters([
#=========================================================================
# Docstring examples
#=========================================================================
dict(
descr='Docstring example 1',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
mask=[[T, F, T], [F, F, F], [T, F, F]],
expected=ragged_factory_ops.constant_value([[1, 3], [], [7]])),
dict(
descr='Docstring example 2',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]),
mask=ragged_factory_ops.constant_value([[F, F, T], [F], [T, T]]),
expected=ragged_factory_ops.constant_value([[3], [], [5, 6]])),
dict(
descr='Docstring example 3',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]),
mask=[True, False, True],
expected=ragged_factory_ops.constant_value([[1, 2, 3], [5, 6]])),
#=========================================================================
# Uniform data and uniform mask.
#=========================================================================
dict(
descr='data.shape=[7]; mask.shape=[7]',
data=[1, 2, 3, 4, 5, 6, 7],
mask=[T, F, T, T, F, F, F],
expected=[1, 3, 4]),
dict(
descr='data.shape=[5, 3]; mask.shape=[5]',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]],
mask=[True, False, True, True, False],
expected=[[1, 2, 3], [7, 8, 9], [10, 11, 12]]),
dict(
descr='data.shape=[5, 3]; mask.shape=[5, 3]',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2], [3, 4, 5]],
mask=[[F, F, F], [T, F, T], [T, T, T], [F, F, F], [T, T, F]],
expected=ragged_factory_ops.constant_value(
[[], [4, 6], [7, 8, 9], [], [3, 4]])),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[F, F, T],
expected=[[[2, 4], [6, 8]]]),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[F, F, T],
expected=[[[2, 4], [6, 8]]]),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3, 2]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[[T, F], [T, T], [F, F]],
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []],
ragged_rank=1)),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3, 2, 2]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F], [T, T]]],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [4]], [[], []], [[2], [6, 8]]])),
dict(
descr='data.shape=mask.shape=[2, 2, 2, 2]',
data=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 3], [5, 7]]]],
mask=[[[[T, T], [F, F]], [[T, F], [F, F]]],
[[[F, F], [F, F]], [[T, T], [T, F]]]],
expected=ragged_factory_ops.constant_value(
[[[[1, 2], []], [[5], []]], [[[], []], [[1, 3], [5]]]])),
#=========================================================================
# Ragged data and ragged mask.
#=========================================================================
dict(
descr='data.shape=[5, (D2)]; mask.shape=[5, (D2)]',
data=ragged_factory_ops.constant_value(
[[1, 2], [3, 4, 5, 6], [7, 8, 9], [], [1, 2, 3]]),
mask=ragged_factory_ops.constant_value(
[[F, F], [F, T, F, T], [F, F, F], [], [T, F, T]]),
expected=ragged_factory_ops.constant_value(
[[], [4, 6], [], [], [1, 3]])),
dict(
descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]]),
mask=ragged_factory_ops.constant_value([[T, F], [T, T], [F, F]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []])),
dict(
descr='data.shape=[3, (D2), D3]; mask.shape=[3, (D2)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8], [2, 4]], [[6, 8]]],
ragged_rank=1),
mask=ragged_factory_ops.constant_value([[T, F], [T, T, F], [F]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []],
ragged_rank=1)),
dict(
descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2), (D3)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4]]]),
mask=ragged_factory_ops.constant_value(
[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F]]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2], [4]], [[], []], [[2]]])),
dict(
descr=('data.shape=[3, (D2), (D3), (D4)]; '
'mask.shape=[3, (D2), (D3), (D4)]'),
data=ragged_factory_ops.constant_value(
[[[[1, 2], [3, 4]], [[5, 6]]], [[[2, 4], [6, 8]]]]),
mask=ragged_factory_ops.constant_value(
[[[[T, T], [F, F]], [[T, F]]], [[[F, F], [T, T]]]]),
expected=ragged_factory_ops.constant_value(
[[[[1, 2], []], [[5]]], [[[], [6, 8]]]])),
#=========================================================================
# Ragged mask and uniform data
#=========================================================================
dict(
descr='data.shape=[2, 3]; mask.shape=[2, (3)]',
data=[[1, 2, 3], [4, 5, 6]],
mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]),
expected=ragged_factory_ops.constant_value([[1], [5, 6]])),
dict(
descr='data.shape=[2, 3, 2]; mask.shape=[2, (3)]',
data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]],
mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[9, 0], [2, 4]]],
ragged_rank=1)),
dict(
descr='data.shape=[2, 3, 2]; mask.shape=[2, (3), 2]',
data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]],
mask=ragged_factory_ops.constant_value(
[[[T, F], [F, F], [T, T]], [[T, F], [F, T], [F, F]]],
ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[1], [], [5, 6]], [[7], [0], []]])),
#=========================================================================
# Ragged data and uniform mask.
#=========================================================================
dict(
descr='data.shape=[4, (D2)]; mask.shape=[4]',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [], [5, 6]]),
mask=[T, F, T, F],
expected=ragged_factory_ops.constant_value([[1, 2, 3], []])),
dict(
descr='data.shape=[4, (D2), (D3)]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[4], []], [[5, 6]], []]),
mask=[T, F, T, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[5, 6]], []])),
dict(
descr='data.shape=[4, (D2), 2]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1),
mask=[T, F, F, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1)),
dict(
descr='data.shape=[4, (D2), 2]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1),
mask=[T, F, F, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1)),
dict(
descr='data.shape=[1, (2)]; mask.shape=[1, 2]',
data=ragged_factory_ops.constant_value([[1, 2]]),
mask=[[T, F]],
expected=ragged_factory_ops.constant_value([[1]])),
dict(
descr='data.shape=[2, (2), (D3)]; mask.shape=[2, 2]',
data=ragged_factory_ops.constant_value(
[[[1], [2, 3]], [[], [4, 5, 6]]]),
mask=[[T, F], [T, T]],
expected=ragged_factory_ops.constant_value([[[1]], [[], [4, 5, 6]]])),
dict(
descr='data.shape=[2, (2), 3]; mask.shape=[2, 2]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1),
mask=[[T, F], [T, T]],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1)),
dict(
descr='data.shape=[2, (2), 3]; mask.shape=[2, 2, 3]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1),
mask=[[[T, F, F], [T, F, T]], [[T, F, T], [F, F, F]]],
expected=ragged_factory_ops.constant_value(
[[[1], [4, 6]], [[7, 9], []]])),
]) # pyformat: disable
def testBooleanMask(self, descr, data, mask, expected):
actual = ragged_array_ops.boolean_mask(data, mask)
self.assertAllEqual(actual, expected)
def testErrors(self):
if not context.executing_eagerly():
self.assertRaisesRegex(ValueError,
r'mask\.shape\.ndims must be known statically',
ragged_array_ops.boolean_mask, [[1, 2]],
array_ops.placeholder(dtypes.bool))
self.assertRaises(TypeError, ragged_array_ops.boolean_mask, [[1, 2]],
[[0, 1]])
self.assertRaisesRegex(
ValueError, 'Tensor conversion requested dtype bool for '
'RaggedTensor with dtype int32', ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]),
ragged_factory_ops.constant([[0, 0]]))
self.assertRaisesRegex(ValueError,
r'Shapes \(1, 2\) and \(1, 3\) are incompatible',
ragged_array_ops.boolean_mask, [[1, 2]],
[[True, False, True]])
self.assertRaisesRegex(errors.InvalidArgumentError,
r'Inputs must have identical ragged splits',
ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]),
ragged_factory_ops.constant([[True, False, True]]))
self.assertRaisesRegex(ValueError, 'mask cannot be scalar',
ragged_array_ops.boolean_mask, [[1, 2]], True)
self.assertRaisesRegex(ValueError, 'mask cannot be scalar',
ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]), True)
if __name__ == '__main__':
googletest.main()
|
|
import logging
from typing import Optional
from gi.repository import Gtk
from sastool.io.credo_cct import Header, Exposure
from ..core.functions import update_comboboxtext_choices
from ..core.plotcurve import PlotCurveWindow
from ..core.plotimage import PlotImageWindow
from ..core.toolwindow import ToolWindow
from ...core.commands.detector import Expose, ExposeMulti
from ...core.commands.motor import SetSample
from ...core.commands.xray_source import Shutter
from ...core.services.interpreter import Interpreter
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
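# The exposure sequence implemented by SingleExposure.on_command_return below is a
# small state machine driven by interpreter command results:
#   (start) -> optional SetSample -> optional Shutter(True) -> Expose/ExposeMulti
#   -> optional Shutter(False) -> reset the UI.
# Stages that are skipped (e.g. when the shutter checkbox is off) fall through to
# the next branch by overwriting commandname/returnvalue.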
class SingleExposure(ToolWindow):
required_devices = ['detector', 'xray_source', 'Motor_Sample_X', 'Motor_Sample_Y']
widgets_to_make_insensitive = ['inputframe']
def __init__(self, *args, **kwargs):
self._images_done = 0
self._images_requested = 0
self._sampleconnection = None
self._killed = False
self._nimages = 0
self._images_done = 0
self._expanalyzerconnection = None
super().__init__(*args, **kwargs)
def cleanup(self):
if self._sampleconnection is not None:
self.instrument.services['samplestore'].disconnect(self._sampleconnection)
self._sampleconnection = None
def on_mainwidget_map(self, window):
if super().on_mainwidget_map(window):
return True
self._sampleconnection = self.instrument.services['samplestore'].connect(
'list-changed', self.on_samplelist_changed)
self.on_samplelist_changed(self.instrument.services['samplestore'])
update_comboboxtext_choices(
self.builder.get_object('prefixselector'),
sorted(self.instrument.services['filesequence'].get_prefixes()),
self.instrument.config['path']['prefixes']['tst']
)
self.on_samplecheck_toggled(self.builder.get_object('samplename_check'))
def on_samplecheck_toggled(self, togglebutton):
self.builder.get_object('sampleselector').set_sensitive(togglebutton.get_active())
def on_command_return(self, interpreter: Interpreter, commandname: Optional[str], returnvalue: object):
if commandname is not None:
super().on_command_return(interpreter, commandname, returnvalue)
if self._killed:
commandname = 'shutter'
returnvalue = False
if commandname is None:
# not a real command; we only get here because on_start() invoked this method directly.
if self.builder.get_object('samplename_check').get_active():
self.execute_command(
SetSample, (self.builder.get_object('sampleselector').get_active_text(),))
return False
else:
self.instrument.services['samplestore'].set_active(None)
commandname = 'sample'
returnvalue = None
self.on_command_return(interpreter, 'sample', None)
# pass through to the next if.
if commandname == 'sample':
logger.debug('Sample in place')
if self.builder.get_object('shutter_check').get_active():
logger.debug('Opening shutter')
self.execute_command(
Shutter, (True,))
return False
else:
logger.debug('Not opening shutter, passing through to next command.')
commandname = 'shutter'
returnvalue = True
# pass through to the next if.
if commandname == 'shutter' and returnvalue is True:
# start exposure
logger.debug('Starting exposure')
prefix = self.builder.get_object('prefixselector').get_active_text()
exptime = self.builder.get_object('exptime_spin').get_value()
self._nimages = self.builder.get_object('nimages_spin').get_value_as_int()
expdelay = self.builder.get_object('expdelay_spin').get_value()
self.builder.get_object('progressframe').show_all()
self.builder.get_object('progressframe').set_visible(True)
if self._nimages == 1:
logger.debug('Executing Expose')
self.execute_command(
Expose, (exptime, prefix))
else:
logger.debug('Executing ExposeMulti')
self.execute_command(
ExposeMulti, (exptime, self._nimages, prefix, expdelay))
return False
if commandname in ['expose', 'exposemulti']:
logger.debug('Exposure ended.')
if self.builder.get_object('shutter_check').get_active():
logger.debug('Closing shutter')
self.execute_command(
Shutter, (False,))
return False
else:
logger.debug('Not closing shutter')
commandname = 'shutter'
returnvalue = False
# pass through to the next if.
if commandname == 'shutter' and returnvalue is False:
# this is the end.
logger.debug('Shutter is closed, ending singleexposure')
self.builder.get_object('start_button').set_label('Start')
self.builder.get_object('start_button').get_image().set_from_icon_name('system-run', Gtk.IconSize.BUTTON)
self.builder.get_object('progressframe').set_visible(False)
self.set_sensitive(True)
self.widget.resize(1, 1)
self._killed = False
return False
# we should not reach here.
raise ValueError(commandname, returnvalue)
def on_command_pulse(self, interpreter, commandname, message):
self.builder.get_object('exposure_progress').set_text(message)
self.builder.get_object('exposure_progress').pulse()
def on_command_progress(self, interpreter, commandname, message, fraction):
self.builder.get_object('exposure_progress').set_text(message)
self.builder.get_object('exposure_progress').set_fraction(fraction)
def on_start(self, button):
if button.get_label() == 'Start':
button.set_label('Stop')
button.get_image().set_from_icon_name('gtk-stop', Gtk.IconSize.BUTTON)
self._images_done = 0
self._expanalyzerconnection = self.instrument.services['exposureanalyzer'].connect('image', self.on_image)
self.on_command_return(self.instrument.services['interpreter'], None, None)
else:
self._killed = True
self.instrument.services['interpreter'].kill()
def on_image(self, exposureanalyzer, prefix, fsn, matrix, params, mask):
im = Exposure(matrix, header=Header(params), mask=mask)
try:
sample = im.header.title
except KeyError:
sample = 'unknown sample'
legend = 'FSN #{:d}, {} at {:.2f} mm'.format(
im.header.fsn, sample, float(im.header.distance))
if self.builder.get_object('plotimage_check').get_active():
if self.builder.get_object('reuseimage_check').get_active():
imgwin = PlotImageWindow.get_latest_window()
else:
imgwin = PlotImageWindow()
imgwin.set_image(im.intensity)
imgwin.set_mask(im.mask)
imgwin.set_distance(im.header.distance)
imgwin.set_beampos(im.header.beamcenterx,
im.header.beamcentery)
assert im.header.pixelsizex == im.header.pixelsizey
imgwin.set_pixelsize(im.header.pixelsizex)
imgwin.set_wavelength(im.header.wavelength)
imgwin.set_title(legend)
if self.builder.get_object('plotradial_check').get_active():
if self.builder.get_object('reuseradial_check').get_active():
curvewin = PlotCurveWindow.get_latest_window()
else:
curvewin = PlotCurveWindow()
curve = im.radial_average()
assert im.header.pixelsizex == im.header.pixelsizey
curvewin.addcurve(curve.q, curve.Intensity, curve.qError, curve.Error, legend, 'q',
im.header.pixelsizex,
im.header.distance, im.header.wavelength)
self._images_done += 1
if self._images_done >= self._nimages:
self.instrument.services['exposureanalyzer'].disconnect(self._expanalyzerconnection)
self._expanalyzerconnection = None
def on_samplelist_changed(self, samplestore):
update_comboboxtext_choices(
self.builder.get_object('sampleselector'),
sorted([x.title for x in samplestore]))
def on_nimages_changed(self, spinbutton):
self.builder.get_object('expdelay_spin').set_sensitive(spinbutton.get_value_as_int() > 1)
def on_maskoverride_toggled(self, togglebutton):
pass
|
|
from OpenGL.GL import *
from PyQt4 import QtGui
from PyQt4.QtCore import *
from PyQt4.QtOpenGL import *
import math
GLUT_UP = 1
GLUT_DOWN = 0
GLUT_ACTIVE_CTRL = 2
GLUT_ACTIVE_SHIFT = 1
GLUT_ACTIVE_ALT = 4
keymap = {Qt.Key_F1:'f1',
Qt.Key_F2:'f2',
Qt.Key_F3:'f3',
Qt.Key_F4:'f4',
Qt.Key_F5:'f5',
Qt.Key_F6:'f6',
Qt.Key_F7:'f7',
Qt.Key_F8:'f8',
Qt.Key_F9:'f9',
Qt.Key_F10:'f10',
Qt.Key_F11:'f11',
Qt.Key_F12:'f12',
Qt.Key_Up:'up',
Qt.Key_Left:'left',
Qt.Key_Down:'down',
Qt.Key_Right:'right',
Qt.Key_Home:'home',
Qt.Key_End:'end',
Qt.Key_Delete:'delete',
Qt.Key_Enter:'enter'
}
def toGlutButton(button):
if button==Qt.LeftButton:
return 0
elif button==Qt.RightButton:
return 2
elif button==Qt.MidButton:
return 1
return 0
def toModifierList(modifiers):
res = []
if modifiers & Qt.AltModifier:
res.append('alt')
if modifiers & Qt.ShiftModifier:
res.append('shift')
if modifiers & Qt.ControlModifier:
res.append('ctrl')
return res
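# Illustrative mapping (using Qt's standard modifier flags):
#   toModifierList(Qt.ShiftModifier | Qt.ControlModifier) -> ['shift', 'ctrl']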
class QtGLWindow(QGLWidget):
"""A basic OpenGL window using Qt. Should not be used directly, use
the functions in QtBackend instead.
Attributes:
- name: title of the window (only has an effect before calling
run())
- width, height: width/height of the window (only has an effect
before calling run(), and these are updated when the user resizes
the window).
- clearColor: the RGBA floating point values of the background color.
"""
def __init__(self,name="OpenGL window",parent=None):
format = QGLFormat()
format.setRgba(True)
format.setDoubleBuffer(True)
format.setDepth(True)
format.setSampleBuffers(True)
format.setSamples(4)
QGLWidget.__init__(self,format)
self.name = name
self.program = None
self.width = 640
self.height = 480
self.sizePolicy = "resize"
self.clearColor = [1.0,1.0,1.0,0.0]
#keyboard state information
self.modifierList = []
#mouse state information
self.lastx,self.lasty = None,None
self.initialized = False
self.refreshed = False
self.setFixedSize(self.width,self.height)
self.setWindowTitle(self.name)
self.idleTimer = QTimer()
self.actions = []
self.actionMenu = None
self.inpaint = False
def setProgram(self,program):
from glprogram import GLProgram
assert isinstance(program,GLProgram)
if hasattr(program,'name'):
self.name = program.name
if self.initialized:
self.setWindowTitle(program.name)
self.program = program
program.window = self
if self.initialized:
program.initialize()
program.reshapefunc(self.width,self.height)
def f():
if self.program: self.program.idlefunc()
self.idleTimer.timeout.connect(f)
else:
self.reshape(program.view.w,program.view.h)
def setParent(self,parent=None):
QGLWidget.setParent(self,parent)
def initialize(self):
""" Open a window and initialize """
assert self.program != None, "QGLWidget initialized without a GLProgram"
glEnable(GL_MULTISAMPLE)
self.setMouseTracking(True)
self.setFocusPolicy(Qt.StrongFocus)
def f():
if self.program: self.program.idlefunc()
self.idleTimer.timeout.connect(f)
self.idleTimer.start(0)
#init function
self.program.initialize()
if self.actionMenu is not None:
for a in self.actions:
self.actionMenu.addAction(a)
else:
print "QtGLWidget.initialize: no action menu?"
self.initialized = True
def add_action(self,hook,short_text,key,description=None):
a = QtGui.QAction(short_text, self)
a.setShortcut(key)
if description == None:
description = short_text
a.setStatusTip(description)
a.triggered.connect(hook)
self.actions.append(a)
def sizeHint(self):
return QSize(self.width,self.height)
#QtGLWidget bindings
def initializeGL(self):
print "######### QGLWidget Initialize GL ###############"
if self.initialized:
print "QGLWidget.initializeGL: already initialized?"
try:
return self.initialize()
except Exception,e:
import traceback
print "QGLWidget.initializeGL: hit an exception?"
traceback.print_exc()
exit(-1)
def resizeGL(self,w,h):
if self.program == None:
print "QGLWidget.resizeGL: called after close?"
return
if not self.isVisible():
return
(self.width,self.height) = (w,h)
self.program.reshapefunc(w,h)
return
def paintGL(self):
if self.program == None:
print "QGLWidget.paintGL: called after close?"
return
if not self.isVisible():
print "QGLWidget.paintGL: called while invisible?"
return
if self.inpaint:
return
self.inpaint = True
self.refreshed = False
try:
res = self.program.displayfunc()
except Exception,e:
import traceback
print "QGLWidget.paintGL: hit an exception?"
traceback.print_exc()
exit(-1)
self.inpaint = False
return
#QWidget bindings
def mouseMoveEvent(self,e):
if self.program == None:
print "QGLWidget.mouseMoveEvent: called after close?"
return
x,y = e.pos().x(),e.pos().y()
if self.lastx == None: dx,dy = 0,0
else: dx, dy = x - self.lastx, y - self.lasty
try:
res = self.program.motionfunc(x,y,dx,dy)
except Exception,e:
import traceback
traceback.print_exc()
exit(-1)
self.lastx,self.lasty = x,y
def mousePressEvent(self,e):
x,y = e.pos().x(),e.pos().y()
self.modifierList = toModifierList(e.modifiers())
self.lastx,self.lasty = x,y
self.program.mousefunc(toGlutButton(e.button()),GLUT_DOWN,x,y)
def mouseReleaseEvent(self,e):
x,y = e.pos().x(),e.pos().y()
self.modifierList = toModifierList(e.modifiers())
self.lastx,self.lasty = x,y
self.program.mousefunc(toGlutButton(e.button()),GLUT_UP,x,y)
def keyPressEvent(self,e):
if e.isAutoRepeat():
return
if e.key() in keymap:
self.modifierList = toModifierList(e.modifiers())
self.program.keyboardfunc(keymap[e.key()],self.lastx,self.lasty)
return
else:
c = str(e.text())
if len(c)==0: return #some empty press, like shift/control
self.modifierList = toModifierList(e.modifiers())
self.program.keyboardfunc(c,self.lastx,self.lasty)
def keyReleaseEvent(self,e):
if e.isAutoRepeat():
return
if e.key() in keymap:
self.modifierList = toModifierList(e.modifiers())
self.program.keyboardupfunc(keymap[e.key()],self.lastx,self.lasty)
return
else:
c = str(e.text())
if len(c)==0: return #some empty press, like shift/control
self.modifierList = toModifierList(e.modifiers())
self.program.keyboardupfunc(c,self.lastx,self.lasty)
def modifiers(self):
"""Call this to retrieve modifiers. Called by frontend."""
return self.modifierList
def idlesleep(self,duration=float('inf')):
"""Sleeps the idle callback for t seconds. If t is not provided,
the idle callback is slept forever"""
if duration==0:
self.idleTimer.start(0)
else:
self.idleTimer.stop()
if duration!=float('inf'):
QTimer.singleShot(duration*1000,lambda:self.idleTimer.start(0));
def close(self):
"""Call close() after this widget should be closed down, to stop
any existing Qt callbacks."""
#print "######### QGLWidget close ###############"
self.idleTimer.stop()
if self.program:
self.program.window = None
self.program = None
def refresh(self):
if not self.refreshed:
self.refreshed = True
if not self.isVisible():
return
#TODO: resolve whether it's better to call updateGL here or to schedule
# a timer event
#self.updateGL()
QTimer.singleShot(0,lambda:self.updateGL())
def reshape(self,w,h):
(self.width,self.height) = (w,h)
def doreshape():
self.setFixedSize(self.width,self.height)
self.window().resize(self.sizeHint())
self.window().adjustSize()
if self.isVisible():
self.refresh()
if not self.initialized: doreshape()
else: QTimer.singleShot(0,doreshape)
def draw_text(self,point,text,size=12,color=None):
if color:
if len(color)==3:
glColor3f(*color)
else:
glColor4f(*color)
font = QtGui.QFont()
font.setPixelSize(size)
if len(point) == 2:
self.renderText(point[0],point[1],0,text,font)
else:
self.renderText(point[0],point[1],point[2],text,font)
class QtBackend:
"""
To use as a standalone program: Set up your GLProgramInterface, then call run() to start the Qt main loop.
For more control over windowing, you can use the createWindow function to
construct new windows and setProgram to set the program used in that window.
IMPORTANT NOTE: only one window may be created for a given world due to OpenGL display lists
not being shared between OpenGL contexts. If you want to use multiple windows, then a new world
should be loaded for each window. You can close down and start up a new window with the same world
as long as you refresh all appearances in the world.
"""
def __init__(self):
self.app = None
self.window = None
def initialize(self,program_name):
if self.app == None:
#this is needed for some X11 multithreading bug
QCoreApplication.setAttribute(Qt.AA_X11InitThreads)
self.app = QtGui.QApplication([program_name])
def createWindow(self,name,parent=None):
self.initialize(name)
return QtGLWindow(name,parent)
def run(self):
"""Starts the main loop"""
assert self.window != None, "No window has been created; call createWindow() first"
self.window.show()
self.app.exec_()
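# Minimal standalone usage sketch (illustrative; MyGLProgram stands in for a
# GLProgram subclass defined elsewhere):
#
#   backend = QtBackend()
#   backend.window = backend.createWindow("demo")
#   backend.window.setProgram(MyGLProgram())
#   backend.run()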
|
|
#!/usr/bin/python
# Download WHO geographic distribution of COVID-19 cases worldwide
# Source : European Centre for Disease Prevention and Control
# Plot cases and deaths for selected countries
# The downloaded spreadsheet is stored locally in Covid-19.csv
# To use cached local spreadsheet, use "-l" option
# Intermediate data for cases/deaths and also for each country are stored in relevant .csv files
# All plots can be aligned to :
# First date of detection or death, in that country (default)
# First date of detection in China, 2019-12-31 (-n)
# Data can be plotted as daily values (default) cumulative values (-c)
# Countries to plot and line colours are specified in the appropriate tables at the top of this file
# Dependencies : pandas, matplotlib, numpy, google-auth-httplib2, beautifulsoup4, xlrd
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import urllib.request
from bs4 import BeautifulSoup
topLevelPage = "https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide"
localFileName = "Covid-19.csv"
countries = ["China","Germany","Italy","United_Kingdom","United_States_of_America", "Australia", "Sweden", "Brazil"]
colours = ["red", "black", "green","blue", "orange", "pink", "grey", "violet"]
# countries = ["China","Germany","Italy","United_Kingdom","United_States_of_America", "Australia"]
# colours = ["red", "black", "green","blue", "orange", "pink"]
country_single = ["United_Kingdom"] # Default value, can be overwritten
# Extract cases and deaths and align day 0 to first date of detection or death
def extractCountries(covidData, country, dates, noAlignFlag):
print("Country: " + country)
countryData = pd.DataFrame(index = dates) # Create dataframe for country data
# Extract the data for the required country
# We need to copy it to the new countryData array so that dates are pre-pended back to 2019-12-31
countryData_tmp = covidData[covidData["countriesAndTerritories"].str.match(country)]
countryData_tmp = countryData_tmp.iloc[::-1] # Invert table - top to bottom
countryData[list(countryData_tmp.columns.values)] = countryData_tmp[list(countryData_tmp.columns.values)]
countryData=countryData.fillna(0) # Replace NaN with 0
# countryFileName = country + '.csv'
# # countryData.to_csv (countryFileName, index = False, header=True)
# countryData.to_csv (countryFileName, index = True, header=True)
# Fill columns : countriesAndTerritories geoId countryterritoryCode popData2019
countryData['countriesAndTerritories'] = countryData['countriesAndTerritories'].iloc[-1]
countryData['geoId'] = countryData['geoId'].iloc[-1]
countryData['countryterritoryCode'] = countryData['countryterritoryCode'].iloc[-1]
countryData['popData2019'] = countryData['popData2019'].iloc[-1]
countryData=countryData.fillna(0) # Replace NaN with 0
# Create cumulative cases column and cumulative deaths column - Rename column titles
countryDataCS = countryData.cumsum(axis = 0)
countryDataCS = countryDataCS.rename(columns={"cases": "casesCumulative", "deaths": "deathsCumulative"})
countryData['casesCumulative'] = countryDataCS['casesCumulative'] # Copy cumulative columns to countryData
countryData['deathsCumulative'] = countryDataCS['deathsCumulative']
countryData['casesMA'] = countryData['cases'].rolling(7).mean() # Calculate moving averages
countryData['deathsMA'] = countryData['deaths'].rolling(7).mean()
countryData['casesCumulativeMA'] = countryData['casesCumulative'].rolling(7).mean()
countryData['deathsCumulativeMA'] = countryData['deathsCumulative'].rolling(7).mean()
# Calculate fatality rates and clip to 100%
countryData['fatalityPercentage'] = countryData['deaths'] * 100./countryData['cases']
countryData['fatalityPercentage'] = countryData['fatalityPercentage'].where(countryData['fatalityPercentage'] <= 100., 100.)
countryData.loc[countryData.cases == 0, "fatalityPercentage"] = 0 # When cases == 0, set percentage to 0
countryData['fatalityPercentageCumulative'] = countryData['deathsCumulative'] * 100./countryData['casesCumulative']
countryData['fatalityPercentageCumulative'] = countryData['fatalityPercentageCumulative'].where(countryData['fatalityPercentageCumulative'] <= 100., 100.)
countryData.loc[countryData.casesCumulative == 0, "fatalityPercentageCumulative"] = 0 # When casesCumulative == 0, set percentage to 0
countryData['fatalityPercentageMA'] = countryData['fatalityPercentage'].rolling(7).mean()
countryData['fatalityPercentageCumulativeMA'] = countryData['fatalityPercentageCumulative'].rolling(7).mean()
outputFileName = country + ".csv"
countryData.to_csv(outputFileName, index=True)
latestCases=countryData['cases'].iloc[-1] # Print latest cases and deaths count
latestDeaths=countryData['deaths'].iloc[-1]
print('Latest cases : ' + str(latestCases))
print('Latest deaths : ' + str(latestDeaths))
print("Latest fatality rate : %.2f %%" % ((latestDeaths*100.)/latestCases))
dc = countryData.index[countryData['cases'] != 0].tolist() # Print first data of cases
print("First Case : " + str(dc[0]).replace(' 00:00:00',''))
dd = countryData.index[countryData['deaths'] != 0].tolist() # Print first data of deaths
print("First Death : " + str(dd[0]).replace(' 00:00:00',''))
totalCases=countryData['casesCumulative'].iloc[-1]
totalDeaths=countryData['deathsCumulative'].iloc[-1]
fatalityRate=totalDeaths*100./totalCases
# population=int(countryData['popData2019'].iloc[0])
population=countryData['popData2019'].iloc[0]
print("Total number of cases : " + str(totalCases))
print("Total number of deaths : " + str(totalDeaths))
print("Total number of cases (Per 1 million pop.) : %.2f" % (totalCases / (population/1000000.)))
print("Total number of deaths (Per 1 million pop.): %.2f" % (totalDeaths / (population/1000000.)))
print("Overall Fatality rate : %.2f %%" % (fatalityRate))
print("Population (2019) : %.2f (Million)" % (population / 1000000.))
print('')
# If we are not aligning first case or death then just return the data
if noAlignFlag == True:
return country, population, countryData, countryData;
# Align to first case or death by removing leading zeros and resetting index
# Get names of indexes for which column casesCumulative has value 0
else:
indexNames = countryData[ countryData['casesCumulative'] == 0 ].index # Remove leading zeros from cases
extractedCases = countryData.drop(indexNames)
extractedCases = extractedCases.reset_index()
indexNames = countryData[ countryData['deathsCumulative'] == 0 ].index # Remove leading zeros from deaths
extractedDeaths = countryData.drop(indexNames)
extractedDeaths = extractedDeaths.reset_index()
return country, population, extractedCases, extractedDeaths;
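# In addition to the raw spreadsheet columns, the country DataFrame built above now
# carries the derived columns casesCumulative, deathsCumulative, fatalityPercentage,
# fatalityPercentageCumulative and their 7-day moving averages (casesMA, deathsMA,
# casesCumulativeMA, deathsCumulativeMA, fatalityPercentageMA,
# fatalityPercentageCumulativeMA), which the plotting code below selects from.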
# main
def main(useCachedFileFlag, cumulativeResultsFlag, noAlignFlag, noPlotFlag, fileSavePlotFlag, popNormalizeFlag):
global countries
cachedFilePresentFlag = False
try:
f = open(localFileName)
cachedFilePresentFlag = True
f.close()
except IOError:
cachedFilePresentFlag = False
# If cached file not present or we have requested to refresh then get the file
if (cachedFilePresentFlag == False) or (useCachedFileFlag == False):
cachedFilePresentFlag = False
resp = urllib.request.urlopen(topLevelPage)
soup = BeautifulSoup(resp, "html.parser", from_encoding=resp.info().get_param('charset'))
for link in soup.find_all('a', href=True):
# print(link['href'])
if ("xlsx" in link['href']): # If data in .xlsx format then retrieve and store as local .csv format
xlsxfileurl = link['href']
try:
xlsx_tmp = pd.read_excel(xlsxfileurl, index_col=0)
xlsx_tmp.to_csv(localFileName, index=True)
cachedFilePresentFlag = True
print("Cached spreadheet updated (xlsx)")
except:
cachedFilePresentFlag = False
print("Spreadheet failed to download (xlsx)")
break
if (cachedFilePresentFlag == False): # If data NOT in .xlsx format then retrieve and store .csv format
for link in soup.find_all(class_="btn btn-primary", href=True):
if ("csv" in link['href']):
csvfileurl = link['href']
try:
urllib.request.urlretrieve(csvfileurl, localFileName)
cachedFilePresentFlag = True
print("Cached spreadheet updated (csv)")
except:
cachedFilePresentFlag = False
print("Spreadheet failed to download (csv)")
break
if (cachedFilePresentFlag == False):
print("No spreadsheet found at the URL, use \"-l\" to use local cached file")
exit(0)
numberOfCountries = len(countries)
extractedCountry = {} # Create empty dictionaries to store result data frames for each country
extractedPopulation = {}
extractedCases = {}
extractedDeaths = {}
if (cachedFilePresentFlag == True):
covidData = pd.read_csv(localFileName, index_col=0, encoding="utf-8-sig")
# Spreadsheet columns :
# dateRep day month year cases deaths countriesAndTerritories geoId countryterritoryCode popData2019
covidData=covidData.fillna(0) # Replace NaN with 0
clen = 0 # For longest sequence
dlen = 0
# Extract Chinese dates to create index - this allows for countries that do not have full data supplied
dates_tmp = covidData[covidData["countriesAndTerritories"].str.match("China")]
dates_tmp = dates_tmp.iloc[::-1] # Invert table - top to bottom
dates_tmp=dates_tmp.reset_index()
dates=list(dates_tmp['dateRep'])
countryIndex = 0
for country in countries:
# Extract the data for each country
# Data can be aligned on 2019-12-31 or on the first instance in each country
extractedCountry[countryIndex], extractedPopulation[countryIndex], extractedCases[countryIndex], extractedDeaths[countryIndex] = \
extractCountries(covidData, country, dates, noAlignFlag)
# print(extractedCases)
# print(extractedDeaths)
if (popNormalizeFlag == True):
extractedCases[countryIndex]['cases'] = extractedCases[countryIndex]['cases'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['deaths'] = extractedCases[countryIndex]['deaths'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['casesCumulative'] = extractedCases[countryIndex]['casesCumulative'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['deathsCumulative'] = extractedCases[countryIndex]['deathsCumulative'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['casesMA'] = extractedCases[countryIndex]['casesMA'] * (1000000.0 / extractedPopulation[countryIndex])
extractedCases[countryIndex]['deathsMA'] = extractedCases[countryIndex]['deathsMA'] * (1000000.0 / extractedPopulation[countryIndex])
extractedDeaths[countryIndex]['casesCumulativeMA'] = extractedDeaths[countryIndex]['casesCumulativeMA'] * (1000000.0 / extractedPopulation[countryIndex])
extractedDeaths[countryIndex]['deathsCumulativeMA'] = extractedDeaths[countryIndex]['deathsCumulativeMA'] * (1000000.0 / extractedPopulation[countryIndex])
clen = np.maximum(clen, extractedCases[countryIndex].shape[0])
dlen = np.maximum(dlen, extractedDeaths[countryIndex].shape[0])
countryIndex = countryIndex+1
lastDate = str(covidData.first_valid_index()) # Get last date in combinedCases
lastDate = lastDate.replace(' 00:00:00','')
if len(countries) == 1: # Single country - Cases And Deaths
# Select daily or cumulative results
if (cumulativeResultsFlag == True):
casesType = 'casesCumulative'
deathsType = 'deathsCumulative'
casesMAType = 'casesCumulativeMA'
deathsMAType = 'deathsCumulativeMA'
percentageType = 'fatalityPercentageCumulative'
percentageMAType = 'fatalityPercentageCumulativeMA'
else:
casesType = 'cases'
deathsType = 'deaths'
casesMAType = 'casesMA'
deathsMAType = 'deathsMA'
percentageType = 'fatalityPercentage'
percentageMAType = 'fatalityPercentageMA'
# Plot titles
titleStr='Covid-19 '
if (cumulativeResultsFlag == True):
titleStr=titleStr + ' Cumulative Cases And Deaths: '
else:
titleStr=titleStr + ' Daily Cases And Deaths: '
ax = plt.gca() # Create plot - get current axis
ax.autoscale(enable=True, tight=True)
# Plot daily cases and deaths AND moving average
extractedCases[0].plot(kind='line', y=casesType, label='Cases', color='dodgerblue',ax=ax)
extractedCases[0].plot(kind='line', y=casesMAType, label='Cases - 7 Day Moving Average', color='blue',ax=ax)
extractedDeaths[0].plot(kind='line',y=deathsType, label='Deaths',color='lime',ax=ax)
extractedDeaths[0].plot(kind='line',y=deathsMAType, label='Deaths - 7 Day Moving Average',color='seagreen',ax=ax)
# Plot daily mortality rate
ax2 = plt.gca().twinx()
extractedDeaths[0].plot(kind='line',y=percentageType,label='Mortality Rate (%)',color='orange',linewidth=.75,ax=ax2)
extractedDeaths[0].plot(kind='line',y=percentageMAType,label='Mortality Rate Moving Average (%)',color='red',linewidth=.75,ax=ax2)
ax2.set_ylabel('Mortality Rate (%)', color='red')
ax2.tick_params(axis='y', labelcolor='red')
ax2.set_ylim(0, 40)
ax2.get_legend().remove()
plt.title(extractedCountry[0] + '\n' + titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control")
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+extractedCountry[0]+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
else: # Multiple countries
# Select daily or cumulative results
if (cumulativeResultsFlag == True):
casesType = 'casesCumulative'
deathsType = 'deathsCumulative'
percentageType = 'fatalityPercentageCumulative'
else:
casesType = 'casesMA'
deathsType = 'deathsMA'
percentageType = 'fatalityPercentage'
# Plot titles
titleStr='Covid-19 '
if (noAlignFlag == False):
titleStr=titleStr + 'Aligned '
if (popNormalizeFlag == True):
titleStr=titleStr + 'Pop Normalized '
if (cumulativeResultsFlag == True):
titleStr=titleStr + 'Cumulative Cases: '
else:
titleStr=titleStr + 'Daily Cases (7 day moving average): '
ax = plt.gca() # Create plot - get current axis
countryIndex = 0
for country in countries:
extractedCases[countryIndex].plot(kind='line',y=casesType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
countryIndex = countryIndex+1
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
# Plot titles
titleStr='Covid-19 '
if (noAlignFlag == False):
titleStr=titleStr + 'Aligned '
if (popNormalizeFlag == True):
titleStr=titleStr + 'Pop Normalized '
if (cumulativeResultsFlag == True):
titleStr=titleStr + 'Cumulative Deaths: '
else:
titleStr=titleStr + 'Daily Deaths (7 day moving average): '
ax = plt.gca() # Create plot - get current axis
countryIndex = 0
for country in countries:
extractedDeaths[countryIndex].plot(kind='line',y=deathsType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
countryIndex = countryIndex+1
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
# Plot titles
titleStr='Covid-19 '
if (noAlignFlag == False):
titleStr=titleStr + 'Aligned '
if (popNormalizeFlag == True):
titleStr=titleStr + 'Pop Normalized '
if (cumulativeResultsFlag == True):
titleStr=titleStr + 'Cumulative Fatality Percentage: '
else:
titleStr=titleStr + 'Daily Fatality Percentage (7 day moving average): '
ax = plt.gca() # Create plot - get current axis
countryIndex = 0
for country in countries:
extractedDeaths[countryIndex].plot(kind='line',y=percentageType,title=titleStr + str(lastDate) + "\nSource: European Centre for Disease Prevention and Control",label=extractedCountry[countryIndex],color=colours[countryIndex],ax=ax)
countryIndex = countryIndex+1
ax.set_ylim(bottom=0.0) # Don't plot numbers < zero
if (fileSavePlotFlag == True):
titleStr = titleStr.split(':', 1)[0]
plt.savefig(titleStr.replace(" ", "")+'.png')
if noPlotFlag == False: # Plot the data
plt.show()
else:
plt.close()
else:
print("Cached spreadsheet file not found on computer")
exit()
if __name__ == '__main__':
useCachedFileFlag = False
cumulativeResultsFlag = False
noAlignFlag = False
noPlotFlag = False
fileSavePlotFlag = False
popNormalizeFlag = False
if ((len(countries) > 1) and (len(countries) != len(colours))):
print("The number of colours must equal the number of countries")
exit()
parser = argparse.ArgumentParser(description='Covid-19 Visualizer')
parser.add_argument("-c", "--cumulative", action="store_true", help="Display cumulative results")
parser.add_argument("-l", "--local", action="store_true", help="Use local cached Covid-19.csv")
parser.add_argument("-n", "--noalign", action="store_true", help="Do not align first instance dates - all graphs start 2019-12-31")
parser.add_argument("-f", "--file", action="store_true", help="Save plot to file")
parser.add_argument("-p", "--population", action="store_true", help="Use population to normalize data to cases per 1 Million")
parser.add_argument("-q", "--quiet", action="store_true", help="Quiet - Do not plot graphs")
parser.add_argument("-s", "--single", nargs='?', const=1, help="Process a single country - Specify the countriesAndTerritories string used in the spreadsheet")
parser.add_argument("-m", "--ma", action="store_true", help="Plot Moving Average (only availeble for single courntry plot)")
args = parser.parse_args()
if (args.cumulative):
cumulativeResultsFlag = True
print("Cumulative Results = True")
if (args.local):
useCachedFileFlag = True
print("Use cached file = True")
if (args.noalign):
noAlignFlag = True
print("Do not align first instance date = True")
if (args.file):
fileSavePlotFlag = True
print("Save plot graphs to file = True")
if (args.population):
popNormalizeFlag = True
print("Normalize to population = True")
if (args.quiet):
noPlotFlag = True
print("Do not plot graphs = True")
if (args.single): # Process single country - if no country specified use default country at top of file
if (args.single != 1):
country_single[0] = args.single
countries = country_single # Overwrite the countries array
noAlignFlag = True
print("Process single country: " + str(countries))
print("Do not align first instance date = True")
main(useCachedFileFlag, cumulativeResultsFlag, noAlignFlag, noPlotFlag, fileSavePlotFlag, popNormalizeFlag)
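# Example invocation (hypothetical script filename and country string):
#   python covid19.py -l -p -s United_Kingdom -m
# i.e. use the locally cached CSV, normalize by population, and plot a single
# country together with its moving average.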
|
|
#!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""Testing mapreduce functionality end to end."""
import datetime
import logging
import random
import string
import unittest
from google.appengine.ext import ndb
from google.appengine.api import files
from google.appengine.ext import db
from mapreduce import context
from mapreduce import control
from mapreduce import handlers
from mapreduce import model
from mapreduce import output_writers
from mapreduce import parameters
from mapreduce import records
from mapreduce import test_support
from testlib import testutil
from mapreduce.tools import gcs_file_seg_reader
# pylint: disable=g-import-not-at-top
try:
import cloudstorage
# In the 25 runtime, the above code will be scrubbed to import the stub
# version of cloudstorage. All occurrences of the following if condition in
# the MR codebase are to tell it apart.
# TODO(user): Remove after the 25 runtime MR is abandoned.
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
from cloudstorage import storage_api
except ImportError:
cloudstorage = None # CloudStorage library not available
# pylint: disable=g-bad-name
def random_string(length):
"""Generate a random string of given length."""
return "".join(
random.choice(string.letters + string.digits) for _ in range(length))
class TestEntity(db.Model):
"""Test entity class."""
int_property = db.IntegerProperty()
dt = db.DateTimeProperty(default=datetime.datetime(2000, 1, 1))
class NdbTestEntity(ndb.Model):
"""Test entity class for NDB."""
class TestHandler(object):
"""Test handler which stores all processed entities keys.
Properties:
processed_entites: all processed entities.
"""
processed_entites = []
def __call__(self, entity):
"""Main handler process function.
Args:
entity: entity to process.
"""
TestHandler.processed_entites.append(entity)
@staticmethod
def reset():
"""Clear processed_entites & reset delay to 0."""
TestHandler.processed_entites = []
class SerializableHandler(object):
"""Handler that utilize serialization."""
_next_instance_id = 0
# The first few instances will keep raising errors.
# This is to test that, upon shard retry, the shard creates a new handler.
INSTANCES_THAT_RAISE_ERRORS = 3
# The first instance is created for validation and not used by any shard.
FAILURES_INDUCED_BY_INSTANCE = INSTANCES_THAT_RAISE_ERRORS - 1
def __init__(self):
self.count = 0
self.instance = self.__class__._next_instance_id
self.__class__._next_instance_id += 1
def __call__(self, entity):
if self.instance < self.INSTANCES_THAT_RAISE_ERRORS:
raise files.FinalizationError("Injected error.")
# Increment the int property by one on every call.
entity.int_property = self.count
entity.put()
self.count += 1
@classmethod
def reset(cls):
cls._next_instance_id = 0
def test_handler_yield_key(entity):
"""Test handler which yields entity key."""
yield entity.key()
def test_handler_yield_ndb_key(entity):
"""Test handler which yields entity key (NDB version)."""
yield entity.key
class TestOutputWriter(output_writers.OutputWriter):
"""Test output writer."""
file_contents = {}
def __init__(self, filename):
self.filename = filename
@classmethod
def reset(cls):
cls.file_contents = {}
@classmethod
def validate(cls, mapper_spec):
pass
@classmethod
def finalize_job(cls, mapreduce_state):
pass
@classmethod
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
random_str = "".join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
cls.file_contents[random_str] = []
return cls(random_str)
def to_json(self):
return {"filename": self.filename}
@classmethod
def from_json(cls, json_dict):
return cls(json_dict["filename"])
def write(self, data):
self.file_contents[self.filename].append(data)
def finalize(self, ctx, shard_number):
pass
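# TestOutputWriter is wired into a job via output_writer_spec (see
# testOutputWriter below); each shard gets its own writer keyed by a random
# filename, and everything written is collected in the class-level
# file_contents dict for the test assertions.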
class EndToEndTest(testutil.HandlerTestBase):
"""Test mapreduce functionality end to end."""
def setUp(self):
testutil.HandlerTestBase.setUp(self)
TestHandler.reset()
TestOutputWriter.reset()
self.original_slice_duration = parameters.config._SLICE_DURATION_SEC
SerializableHandler.reset()
def tearDown(self):
parameters.config._SLICE_DURATION_SEC = self.original_slice_duration
def testHandlerSerialization(self):
"""Test serializable handler works with MR and shard retry."""
entity_count = 10
for _ in range(entity_count):
TestEntity(int_property=-1).put()
# Force handler to serialize on every call.
parameters.config._SLICE_DURATION_SEC = 0
control.start_map(
"test_map",
__name__ + ".SerializableHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count=1,
base_path="/mapreduce_base_path")
task_run_counts = test_support.execute_until_empty(self.taskqueue)
self.assertEquals(
task_run_counts[handlers.MapperWorkerCallbackHandler],
# Shard retries + one per entity + one to exhaust input reader
SerializableHandler.FAILURES_INDUCED_BY_INSTANCE + entity_count + 1)
vals = [e.int_property for e in TestEntity.all()]
vals.sort()
# SerializableHandler updates int_property to be incremental from 0 to 9.
self.assertEquals(range(10), vals)
def testLotsOfEntities(self):
entity_count = 1000
for i in range(entity_count):
TestEntity().put()
mapreduce_id = control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(entity_count, len(TestHandler.processed_entites))
def testEntityQuery(self):
entity_count = 1000
for i in range(entity_count):
TestEntity(int_property=i % 5).put()
control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
"filters": [("int_property", "=", 3),
# Test datetime can be json serialized.
("dt", "=", datetime.datetime(2000, 1, 1))],
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(200, len(TestHandler.processed_entites))
def testLotsOfNdbEntities(self):
entity_count = 1000
for i in range(entity_count):
NdbTestEntity().put()
mapreduce_id = control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + NdbTestEntity.__name__,
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(entity_count, len(TestHandler.processed_entites))
def testInputReaderDedicatedParameters(self):
entity_count = 100
for i in range(entity_count):
TestEntity().put()
mapreduce_id = control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"input_reader": {
"entity_kind": __name__ + "." + TestEntity.__name__,
},
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(entity_count, len(TestHandler.processed_entites))
def testOutputWriter(self):
"""End-to-end test with output writer."""
entity_count = 1000
for i in range(entity_count):
TestEntity().put()
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler_yield_key",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count=4,
base_path="/mapreduce_base_path",
output_writer_spec=__name__ + ".TestOutputWriter")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(entity_count,
sum(map(len, TestOutputWriter.file_contents.values())))
def testRecordsReader(self):
"""End-to-end test for records reader."""
input_file = files.blobstore.create()
input_data = [str(i) for i in range(100)]
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for record in input_data:
w.write(record)
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
mapreduce_id = control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.RecordsReader",
{
"file": input_file
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(100, len(TestHandler.processed_entites))
def testHugeTaskPayloadTest(self):
"""Test map job with huge parameter values."""
input_file = files.blobstore.create()
input_data = [str(i) for i in range(100)]
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for record in input_data:
w.write(record)
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
mapreduce_id = control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.RecordsReader",
{
"file": input_file,
# the parameter will be compressed and should fit into
# taskqueue payload
"huge_parameter": "0" * 200000, # 200K
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(100, len(TestHandler.processed_entites))
self.assertEquals([], model._HugeTaskPayload.all().fetch(100))
def testHugeTaskUseDatastore(self):
"""Test map job with huge parameter values."""
input_file = files.blobstore.create()
input_data = [str(i) for i in range(100)]
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for record in input_data:
w.write(record)
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
mapreduce_id = control.start_map(
"test_map",
__name__ + ".TestHandler",
"mapreduce.input_readers.RecordsReader",
{
"file": input_file,
# the parameter can't be compressed and wouldn't fit into
# taskqueue payload
"huge_parameter": random_string(900000)
},
shard_count=4,
base_path="/mapreduce_base_path")
test_support.execute_until_empty(self.taskqueue)
self.assertEquals(100, len(TestHandler.processed_entites))
self.assertEquals([], model._HugeTaskPayload.all().fetch(100))
class GCSOutputWriterTestBase(testutil.HandlerTestBase):
"""Base class for all GCS output writer tests."""
def setUp(self):
super(GCSOutputWriterTestBase, self).setUp()
self.original_slice_duration = parameters.config._SLICE_DURATION_SEC
self.original_block_size = storage_api.StreamingBuffer._blocksize
# Use this to adjust what is printed for debugging purposes.
logging.getLogger().setLevel(logging.CRITICAL)
self.writer_cls = output_writers._GoogleCloudStorageOutputWriter
# Populate datastore with inputs.
entity_count = 30
for i in range(entity_count):
TestEntity(int_property=i).put()
# Make slice short.
parameters.config._SLICE_DURATION_SEC = 1
# 5 items per second. This effectively terminates a slice after
# processing 5 items.
self.processing_rate = 5
def tearDown(self):
storage_api.StreamingBuffer._blocksize = self.original_block_size
parameters.config._SLICE_DURATION_SEC = self.original_slice_duration
super(GCSOutputWriterTestBase, self).tearDown()
class GCSOutputWriterNoDupModeTest(GCSOutputWriterTestBase):
"""Test GCS output writer slice recovery."""
def testSliceRecoveryWithForcedFlushing(self):
# Force a flush to GCS on every character written.
storage_api.StreamingBuffer._blocksize = 1
mr_id = control.start_map(
"test_map",
__name__ + ".FaultyHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"input_reader": {
"entity_kind": __name__ + "." + TestEntity.__name__,
},
"output_writer": {
"bucket_name": "bucket",
"no_duplicate": True,
},
"processing_rate": self.processing_rate,
},
output_writer_spec=("mapreduce.output_writers."
"_GoogleCloudStorageOutputWriter"),
shard_count=1)
test_support.execute_until_empty(self.taskqueue)
mr_state = model.MapreduceState.get_by_job_id(mr_id)
# Verify MR is successful.
self.assertEqual(model.MapreduceState.RESULT_SUCCESS,
mr_state.result_status)
# Read output info from shard states.
shard_state = model.ShardState.find_all_by_mapreduce_state(mr_state).next()
writer_state = shard_state.writer_state
last_seg_index = writer_state[self.writer_cls._LAST_SEG_INDEX]
seg_prefix = writer_state[self.writer_cls._SEG_PREFIX]
# Verify we have 5 segs.
self.assertEqual(4, last_seg_index)
self._assertOutputEqual(seg_prefix, last_seg_index)
# Check that duplicated data is indeed present.
f1 = set([line for line in cloudstorage.open(seg_prefix + "0")])
f2 = set([line for line in cloudstorage.open(seg_prefix + "1")])
common = f1.intersection(f2)
self.assertEqual(set(["10\n", "11\n"]), common)
def testSliceRecoveryWithFrequentFlushing(self):
# Force a flush to GCS on every 8 chars.
storage_api.StreamingBuffer._blocksize = 8
mr_id = control.start_map(
"test_map",
__name__ + ".FaultyHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"input_reader": {
"entity_kind": __name__ + "." + TestEntity.__name__,
},
"output_writer": {
"bucket_name": "bucket",
"no_duplicate": True,
},
"processing_rate": self.processing_rate,
},
output_writer_spec=("mapreduce.output_writers."
"_GoogleCloudStorageOutputWriter"),
shard_count=1)
test_support.execute_until_empty(self.taskqueue)
mr_state = model.MapreduceState.get_by_job_id(mr_id)
# Verify MR is successful.
self.assertEqual(model.MapreduceState.RESULT_SUCCESS,
mr_state.result_status)
# Read output info from shard states.
shard_state = model.ShardState.find_all_by_mapreduce_state(mr_state).next()
writer_state = shard_state.writer_state
last_seg_index = writer_state[self.writer_cls._LAST_SEG_INDEX]
seg_prefix = writer_state[self.writer_cls._SEG_PREFIX]
# Verify we have 5 segs.
self.assertEqual(4, last_seg_index)
self._assertOutputEqual(seg_prefix, last_seg_index)
def testSliceRecoveryWithNoFlushing(self):
# Flushing is done every 256K, which means never until slice recovery.
mr_id = control.start_map(
"test_map",
__name__ + ".FaultyHandler",
"mapreduce.input_readers.DatastoreInputReader",
{
"input_reader": {
"entity_kind": __name__ + "." + TestEntity.__name__,
},
"output_writer": {
"bucket_name": "bucket",
"no_duplicate": True,
},
"processing_rate": self.processing_rate,
},
output_writer_spec=("mapreduce.output_writers."
"_GoogleCloudStorageOutputWriter"),
shard_count=1)
test_support.execute_until_empty(self.taskqueue)
mr_state = model.MapreduceState.get_by_job_id(mr_id)
# Verify MR is successful.
self.assertEqual(model.MapreduceState.RESULT_SUCCESS,
mr_state.result_status)
# Read output info from shard states.
shard_state = model.ShardState.find_all_by_mapreduce_state(mr_state).next()
writer_state = shard_state.writer_state
last_seg_index = writer_state[self.writer_cls._LAST_SEG_INDEX]
seg_prefix = writer_state[self.writer_cls._SEG_PREFIX]
# Verify we have 5 segs.
self.assertEqual(4, last_seg_index)
self._assertOutputEqual(seg_prefix, last_seg_index)
def _assertOutputEqual(self, seg_prefix, last_seg_index):
# Read back outputs.
reader = gcs_file_seg_reader._GCSFileSegReader(seg_prefix, last_seg_index)
result = ""
while True:
tmp = reader.read(n=100)
if not tmp:
break
result += tmp
# Verify output has no duplicates.
expected = ""
for i in range(30):
expected += "%s\n" % i
self.assertEqual(expected, result)
class FaultyHandler(object):
def __init__(self):
self.slice_count = 0
def __setstate__(self, state):
# Reset at beginning of each slice.
self.slice_count = 0
def __call__(self, entity):
self.slice_count += 1
yield "%s\n" % entity.int_property
slice_id = context.get()._shard_state.slice_id
# Raise an exception when processing the 2nd item in a slice, on every 3rd slice.
if (self.slice_count == 2 and
(slice_id + 1) % 3 == 0):
raise Exception("Intentionally raise an exception")
if __name__ == "__main__":
unittest.main()
|
|
import inspect
import asplib.asp.codegen.python_ast as ast
import asplib.asp.codegen.cpp_ast as cpp_ast
import asplib.asp.codegen.ast_tools as ast_tools
import codepy.cgen
import asplib.asp.jit.asp_module as asp_module
from collections import namedtuple
class IfNotDefined(cpp_ast.Generable):
"""
A generable AST node for the 'if not defined' (#ifndef) directive.
Accepts argument 'symbol', the token to check for defined status.
"""
def __init__(self, symbol):
self.symbol = symbol
def generate(self):
yield "#ifndef %s" % self.symbol
class EndIf(cpp_ast.Generable):
"""
A generable AST node for the 'end if' (#endif) directive.
"""
def generate(self):
yield "#endif"
class Namespace(cpp_ast.Generable):
"""
A generable AST node representing a namespace.
Accepts arguments
'name', the namespace name, and
'body', a cpp_ast.Block containing the body of the namespace.
"""
def __init__(self, name, body):
self.name = name
self.body = body
self._fields = ['name', 'body']
def generate(self, with_semicolon=False):
yield 'namespace %s' % self.name
assert isinstance(self.body, cpp_ast.Block)
for line in self.body.generate(with_semicolon):
yield line
class ConstFunctionDeclaration(cpp_ast.FunctionDeclaration):
"""
Subclasses cpp_ast.FunctionDeclaration to make the declared function const.
Implementation of this might be better moved into FunctionDeclaration.
"""
def generate(self, with_semicolon=True):
for item in super(ConstFunctionDeclaration, self).generate(with_semicolon):
yield item
yield ' const'
class New(cpp_ast.Generable):
"""
Generable AST node for a statement allocating new memory.
Accepts 'typename', name of associated type.
"""
def __init__(self, typename):
self.typename = typename
def generate(self, with_semicolon=False):
gen_str = 'new ' + str(self.typename)
if with_semicolon:
gen_str += ';'
yield gen_str
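# For example, joining New("doubleint()").generate(with_semicolon=True)
# produces the single line "new doubleint();".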
class CMakeModule(object):
"""
This class is a (still somewhat hacky) mimic of the style of CodePy's
Boost.Python module, adding support for including ASP-generated code in
projects that use GNU make as their build system.
Note that the compile() member method is specific to the pyCombBLAS project
makefile, which accepts a DYNFILE= command line argument giving the filename
of a dynamically generated file.
Arguments:
temp_dir - Directory to store dynamically generated cpp, header, and SWIG interface files.
makefile_dir - Directory of the makefile.
name - A name given to the generated files.
namespace - A namespace to include all code generated in.
include_files - A list of files to #include at the top of the header and cpp files.
"""
def __init__(self, temp_dir, makefile_dir, name="module", namespace=None, include_files=[]):
self.name = name
self.preamble = []
self.mod_body = []
self.header_body = []
self.namespace = namespace
self.temp_dir = temp_dir
self.makefile_dir = makefile_dir
self.include_files = include_files
def include_file(self, filepath):
self.include_files.append(filepath)
def add_to_preamble(self, pa):
self.preamble.extend(pa)
def add_to_module(self, body):
self.mod_body.extend(body)
def add_function(self, func):
"""*func* is a :class:`cgen.FunctionBody`."""
self.mod_body.append(func)
# Want the prototype for the function added to the header.
self.header_body.append(func.fdecl)
def add_struct(self, struct):
self.mod_body.append(struct)
def generate(self):
source = []
if self.namespace is not None:
self.mod_body = [Namespace(self.namespace, cpp_ast.Block(self.mod_body))]
self.preamble += [cpp_ast.Include(self.temp_dir+self.name+".h", system=False)]
for include in self.include_files:
self.preamble += [cpp_ast.Include(include, system=False)]
source += self.preamble + [codepy.cgen.Line()] + self.mod_body
return codepy.cgen.Module(source)
def generate_header(self):
header = []
if self.namespace is not None:
self.header_body = [Namespace(self.namespace, cpp_ast.Block(self.header_body))]
header_top = [IfNotDefined(self.name+"_H"), cpp_ast.Define(self.name+"_H", "")]
for include in self.include_files:
header_top += [cpp_ast.Include(include, system=False)]
header += header_top + self.header_body + [EndIf()]
return codepy.cgen.Module(header)
def generate_swig_interface(self):
interface_string = "%module " + self.name + "\n"
interface_string += "%{\n"
interface_string += str(cpp_ast.Include(self.temp_dir+self.name+".h", system=False))
interface_string += "\n"
interface_string += "%}\n"
interface_string += "".join([str(line) for line in self.header_body])
return interface_string
def compile(self):
from os import getcwd, chdir
from subprocess import call
original_dir = getcwd()
chdir(self.temp_dir)
header_file = open(self.name + ".h", 'w')
print >>header_file, self.generate_header()
header_file.close()
cpp_file = open(self.name + ".cpp", 'w')
print >>cpp_file, self.generate()
cpp_file.close()
i_file = open(self.name + ".i", 'w')
print >>i_file, self.generate_swig_interface()
i_file.close()
chdir(self.makefile_dir)
args = ["make", "DYNFILE="+self.temp_dir+self.name]
call(args)
chdir(original_dir)
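# A minimal usage sketch (paths and names here are hypothetical, assuming the
# pyCombBLAS-style makefile described in the class docstring):
#
#   mod = CMakeModule("/tmp/asp/", "/path/to/pyCombBLAS/", name="ops",
#                     namespace="op", include_files=["pyOperations.h"])
#   mod.add_function(some_cgen_function_body)  # a codepy.cgen.FunctionBody
#   mod.compile()  # writes ops.h, ops.cpp, ops.i and runs make DYNFILE=...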
class Operator(object):
"""
Class to represent the data associated with an operator.
A class is used because empty fields are nicer than with namedtuple.
"""
def __init__(self, name, assoc=None, comm=None, src=None, ast=None):
self.name = name
self.src = src
self.ast = ast
self.assoc = assoc
self.comm = comm
class PcbOperator(object):
def __init__(self, operators):
# Check that a method is defined for each operator.
self.operators = operators
temp_path = "/home/harper/Documents/Work/SEJITS/temp/"
makefile_path = "/home/harper/Documents/Work/SEJITS/pyCombBLAS/trunk/kdt/pyCombBLAS"
include_files = ["/home/harper/Documents/Work/SEJITS/pyCombBLAS/trunk/kdt/pyCombBLAS/pyOperations.h"]
mod = CMakeModule(temp_path, makefile_path, namespace="op", include_files=include_files)
for operator in self.operators:
try:
dir(self).index(operator.name)
except ValueError:
raise Exception('No %s method defined.' % operator.name)
operator.src = inspect.getsource(getattr(self, operator.name))
operator.ast = ast.parse(operator.src.lstrip())
phase2 = PcbOperator.ProcessAST(operator).visit(operator.ast)
converted = PcbOperator.ConvertAST().visit(phase2)
mod.add_struct(converted.contents[0])
mod.add_function(converted.contents[1])
mod.compile()
class UnaryFunctionNode(ast.AST):
def __init__(self, name, args, body):
self.name = name
self.args = args
self.body = body
self._fields = ['name', 'args', 'body']
super(PcbOperator.UnaryFunctionNode, self).__init__()
class BinaryFunctionNode(ast.AST):
def __init__(self, name, args, body, assoc, comm):
self.name = name
self.args = args
self.body = body
self.assoc = assoc
self.comm = comm
self._fields = ['name', 'args', 'body']
super(PcbOperator.BinaryFunctionNode, self).__init__()
class ProcessAST(ast_tools.NodeTransformer):
def __init__(self, operator):
self.operator = operator
super(PcbOperator.ProcessAST, self).__init__()
def visit_Number(self, node):
new_node = cpp_ast.FunctionCall("doubleint", [node])
return new_node
def visit_FunctionDef(self, node):
print node.args.args[0].id
if len(node.args.args) == 1:
new_node = PcbOperator.UnaryFunctionNode(node.name, node.args, node.body)
elif len(node.args.args) == 2:
new_node = PcbOperator.BinaryFunctionNode(node.name, node.args, node.body,
self.operator.assoc, self.operator.comm)
else:
return node
return new_node
class ConvertAST(ast_tools.ConvertAST):
def visit_Num(self, node):
"""If we find a number, want to convert it to a doubleint for PCB."""
print dir(node)
return cpp_ast.FunctionCall("doubleint", [node.n])
def visit_UnaryFunctionNode(self, node):
# Create the new function that does the same thing as 'op'
new_function_decl = ConstFunctionDeclaration(
cpp_ast.Value("T", "operator()"),
[cpp_ast.Value("const T&", node.args.args[0].id)])
# Add all of the contents of the old function to the new one
new_function_contents = cpp_ast.Block([self.visit(subnode) for subnode in node.body])
new_function_body = cpp_ast.FunctionBody(new_function_decl, new_function_contents)
operator_struct = cpp_ast.Template(
"typename T",
cpp_ast.Struct(node.name+"_s : public ConcreteUnaryFunction<T>", [new_function_body])
)
# Finally, generate a function for constructing one of these operators
new_constructor_decl = cpp_ast.FunctionDeclaration(
cpp_ast.Value("UnaryFunction", node.name),
[] )
new_constructor_body = cpp_ast.ReturnStatement(
cpp_ast.FunctionCall("UnaryFunction", [
New(node.name+"_s<doubleint>()")])
)
new_constructor_function = cpp_ast.FunctionBody(new_constructor_decl, cpp_ast.Block([new_constructor_body]))
# Block for the module contents.
main_block = cpp_ast.Block()
main_block.append(operator_struct)
main_block.append(new_constructor_function)
return main_block
def visit_BinaryFunctionNode(self, node):
# Create the new function that does the same thing as 'op'
new_function_decl = ConstFunctionDeclaration(
cpp_ast.Value("T", "operator()"),
[cpp_ast.Value("const T&", node.args.args[0].id),
cpp_ast.Value("const T&", node.args.args[1].id)])
# Add all of the contents of the old function to the new one
new_function_contents = cpp_ast.Block([self.visit(subnode) for subnode in node.body])
new_function_body = cpp_ast.FunctionBody(new_function_decl, new_function_contents)
operator_struct = cpp_ast.Template(
"typename T",
cpp_ast.Struct(node.name+"_s : public ConcreteBinaryFunction<T>", [new_function_body])
)
# Finally, generate a function for constructing one of these operators
new_constructor_decl = cpp_ast.FunctionDeclaration(
cpp_ast.Value("BinaryFunction", node.name),
[] )
new_constructor_body = cpp_ast.ReturnStatement(
cpp_ast.FunctionCall("BinaryFunction", [
New(node.name+"_s<doubleint>()"),
str(node.assoc).lower(), str(node.comm).lower()])
)
new_constructor_function = cpp_ast.FunctionBody(new_constructor_decl, cpp_ast.Block([new_constructor_body]))
# Block for the module contents.
main_block = cpp_ast.Block()
main_block.append(operator_struct)
main_block.append(new_constructor_function)
return main_block
def explore_ast(self, node, depth):
print ' '*depth, node
for n in ast.iter_child_nodes(node):
self.explore_ast(n, depth+1)
|
|
import os
import unittest
import uuid
import floe
import webtest
import json
import wsgiadapter
import logging
import socket
import floe.restapi
import floe.connector
import time
import pymysql
wsgiadapter.logger.addHandler(logging.NullHandler())
mysql_user = os.getenv('MYSQL_USER', 'root')
mysql_pass = os.getenv('MYSQL_PASSWORD', None)
mysql_auth = "%s:%s" % (mysql_user, mysql_pass) \
if mysql_pass is not None else mysql_user
table_prefix_variable = int(time.time())
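# Timestamp suffix appended to MySQL table names so each test run uses
# fresh, unique tables.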
os.environ['FLOE_URL_TEST_FILE'] = 'file://.test_floe'
os.environ['FLOE_URL_TEST_REST_BOGUS'] = 'http://test-floe/bogus'
os.environ['FLOE_URL_TEST_REST_FILE'] = 'http://test-floe/test_file'
os.environ['FLOE_URL_TEST_REST_BROKEN'] = 'http://test-floe/broken'
adapter = wsgiadapter.WSGIAdapter(floe.floe_server())
floe.restapi.RestClientFloe.session.mount('http://test-floe/', adapter) # noqa
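# Mounting the WSGI adapter on the REST client's requests session routes all
# 'http://test-floe/...' calls to the in-process floe server above, so no
# real network or external service is needed in these tests.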
def drop_table(pool, table_name):
statement = "DROP table {}".format(table_name)
try:
with pool.connection() as connection:
with connection.cursor() as cursor:
cursor.execute(statement)
except pymysql.Error as e:
raise e
def is_local_mysql_running():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', 3306))
return result == 0
codeship_build = os.getenv('CODESHIP_BUILD')
mysql_test_enable = bool(
    os.getenv('MYSQL_TEST_ENABLE', is_local_mysql_running()))
MYSQL_TEST = unittest.skipIf(codeship_build or not mysql_test_enable,
'mysql test disabled on local and codeship')
BLOB_MAX_CHAR_LEN = 65535
MEDIUM_BLOB_MAX_CHAR_LEN = 16777215
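# Maximum sizes (in bytes) of MySQL BLOB and MEDIUMBLOB columns; the data
# overflow tests below write one byte past these limits.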
def xid():
return uuid.uuid4().hex
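# xid() returns a 32-character hex string used as an opaque key throughout
# the tests below.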
class BrokenFloe(object):
def get(self, key):
raise floe.FloeReadException('failed to read')
def get_multi(self, keys):
raise floe.FloeReadException('failed to read')
def set(self, key, bin_data):
raise floe.FloeWriteException('failed to write')
def set_multi(self, mapping):
raise floe.FloeWriteException('failed to write')
def delete(self, key):
raise floe.FloeDeleteException('failed to delete')
def delete_multi(self, keys):
raise floe.FloeDeleteException('failed to delete')
def flush(self):
pass
def ids(self):
raise floe.FloeReadException('failed to read')
floe.connector._CONNECTIONS['BROKEN'] = BrokenFloe()
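# Register the always-failing floe under the 'BROKEN' name; the REST URL
# configured above ('http://test-floe/broken') presumably resolves to it, so
# RestClientBrokenTest can exercise the error-propagation paths.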
class FileFloeTest(unittest.TestCase):
def init_floe(self):
return floe.connect('test_file')
def setUp(self):
self.floe = self.init_floe()
self.floe.flush()
def tearDown(self):
self.floe.flush()
def test_main(self):
store = self.floe
foo = xid()
bar = xid()
bazz = xid()
foo_test_data = os.urandom(4096)
store.set(foo, foo_test_data)
self.assertEqual(store.get(foo), foo_test_data)
foo_test_data = os.urandom(500)
bazz_test_data = os.urandom(200)
store.set_multi({foo: foo_test_data, bazz: bazz_test_data})
self.assertEqual(store.get(foo), foo_test_data)
self.assertEqual(store.get(bazz), bazz_test_data)
self.assertEqual(store.get_multi([foo, bar, bazz]),
{foo: foo_test_data, bazz: bazz_test_data})
ids = {i for i in store.ids()}
self.assertEqual(ids, {bazz, foo})
store.delete(foo)
self.assertEqual(store.get_multi([foo, bar, bazz]),
{bazz: bazz_test_data})
store.delete_multi([foo, bar, bazz])
self.assertEqual(store.get_multi([foo, bar, bazz]), {})
self.assertRaises(floe.FloeInvalidKeyException,
lambda: store.get('foo/bar'))
self.assertRaises(floe.FloeInvalidKeyException,
lambda: store.set('foo/bar', '1'))
self.assertRaises(floe.FloeInvalidKeyException,
lambda: store.delete('foo/bar'))
self.assertRaises(floe.FloeInvalidKeyException,
lambda: store.get('foo/bar'))
self.assertRaises(floe.FloeInvalidKeyException,
lambda: store.set('foo/bar', '1'))
self.assertRaises(floe.FloeInvalidKeyException,
lambda: store.delete('foo/bar'))
class MysqlFloe(FileFloeTest):
def setUp(self):
self.mysql_tables = [
'%s_%s' % (table_name, table_prefix_variable)
for table_name in ['test_floe', 'test_floe_2', 'test_floe_3']
]
for index, table in enumerate(self.mysql_tables):
environ_key = 'FLOE_URL_%s' % table.upper()
url = "mysql://%s@127.0.0.1:3306/test?table=%s" % (
mysql_auth, table)
if index > 0:
url += "&dynamic_char_len=True"
if index > 1:
url += "&bin_data_type=blob"
os.environ[environ_key] = url
super(MysqlFloe, self).setUp()
def tearDown(self):
for table in self.mysql_tables:
store = floe.connect(table)
drop_table(store.pool, table)
def init_floe(self):
return floe.connect(self.mysql_tables[0])
@MYSQL_TEST
def test_main(self):
super(MysqlFloe, self).test_main()
@MYSQL_TEST
def test_uppercase(self):
store = self.floe
foo = xid()
foo_upper = foo.upper()
foo_test_data = os.urandom(10)
foo_upper_test_data = os.urandom(12)
self.assertNotEqual(foo_test_data, foo_upper_test_data)
store.set(foo, foo_test_data)
store.set(foo_upper, foo_upper_test_data)
self.assertEqual(store.get(foo), foo_test_data)
self.assertEqual(store.get(foo_upper), foo_upper_test_data)
@MYSQL_TEST
def test_data_overflow_from_sql(self):
store = floe.connect(self.mysql_tables[1])
foo = xid()
foo_smaller = foo.upper()
foo_data = os.urandom(MEDIUM_BLOB_MAX_CHAR_LEN + 1)
self.assertRaises(
floe.FloeDataOverflowException,
lambda: store.set(foo, foo_data))
foo_smaller_data = foo_data[:-1]
store.set(foo_smaller, foo_smaller_data)
self.assertEqual(store.get(foo_smaller), foo_smaller_data)
@MYSQL_TEST
def test_data_overflow(self):
store = self.floe
foo = xid()
foo_smaller = foo.upper()
foo_data = os.urandom(BLOB_MAX_CHAR_LEN + 1)
self.assertRaises(
floe.FloeDataOverflowException,
lambda: store.set(foo, foo_data))
foo_smaller_data = foo_data[:-1]
store.set(foo_smaller, foo_smaller_data)
self.assertEqual(store.get(foo_smaller), foo_smaller_data)
@MYSQL_TEST
def test_custom_bin_data_type(self):
store = floe.connect(self.mysql_tables[2])
foo = xid()
foo_smaller = foo.upper()
foo_data = os.urandom(BLOB_MAX_CHAR_LEN + 1)
self.assertRaises(
floe.FloeDataOverflowException,
lambda: store.set(foo, foo_data))
foo_smaller_data = foo_data[:-1]
store.set(foo_smaller, foo_smaller_data)
self.assertEqual(store.get(foo_smaller), foo_smaller_data)
class RestServerAdditionalRoute(object):
def on_get(self, req, resp):
resp.content_type = 'text/plain'
resp.body = 'additional'
class RestServerTest(unittest.TestCase):
def init_floe(self):
return floe.connect('test_file')
def setUp(self):
self.floe = self.init_floe()
self.floe.flush()
self.app = webtest.TestApp(floe.floe_server(
routes={'/testroute': RestServerAdditionalRoute()}))
def tearDown(self):
self.floe.flush()
def test_crud(self):
key = xid()
res = self.app.get('/test_file/%s' % key, expect_errors=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_length, 0)
data = os.urandom(100)
res = self.app.put('/test_file/%s' % key, params=data,
headers={'content-type': 'binary/octet-stream'})
self.assertEqual(res.status_code, 200)
res = self.app.get('/test_file/%s' % key)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.body, data)
res = self.app.delete('/test_file/%s' % key)
self.assertEqual(res.status_code, 200)
def test_keys(self):
keys = {xid() for _ in range(0, 120)}
for key in keys:
res = self.app.put('/test_file/%s' % key, params=os.urandom(10))
res = self.app.get('/test_file')
result_keys = set()
for line in res.body.decode('utf-8').split('\n'):
if line:
result_keys.update(json.loads(line.strip()))
self.assertEqual(keys, result_keys)
def test_nested_dirs(self):
res = self.app.get('/test_file/foo/bar', expect_errors=True)
self.assertEqual(res.status_code, 404)
def test_index(self):
res = self.app.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.body, b'Floe Microservice')
def test_additional_route(self):
res = self.app.get('/testroute')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.body, b'additional')
class RestClientFileTest(FileFloeTest):
def init_floe(self):
return floe.connect('test_rest_file')
class RestClientMysqlTest(FileFloeTest):
def setUp(self):
table = '%s_%s' % ('rest_mysql', table_prefix_variable)
os.environ['FLOE_URL_TEST_REST_MYSQL'] = 'http://test-floe/%s' % table
environ_key = 'FLOE_URL_%s' % table.upper()
url = "mysql://%s@127.0.0.1:3306/test?table=%s" % (
mysql_auth, table)
self.table = table
os.environ[environ_key] = url
super(RestClientMysqlTest, self).setUp()
def tearDown(self):
store = self.floe
drop_table(store.pool, self.table)
def init_floe(self):
return floe.connect(self.table)
@MYSQL_TEST
def test_main(self):
super(RestClientMysqlTest, self).test_main()
class RestClientMisconfigurationTest(unittest.TestCase):
def init_floe(self):
return floe.connect('test_rest_bogus')
def setUp(self):
self.floe = self.init_floe()
def test_main(self):
store = self.floe
foo = xid()
self.assertRaises(floe.FloeConfigurationException,
lambda: store.get(foo))
self.assertRaises(floe.FloeConfigurationException,
lambda: store.set(foo, '1'))
self.assertRaises(floe.FloeConfigurationException,
lambda: store.delete(foo))
self.assertRaises(floe.FloeConfigurationException,
lambda: [k for k in store.ids()])
class RestClientBrokenTest(unittest.TestCase):
def init_floe(self):
return floe.connect('test_rest_broken')
def setUp(self):
self.floe = self.init_floe()
def test_main(self):
store = self.floe
foo = xid()
self.assertRaises(floe.FloeReadException,
lambda: store.get(foo))
self.assertRaises(floe.FloeWriteException,
lambda: store.set(foo, '1'))
self.assertRaises(floe.FloeDeleteException,
lambda: store.delete(foo))
self.assertRaises(floe.FloeReadException,
lambda: [k for k in store.ids()])
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
import collections
import datetime
import json
from django.urls import reverse
from django.utils import timezone
from wagtail.api.v2.tests.test_pages import TestPageDetail, TestPageListing
from wagtail.core.models import Page
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import SimplePage, StreamPage
from .utils import AdminAPITestCase
def get_total_page_count():
# Need to take away 1 as the root page is invisible over the API by default
return Page.objects.count() - 1
class TestAdminPageListing(AdminAPITestCase, TestPageListing):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailadmin_api_v1:pages:listing'), params)
def get_page_id_list(self, content):
return [page['id'] for page in content['items']]
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_total_page_count())
# Check that the items section is there
self.assertIn('items', content)
self.assertIsInstance(content['items'], list)
# Check that each page has a meta section with type, detail_url, html_url, status and children attributes
for page in content['items']:
self.assertIn('meta', page)
self.assertIsInstance(page['meta'], dict)
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'status', 'children', 'slug', 'first_published_at', 'latest_revision_created_at'})
# Check the type info
self.assertIsInstance(content['__types'], dict)
self.assertEqual(set(content['__types'].keys()), {
'demosite.EventPage',
'demosite.StandardIndexPage',
'demosite.PersonPage',
'demosite.HomePage',
'demosite.StandardPage',
'demosite.EventIndexPage',
'demosite.ContactPage',
'demosite.BlogEntryPage',
'demosite.BlogIndexPage',
})
self.assertEqual(set(content['__types']['demosite.EventPage'].keys()), {'verbose_name', 'verbose_name_plural'})
self.assertEqual(content['__types']['demosite.EventPage']['verbose_name'], 'event page')
self.assertEqual(content['__types']['demosite.EventPage']['verbose_name_plural'], 'event pages')
# Not applicable to the admin API
test_unpublished_pages_dont_appear_in_list = None
test_private_pages_dont_appear_in_list = None
def test_unpublished_pages_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogEntryPage.objects.get(id=16)
page.unpublish()
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], total_count)
def test_private_pages_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogIndexPage.objects.get(id=5)
page.view_restrictions.create(password='test')
new_total_count = get_total_page_count()
self.assertEqual(new_total_count, total_count)
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], new_total_count)
# FIELDS
# Not applicable to the admin API
test_parent_field_gives_error = None
def test_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title', 'date', 'feed_image'})
def test_fields_default(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'children', 'status', 'slug', 'first_published_at', 'latest_revision_created_at'})
def test_remove_meta_fields(self):
response = self.get_response(fields='-html_url')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'slug', 'first_published_at', 'latest_revision_created_at', 'status', 'children'})
def test_remove_all_meta_fields(self):
response = self.get_response(fields='-type,-detail_url,-slug,-first_published_at,-html_url,-latest_revision_created_at,-status,-children')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'title', 'admin_display_title'})
def test_remove_fields(self):
response = self.get_response(fields='-title,-admin_display_title')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta'})
def test_remove_id_field(self):
response = self.get_response(fields='-id')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'meta', 'title', 'admin_display_title'})
def test_all_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title', 'date', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'seo_title', 'slug', 'parent', 'html_url', 'search_description', 'children', 'descendants', 'status', 'latest_revision_created_at'})
def test_all_fields_then_remove_something(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*,-title,-admin_display_title,-date,-seo_title,-status')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'slug', 'parent', 'html_url', 'search_description', 'children', 'descendants', 'latest_revision_created_at'})
def test_all_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
def test_fields_foreign_key(self):
# Only the base of the detail_url is different here from the public API
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
feed_image = page['feed_image']
if feed_image is not None:
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url', 'download_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/admin/api/v2beta/images/%d/' % feed_image['id'])
def test_fields_parent(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='parent')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
parent = page['meta']['parent']
# All blog entry pages have the same parent
self.assertDictEqual(parent, {
'id': 5,
'meta': {
'type': 'demosite.BlogIndexPage',
'detail_url': 'http://localhost/admin/api/v2beta/pages/5/',
'html_url': 'http://localhost/blog-index/',
},
'title': "Blog index"
})
def test_fields_descendants(self):
response = self.get_response(fields='descendants')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
descendants = page['meta']['descendants']
self.assertEqual(set(descendants.keys()), {'count', 'listing_url'})
self.assertIsInstance(descendants['count'], int)
self.assertEqual(descendants['listing_url'], 'http://localhost/admin/api/v2beta/pages/?descendant_of=%d' % page['id'])
def test_fields_child_relation(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title', 'related_links'})
self.assertIsInstance(page['related_links'], list)
def test_fields_ordering(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'admin_display_title',
'date',
'feed_image',
'related_links',
]
self.assertEqual(list(content['items'][0].keys()), field_order)
def test_fields_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'tags', 'title', 'admin_display_title'})
self.assertIsInstance(page['tags'], list)
# CHILD OF FILTER
# Not applicable to the admin API
test_child_of_page_thats_not_in_same_site_gives_error = None
def test_child_of_root(self):
# Only return the homepage as that's the only child of the "root" node
# in the tree. This is different to the public API which pretends the
# homepage of the current site is the root page.
response = self.get_response(child_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2])
def test_child_of_page_1(self):
# Public API doesn't allow this, as it's the root page
response = self.get_response(child_of=1)
json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 200)
# DESCENDANT OF FILTER
# Not applicable to the admin API
test_descendant_of_page_thats_not_in_same_site_gives_error = None
def test_descendant_of_root(self):
response = self.get_response(descendant_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_descendant_of_root_doesnt_give_error(self):
# Public API doesn't allow this
response = self.get_response(descendant_of=1)
json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 200)
# FOR EXPLORER FILTER
def make_simple_page(self, parent, title):
return parent.add_child(instance=SimplePage(title=title, content='Simple page'))
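# Helper that creates a SimplePage child under the given parent; used by the
# for_explorer filter tests below.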
def test_for_explorer_filter(self):
movies = self.make_simple_page(Page.objects.get(pk=1), 'Movies')
visible_movies = [
self.make_simple_page(movies, 'The Way of the Dragon'),
self.make_simple_page(movies, 'Enter the Dragon'),
self.make_simple_page(movies, 'Dragons Forever'),
]
hidden_movies = [
self.make_simple_page(movies, 'The Hidden Fortress'),
self.make_simple_page(movies, 'Crouching Tiger, Hidden Dragon'),
self.make_simple_page(movies, 'Crouching Tiger, Hidden Dragon: Sword of Destiny'),
]
response = self.get_response(child_of=movies.pk, for_explorer=1)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [page.pk for page in visible_movies])
response = self.get_response(child_of=movies.pk)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [page.pk for page in visible_movies + hidden_movies])
def test_for_explorer_no_child_of(self):
response = self.get_response(for_explorer=1)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content, {
'message': 'filtering by for_explorer without child_of is not supported',
})
# HAS CHILDREN FILTER
def test_has_children_filter(self):
response = self.get_response(has_children='true')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 5, 6, 21, 20])
def test_has_children_filter_off(self):
response = self.get_response(has_children='false')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 10, 15, 17, 22, 23, 13, 14, 12])
def test_has_children_filter_int(self):
response = self.get_response(has_children=1)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 5, 6, 21, 20])
def test_has_children_filter_int_off(self):
response = self.get_response(has_children=0)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 10, 15, 17, 22, 23, 13, 14, 12])
def test_has_children_filter_invalid_integer(self):
response = self.get_response(has_children=3)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "has_children must be 'true' or 'false'"})
def test_has_children_filter_invalid_value(self):
response = self.get_response(has_children='yes')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "has_children must be 'true' or 'false'"})
# TYPE FILTER
def test_type_filter_items_are_all_blog_entries(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')
# No specific fields available by default
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
def test_type_filter_multiple(self):
response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage')
content = json.loads(response.content.decode('UTF-8'))
blog_page_seen = False
event_page_seen = False
for page in content['items']:
self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage'])
if page['meta']['type'] == 'demosite.BlogEntryPage':
blog_page_seen = True
elif page['meta']['type'] == 'demosite.EventPage':
event_page_seen = True
# Only generic fields available
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
self.assertTrue(blog_page_seen, "No blog pages were found in the items")
self.assertTrue(event_page_seen, "No event pages were found in the items")
class TestAdminPageDetail(AdminAPITestCase, TestPageDetail):
fixtures = ['demosite.json']
def get_response(self, page_id, **params):
return self.client.get(reverse('wagtailadmin_api_v1:pages:detail', args=(page_id, )), params)
def test_basic(self):
response = self.get_response(16)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 16)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/admin/api/v2beta/pages/16/')
# Check the meta html_url
self.assertIn('html_url', content['meta'])
self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/')
# Check the meta status
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'live',
'live': True,
'has_unpublished_changes': False
})
# Check the meta children
self.assertIn('children', content['meta'])
self.assertEqual(content['meta']['children'], {
'count': 0,
'listing_url': 'http://localhost/admin/api/v2beta/pages/?child_of=16'
})
# Check the parent field
self.assertIn('parent', content['meta'])
self.assertIsInstance(content['meta']['parent'], dict)
self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta', 'title'})
self.assertEqual(content['meta']['parent']['id'], 5)
self.assertIsInstance(content['meta']['parent']['meta'], dict)
self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'})
self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage')
self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/admin/api/v2beta/pages/5/')
self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/')
# Check that the custom fields are included
self.assertIn('date', content)
self.assertIn('body', content)
self.assertIn('tags', content)
self.assertIn('feed_image', content)
self.assertIn('related_links', content)
self.assertIn('carousel_items', content)
# Check that the date was serialised properly
self.assertEqual(content['date'], '2013-12-02')
# Check that the tags were serialised properly
self.assertEqual(content['tags'], ['bird', 'wagtail'])
# Check that the feed image was serialised properly
self.assertIsInstance(content['feed_image'], dict)
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title'})
self.assertEqual(content['feed_image']['id'], 7)
self.assertIsInstance(content['feed_image']['meta'], dict)
self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url', 'download_url'})
self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/admin/api/v2beta/images/7/')
# Check that the child relations were serialised properly
self.assertEqual(content['related_links'], [])
for carousel_item in content['carousel_items']:
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'})
self.assertEqual(set(carousel_item['meta'].keys()), {'type'})
# Check the type info
self.assertIsInstance(content['__types'], dict)
self.assertEqual(set(content['__types'].keys()), {
'demosite.BlogIndexPage',
'demosite.BlogEntryPageCarouselItem',
'demosite.BlogEntryPage',
'wagtailimages.Image'
})
self.assertEqual(set(content['__types']['demosite.BlogIndexPage'].keys()), {'verbose_name', 'verbose_name_plural'})
self.assertEqual(content['__types']['demosite.BlogIndexPage']['verbose_name'], 'blog index page')
self.assertEqual(content['__types']['demosite.BlogIndexPage']['verbose_name_plural'], 'blog index pages')
def test_field_ordering(self):
# Need to override this as the admin API has a __types field
response = self.get_response(16)
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'admin_display_title',
'body',
'tags',
'date',
'feed_image',
'feed_image_thumbnail',
'carousel_items',
'related_links',
'__types',
]
self.assertEqual(list(content.keys()), field_order)
def test_meta_status_draft(self):
# Unpublish the page
Page.objects.get(id=16).unpublish()
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'draft',
'live': False,
'has_unpublished_changes': True
})
def test_meta_status_live_draft(self):
# Save a revision without republishing
Page.objects.get(id=16).save_revision()
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'live + draft',
'live': True,
'has_unpublished_changes': True
})
def test_meta_status_scheduled(self):
# Unpublish and save revision with go live date in the future
Page.objects.get(id=16).unpublish()
tomorrow = timezone.now() + datetime.timedelta(days=1)
Page.objects.get(id=16).save_revision(approved_go_live_at=tomorrow)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'scheduled',
'live': False,
'has_unpublished_changes': True
})
def test_meta_status_expired(self):
# Unpublish and set expired flag
Page.objects.get(id=16).unpublish()
Page.objects.filter(id=16).update(expired=True)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'expired',
'live': False,
'has_unpublished_changes': True
})
def test_meta_children_for_parent(self):
# Homepage should have children
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('children', content['meta'])
self.assertEqual(content['meta']['children'], {
'count': 5,
'listing_url': 'http://localhost/admin/api/v2beta/pages/?child_of=2'
})
def test_meta_descendants(self):
        # Homepage should have descendants
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('descendants', content['meta'])
self.assertEqual(content['meta']['descendants'], {
'count': 18,
'listing_url': 'http://localhost/admin/api/v2beta/pages/?descendant_of=2'
})
# FIELDS
def test_remove_all_meta_fields(self):
response = self.get_response(16, fields='-type,-detail_url,-slug,-first_published_at,-html_url,-descendants,-latest_revision_created_at,-children,-show_in_menus,-seo_title,-parent,-status,-search_description')
content = json.loads(response.content.decode('UTF-8'))
self.assertNotIn('meta', set(content.keys()))
self.assertIn('id', set(content.keys()))
def test_remove_all_fields(self):
response = self.get_response(16, fields='_,id,type')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content.keys()), {'id', 'meta', '__types'})
self.assertEqual(set(content['meta'].keys()), {'type'})
def test_all_nested_fields(self):
response = self.get_response(16, fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
def test_fields_foreign_key(self):
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
feed_image = content['feed_image']
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url', 'download_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/admin/api/v2beta/images/%d/' % feed_image['id'])
class TestAdminPageDetailWithStreamField(AdminAPITestCase):
fixtures = ['test.json']
def setUp(self):
super().setUp()
self.homepage = Page.objects.get(url_path='/home/')
def make_stream_page(self, body):
stream_page = StreamPage(
title='stream page',
slug='stream-page',
body=body
)
return self.homepage.add_child(instance=stream_page)
def test_can_fetch_streamfield_content(self):
stream_page = self.make_stream_page('[{"type": "text", "value": "foo"}]')
response_url = reverse('wagtailadmin_api_v1:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
content = json.loads(response.content.decode('utf-8'))
self.assertIn('id', content)
self.assertEqual(content['id'], stream_page.id)
self.assertIn('body', content)
self.assertEqual(len(content['body']), 1)
self.assertEqual(content['body'][0]['type'], 'text')
self.assertEqual(content['body'][0]['value'], 'foo')
self.assertTrue(content['body'][0]['id'])
def test_image_block(self):
stream_page = self.make_stream_page('[{"type": "image", "value": 1}]')
response_url = reverse('wagtailadmin_api_v1:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
content = json.loads(response.content.decode('utf-8'))
# ForeignKeys in a StreamField shouldn't be translated into dictionary representation
self.assertEqual(content['body'][0]['type'], 'image')
self.assertEqual(content['body'][0]['value'], 1)
class TestCustomAdminDisplayTitle(AdminAPITestCase):
fixtures = ['test.json']
def setUp(self):
super().setUp()
self.event_page = Page.objects.get(url_path='/home/events/saint-patrick/')
def test_custom_admin_display_title_shown_on_detail_page(self):
api_url = reverse('wagtailadmin_api_v1:pages:detail', args=(self.event_page.id, ))
response = self.client.get(api_url)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content['title'], "Saint Patrick")
self.assertEqual(content['admin_display_title'], "Saint Patrick (single event)")
def test_custom_admin_display_title_shown_on_listing(self):
api_url = reverse('wagtailadmin_api_v1:pages:listing')
response = self.client.get(api_url)
content = json.loads(response.content.decode('utf-8'))
matching_items = [item for item in content['items'] if item['id'] == self.event_page.id]
self.assertEqual(1, len(matching_items))
self.assertEqual(matching_items[0]['title'], "Saint Patrick")
self.assertEqual(matching_items[0]['admin_display_title'], "Saint Patrick (single event)")
|
|
#!/usr/bin/python
##Time
from datetime import datetime, timedelta
from time import gmtime, strftime, sleep
##Data Acquisition
from xml.dom import minidom
import urllib2
import json
import feedparser
##Scheduling
import schedule
import time
## Raspberry libraries
import RPi.GPIO as GPIO
from RPLCD import CharLCD
## Shutdown management
import os.path
## Define static values
## LCD SETUP
### The pin numbers have to be changed to the pins you are using on your Raspberry Pi.
### The LCD is a 40x4 display. The RPLCD library can only handle 40x2, so the LCD is set up as two 40x2 displays
### using two enable signals. The handling of this is done in the LCD handler (printLine below).
### The numbers are the physical pin numbers of the Raspberry Pi, not the GPIO numbers.
### If using an older Raspberry Pi with only 26 pins, make sure you have the correct pin numbers.
GPIO_PIN_RS = 32
GPIO_PIN_RW = None ## The Raspberry Pi cannot handle the display writing data back (could damage the RPi), so this pin on the LCD was connected to ground.
GPIO_PIN_E_TOP = 33
GPIO_PIN_E_BOTTOM = 31
GPIO_PIN_D4 = 36
GPIO_PIN_D5 = 35
GPIO_PIN_D6 = 38
GPIO_PIN_D7 = 40
LCD_COLUMNS = 40
LCD_ROWS = 2
LCD_DOT_SIZE = 8
LCD_BRIGHTNESS = 0 # to be used with PWM for control of the LCD brightness.
### Initialize the LCD
lcd_top = CharLCD(pin_rs=GPIO_PIN_RS, pin_rw=GPIO_PIN_RW, pin_e=GPIO_PIN_E_TOP, pins_data=[GPIO_PIN_D4, GPIO_PIN_D5, GPIO_PIN_D6, GPIO_PIN_D7], numbering_mode=GPIO.BOARD, cols=LCD_COLUMNS, rows=LCD_ROWS, dotsize=LCD_DOT_SIZE)
lcd_bottom = CharLCD(pin_rs=GPIO_PIN_RS, pin_rw=GPIO_PIN_RW, pin_e=GPIO_PIN_E_BOTTOM, pins_data=[GPIO_PIN_D4, GPIO_PIN_D5, GPIO_PIN_D6, GPIO_PIN_D7], numbering_mode=GPIO.BOARD, cols=LCD_COLUMNS, rows=LCD_ROWS, dotsize=LCD_DOT_SIZE)
var = 1
i = 0
### Functions for getting time
def getTime():
"Gets the current time and date and returns as a string"
time=strftime("%A %Y-%m-%d %H:%M:%S")
return time
### Functions for XML parsing
def getNodeText(node):
nodelist = node.childNodes
result = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
result.append(node.data)
return ''.join(result)
### Functions for downloading data
def getUrlData(url):
try:
my_data = urllib2.urlopen(url)
except urllib2.URLError, e:
my_data = "-1"
return my_data
### Functions and variables for getting bus times
busTimes="Not available yet..."
def getBusTimes():
## Skanetrafiken Open Lab API
stationID = "81748"
busStopName = "Lund Gambro"
skanetrafikenURL="http://www.labs.skanetrafiken.se/v2.2/stationresults.asp?selPointFrKey=" + stationID
myStopPoint = "A" # towards city/Gunnesbo
global busTimes
##Get XML Data from API
my_data = getUrlData(skanetrafikenURL)
    if my_data == "-1":
        busTimes = "Something went wrong..."
        print "Something went wrong..."
    else:
        xml_data = minidom.parse(my_data)
        ##Get all Line elements (each arriving bus is one Line element)
results = xml_data.getElementsByTagName("Line")
# Lists for the bus times and DepDeviation
timeList = []
deviationList = []
#Loop through all departures
for departure in results:
# Get stopPoint
stopPoint = getNodeText(departure.getElementsByTagName("StopPoint")[0])
# We only want buses going towards city centre
if stopPoint == myStopPoint:
# Save bus name (bus 4)
name = getNodeText(departure.getElementsByTagName("Name")[0])
# Get date and time, formatted YYYY-MM-DDTHH:MM:SS and get only HH:MM
time = getNodeText(departure.getElementsByTagName("JourneyDateTime")[0])[11:-3]
# Check if there is any deviation in time.
if( len(departure.getElementsByTagName("DepTimeDeviation") ) != 0 ):
# IF deviation, save the deviation
deviation = getNodeText(departure.getElementsByTagName("DepTimeDeviation")[0])
else:
# if no deviation, save 0 as deviation
deviation = "0"
# Append time and deviation to respective list.
timeList.append(time)
deviationList.append(deviation)
## Create string from times and deviations
        nbrBusTimes = 6 # How many bus times can possibly fit on the screen (best case)
        maxChar = 34
        my_times = ""
        # Guard against having fewer matching departures than nbrBusTimes
        for i in range(0, min(nbrBusTimes, len(timeList))):
# Format should be HH:MM+dev
devInt = int(float(deviationList[i]))
nextTime = ""
if(devInt < 0):
nextTime = timeList[i]+deviationList[i]+" "
elif(devInt >0):
nextTime = timeList[i]+"+"+str(devInt)+" "
else:
nextTime = timeList[i]+" "
if len(my_times)+len(nextTime) < maxChar:
my_times += nextTime
busTimes = my_times
#print "New BusTimes: "+busTimes
return
### Temperature
## yr.no API
curTemp = "NA" #placeholder...
def getTemp():
placeID = "Sverige/Scania/Lund"
weatherNowURL = "http://www.yr.no/place/" + placeID + "/forecast.xml"
global curTemp
my_data = getUrlData(weatherNowURL)
    if my_data == "-1":
        curTemp = "NA"
        print "Lost internet connection..."
    else:
        xml_data = minidom.parse(my_data)
node = xml_data.getElementsByTagName("temperature")[0]
temp = getNodeText(node.attributes.values()[0])
curTemp = temp
#print "New temp: "+curTemp
### Exchange rates
my_currencies = ["USD", "EUR","CNY","GBP","NOK", "DKK"]
exchange_rates = []
for i in range(0,len(my_currencies)):
exchange_rates.append("N/A") #placeholder
xrt_count = 0
# API information
app_id = ""
with open("openxrtappid") as f:
    app_id = f.readline().strip()  # strip the trailing newline so the URL stays valid
base_url = "https://openexchangerates.org/api/"
def getLatest(currencies):
latest = "latest.json?app_id="
# Create URL
my_url = base_url + latest + app_id
# Get JSON data from URL
json_data = json.load(getUrlData(my_url))
# Get exchange rates from JSON data
rates = json_data['rates']
my_rates = []
for currency in currencies:
#print currency
        # All rates are quoted against USD, so we convert to SEK...
USD = rates['SEK']
if "USD" in currency:
this_xrt = "%.2f" % USD
else:
this_xrt = "%.2f" % (USD/rates[currency])
        # After computing the rate, append it to the result list
#print type(this_xrt)
my_rates.append(this_xrt)
#print my_rates
return my_rates
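# Illustrative sketch (hypothetical numbers, not called anywhere): openexchangerates
# quotes every currency against USD, so the SEK price of one unit of currency X is
# rates['SEK'] / rates[X], which is what the loop above computes.
def exampleCrossRate():
    "Returns the SEK price of one EUR for made-up rates USD/SEK=10.0 and USD/EUR=0.9"
    rates = {'SEK': 10.0, 'EUR': 0.9}
    return "%.2f" % (rates['SEK'] / rates['EUR'])  # '11.11'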
def getHistory(date, currencies):
history = "historical/"+date+".json?app_id="
# Create URL
my_url = base_url + history + app_id
#print my_url
# Get JSON data from URL
json_data = json.load(getUrlData(my_url))
rates = json_data['rates']
my_rates = []
for currency in currencies:
#print currency
        # All rates are quoted against USD, so we convert to SEK...
USD = rates['SEK']
if "USD" in currency:
this_xrt = "%.2f" % USD
else:
this_xrt = "%.2f" % (USD/rates[currency])
        # After computing the rate, append it to the result list
#print type(this_xrt)
my_rates.append(this_xrt)
#print my_rates
return my_rates
def getPercent(now,then):
percents = []
nbr = len(now)
for i in range(0, nbr):
#print float(now[i])
#print float(then[i])
percent = 100*(float(now[i]) - float(then[i]))/float(then[i])
#print percent
percents.append(str("%.2f" % percent))
return percents
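# Illustrative sketch (made-up rates, not called anywhere): getPercent() returns the
# percentage change of each "now" rate against the matching "then" rate, e.g.
# 100 * (10.50 - 10.00) / 10.00 = 5.00.
def examplePercentChange():
    "Returns ['5.00']: a move from 10.00 to 10.50 SEK is a +5.00% change"
    return getPercent(["10.50"], ["10.00"])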
## Function for getting XRT (Exchange Rate)
def changeXRT_count():
global xrt_count
xrt_count+=1
if xrt_count >= len(my_currencies):
xrt_count = 0
def getXRT():
    # Update the module-level list of formatted exchange-rate strings
global exchange_rates
#print "get latest XRT"
xrt_latest = getLatest(my_currencies)
# Get dates
date_today = datetime.now().date()
date_oneday = str(date_today - timedelta(days = 1))
date_oneweek = str(date_today - timedelta(days = 7))
date_onemonth = str(date_today - timedelta(days = 30))
date_oneyear = str(date_today - timedelta(days = 365))
#Getting historical data
xrt_oneday = getHistory(date_oneday, my_currencies)
xrt_oneweek = getHistory(date_oneweek, my_currencies)
xrt_onemonth = getHistory(date_onemonth, my_currencies)
xrt_oneyear = getHistory(date_oneyear, my_currencies)
# Calculating percentages
percent_oneday = getPercent(xrt_latest,xrt_oneday)
percent_oneweek = getPercent(xrt_latest,xrt_oneweek)
percent_onemonth = getPercent(xrt_latest,xrt_onemonth)
percent_oneyear = getPercent(xrt_latest,xrt_oneyear)
#Store to array of rates
for i in range(0,len(my_currencies)):
exchange_rates[i] = my_currencies[i]+": "+xrt_latest[i]+"kr "+percent_oneday[i]+"% "+percent_oneweek[i]+"% "+percent_onemonth[i]+"% "+percent_oneyear[i]+"% "
### News from Reddit World News
feed_url = "http://www.reddit.com/r/worldnews/.rss"
nbrTitles = 10 #number of headlines wanted
news_count = 0
curNews = []
for i in range(0,nbrTitles):
curNews.append("N/A")
scrollCount = 0
# For changing which headline to show
def changenews_count():
global news_count
news_count+=1
if news_count >= nbrTitles:
news_count = 0
global scrollCount
scrollCount = 0
return
# For getting news
def getNews():
#Downloading feed
d = feedparser.parse(feed_url)
global curNews
# Emptying curNews
curNews = []
# Fill it up with news from feed
for post in d.entries:
#print "Printing headline"
curNews.append(post.title)
return
### LCD Functions
def printLine(lineNr, str):
    "Prints one line on the LCD: lineNr (0-3) selects the LCD row; str is the string to print, max 40 chars (cropped if longer)"
    # Pad with trailing spaces so the rest of the row is cleared automatically
    str += " " * 40
    str = str[:40]  # Crop string to the first 40 chars
# If lineNr 0 or 1, top LCD
if lineNr==0 or lineNr==1:
lcd_top.cursor_pos=(lineNr,0)
lcd_top.write_string(str)
# If lineNr 2 or 3, bottom LCD
elif lineNr==2 or lineNr==3:
lineNr-=2 #Still called as row 0,1 to lcd...
lcd_bottom.cursor_pos=(lineNr,0)
lcd_bottom.write_string(str)
return
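### Illustrative sketch (for manual testing only, not called anywhere): printLine() hides
### the split-controller layout, so rows 0-1 go to lcd_top and rows 2-3 go to lcd_bottom,
### each at internal row 0 or 1.
def demoRowMapping():
    "Writes a label to each of the four visible LCD rows"
    for row in range(0, 4):
        printLine(row, "Row " + str(row))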
def clearLine(lineNr):
printLine(lineNr, " ")
return
def firstString():
"Creates first string for LCD"
degree_sign=unichr(223).rstrip()
str = "Lund "+curTemp+degree_sign+"C, "+getTime()
return str
def secondString():
"Creates second string for LCD"
str = "Bus 4: "+busTimes
return str
def thirdString():
"Creates third string for LCD"
    global scrollCount
    my_news = " "+curNews[news_count]
    str = "News: "+my_news[scrollCount:scrollCount+34]
    scrollCount += 1
return str
def fourthString():
"Creates fourth string for LCD"
global xrt_count
str = exchange_rates[xrt_count]
return str
def updateLCD():
printLine(0,firstString())
printLine(1,secondString())
printLine(2,thirdString())
printLine(3,fourthString())
#print "LCD update"
## Shutdown management
def shutdown_message():
# Print shutdown message
printLine(0,40*'-')
printLine(1,13*' '+"Shutting down")
printLine(2,5*' '+"Re-plug power cable to restart")
printLine(3,40*'-')
# Terminate LCD program
quit()
### MAIN PROGRAM
# Remove old shutdown file
try:
os.remove("/home/pi/RPi-LCD/shutdown")
except (OSError):
pass
### SCHEDULING
# Run everything once at start of program
getBusTimes()
getTemp()
getXRT()
getNews()
updateLCD()
#Update bus times every 30 sec
schedule.every(30).seconds.do(getBusTimes)
# Update temp every 30 minutes
schedule.every(30).minutes.do(getTemp)
# Update exchange rate XRT every 12 hours
schedule.every(12).hours.do(getXRT)
# Update exchange rate counter every 15 seconds
schedule.every(15).seconds.do(changeXRT_count)
# Update news every 30 mins
schedule.every(30).minutes.do(getNews)
# Update news counter every 20 seconds
schedule.every(20).seconds.do(changenews_count)
### MAIN FUNCTION
cnt=0
while True:
schedule.run_pending()
time.sleep(0.01)
#Check if shutting down
try:
if os.path.isfile("/home/pi/RPi-LCD/shutdown"):
print "Shutting down"
shutdown_message()
except (OSError):
pass
    # Update LCD every loop iteration (roughly every 10 ms, given the 0.01 s sleep)
updateLCD()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
# Copyright 2013 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
# @author: Youcef Laribi, Citrix
import weakref
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.services.loadbalancer.drivers.netscaler import (
agent_api,
plugin_driver
)
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
OPTS = [
cfg.StrOpt(
'device_driver',
default=('neutron.services.loadbalancer.drivers'
'.netscaler.netscaler_ncc_driver.AgentDriver'),
help=_('The driver used to manage the NetScaler devices'),
),
cfg.StrOpt(
'agent_bind_host',
default='0.0.0.0',
help=_('The host IP address on which the lbaas agent listens'),
),
cfg.StrOpt(
'agent_bind_port',
default='20371',
help=_('The host port address on which the lbaas agent listens')
)
]
class LogicalDeviceCache(object):
"""Manage a cache of known devices."""
class Device(object):
"""Inner classes used to hold values for weakref lookups."""
def __init__(self, port_id, pool_id):
self.port_id = port_id
self.pool_id = pool_id
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __hash__(self):
return hash((self.port_id, self.pool_id))
def __init__(self):
self.devices = set()
self.port_lookup = weakref.WeakValueDictionary()
self.pool_lookup = weakref.WeakValueDictionary()
def put(self, device):
port_id = device['vip']['port_id']
pool_id = device['pool']['id']
d = self.Device(device['vip']['port_id'], device['pool']['id'])
if d not in self.devices:
self.devices.add(d)
self.port_lookup[port_id] = d
self.pool_lookup[pool_id] = d
def remove(self, device):
if not isinstance(device, self.Device):
device = self.Device(
device['vip']['port_id'], device['pool']['id']
)
if device in self.devices:
self.devices.remove(device)
def remove_by_pool_id(self, pool_id):
d = self.pool_lookup.get(pool_id)
if d:
self.devices.remove(d)
def get_by_pool_id(self, pool_id):
return self.pool_lookup.get(pool_id)
def get_by_port_id(self, port_id):
return self.port_lookup.get(port_id)
def get_pool_ids(self):
return self.pool_lookup.keys()
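# Illustrative sketch (made-up ids, not used by the agent): the cache above stores one
# Device entry per (vip port, pool) pair and indexes it by both keys, so either lookup
# resolves to the same object.
def _example_cache_usage():
    cache = LogicalDeviceCache()
    cache.put({'vip': {'port_id': 'port-1'}, 'pool': {'id': 'pool-1'}})
    device = cache.get_by_pool_id('pool-1')
    # Both lookups return the same weakly-referenced Device instance
    return device is cache.get_by_port_id('port-1')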
class LbaasAgentManager(periodic_task.PeriodicTasks):
# history
# 1.0 Initial version
# 1.1 Support agent_updated call
RPC_API_VERSION = '1.1'
def __init__(self, conf):
self.conf = conf
try:
self.driver = importutils.import_object(
conf.device_driver, self.conf)
except ImportError:
msg = _('Error importing loadbalancer device driver: %s')
raise SystemExit(msg % conf.device_driver)
self.agent_state = {
'binary': 'neutron-loadbalancer-agent',
'host': conf.host,
'topic': plugin_driver.TOPIC_LOADBALANCER_AGENT,
'configurations': {'device_driver': conf.device_driver},
'agent_type': constants.AGENT_TYPE_LOADBALANCER,
'start_flag': True}
self.admin_state_up = True
self.context = context.get_admin_context_without_session()
self._setup_rpc()
self.needs_resync = False
self.cache = LogicalDeviceCache()
def _setup_rpc(self):
self.plugin_rpc = agent_api.LbaasAgentApi(
plugin_driver.TOPIC_LOADBALANCER_DEVICE,
self.context,
self.conf.host
)
self.state_rpc = agent_rpc.PluginReportStateAPI(
plugin_driver.TOPIC_LOADBALANCER_DEVICE)
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
device_count = len(self.cache.devices)
self.agent_state['configurations']['devices'] = device_count
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
@periodic_task.periodic_task(spacing=6)
def collect_stats(self, context):
for pool_id in self.cache.get_pool_ids():
try:
stats = self.driver.get_stats(pool_id)
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats)
except Exception:
                LOG.exception(_('Error updating stats'))
self.needs_resync = True
    def create_vip(self, context, vip, netinfo):
        """Handle RPC cast from plugin to create a vip."""
LOG.info(_("Agent received create_vip"))
self.driver.create_vip(vip, netinfo)
def update_vip(self, context, old_vip, vip, old_netinfo, netinfo):
LOG.info(_("Agent received update_vip"))
self.driver.update_vip(old_vip, vip, old_netinfo, netinfo)
def delete_vip(self, context, vip, netinfo):
LOG.info(_("Agent received delete_vip"))
self.driver.delete_vip(vip, netinfo)
def create_pool(self, context, pool, netinfo):
LOG.info(_("Agent received create_pool"))
self.driver.create_pool(pool, netinfo)
def update_pool(self, context, old_pool, pool, old_netinfo, netinfo):
LOG.info(_('Agent received update_pool...'))
self.driver.update_pool(old_pool, pool, old_netinfo, netinfo)
def delete_pool(self, context, pool, netinfo):
LOG.info(_('Agent received delete_pool...'))
self.driver.delete_pool(pool, netinfo)
def create_member(self, context, member, netinfo):
LOG.info(_('Agent received create_member...'))
self.driver.create_member(member, netinfo)
def update_member(self, context, old_member, member, old_netinfo, netinfo):
LOG.info(_('Agent received update_member...'))
self.driver.update_member(old_member, member, old_netinfo, netinfo)
def delete_member(self, context, member, netinfo):
LOG.info(_('Agent received delete_member...'))
self.driver.delete_member(member, netinfo)
def create_pool_health_monitor(self, context, health_monitor, pool_id, netinfo):
LOG.info(_('Agent received create_pool_health_monitor...'))
self.driver.create_pool_health_monitor(health_monitor, pool_id, netinfo)
def update_health_monitor(self, context, old_health_monitor, health_monitor, pool_id, netinfo):
LOG.info(_('Agent received update_health_monitor...'))
self.driver.update_health_monitor(old_health_monitor, health_monitor, pool_id, netinfo)
def delete_pool_health_monitor(self, context, health_monitor, pool_id, netinfo):
LOG.info(_('Agent received delete_pool_health_monitor...'))
self.driver.delete_pool_health_monitor(health_monitor, pool_id, netinfo)
def stats(self, context, pool_id, host):
LOG.info(_('Agent received stats...'))
@periodic_task.periodic_task(spacing=6)
def poll_for_pending_tasks(self, context):
tasks = self.driver.get_tasks()
for task in tasks:
try:
self._process_task(task)
            except Exception:
                LOG.exception(_("processing task %s failed with an exception") % task["id"])
def remove_orphans(self):
try:
self.driver.remove_orphans(self.cache.get_pool_ids())
except NotImplementedError:
pass # Not all drivers will support this
def destroy_pool(self, context, pool_id=None, host=None):
"""Handle RPC cast from plugin to destroy a pool if known to agent."""
if self.cache.get_by_pool_id(pool_id):
self.destroy_device(pool_id)
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
if payload['admin_state_up'] != self.admin_state_up:
self.admin_state_up = payload['admin_state_up']
if self.admin_state_up:
self.needs_resync = True
else:
for pool_id in self.cache.get_pool_ids():
self.destroy_device(pool_id)
LOG.info(_("agent_updated by server side %s!"), payload)
|
|
import math
import sys
import operator
import networkx as nx
#import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial.distance
import scipy.signal
import skimage
import skimage.io
from skimage.segmentation import slic
from skimage.util import img_as_float
from scipy.optimize import minimize
#import pdb
def raster_scan(img,L,U,D):
n_rows = len(img)
n_cols = len(img[0])
for x in xrange(1,n_rows - 1):
for y in xrange(1,n_cols - 1):
ix = img[x][y]
d = D[x][y]
u1 = U[x-1][y]
l1 = L[x-1][y]
u2 = U[x][y-1]
l2 = L[x][y-1]
b1 = max(u1,ix) - min(l1,ix)
b2 = max(u2,ix) - min(l2,ix)
if d <= b1 and d <= b2:
continue
elif b1 < d and b1 <= b2:
D[x][y] = b1
U[x][y] = max(u1,ix)
L[x][y] = min(l1,ix)
else:
D[x][y] = b2
U[x][y] = max(u2,ix)
L[x][y] = min(l2,ix)
return True
def raster_scan_inv(img,L,U,D):
n_rows = len(img)
n_cols = len(img[0])
for x in xrange(n_rows - 2,1,-1):
for y in xrange(n_cols - 2,1,-1):
ix = img[x][y]
d = D[x][y]
u1 = U[x+1][y]
l1 = L[x+1][y]
u2 = U[x][y+1]
l2 = L[x][y+1]
b1 = max(u1,ix) - min(l1,ix)
b2 = max(u2,ix) - min(l2,ix)
if d <= b1 and d <= b2:
continue
elif b1 < d and b1 <= b2:
D[x][y] = b1
U[x][y] = max(u1,ix)
L[x][y] = min(l1,ix)
else:
D[x][y] = b2
U[x][y] = max(u2,ix)
L[x][y] = min(l2,ix)
return True
def mbd(img, num_iters):
if len(img.shape) != 2:
print('did not get 2d np array to fast mbd')
return None
if (img.shape[0] <= 3 or img.shape[1] <= 3):
print('image is too small')
return None
L = np.copy(img)
U = np.copy(img)
D = float('Inf') * np.ones(img.shape)
D[0,:] = 0
D[-1,:] = 0
D[:,0] = 0
D[:,-1] = 0
# unfortunately, iterating over numpy arrays is very slow
img_list = img.tolist()
L_list = L.tolist()
U_list = U.tolist()
D_list = D.tolist()
for x in xrange(0,num_iters):
if x%2 == 1:
raster_scan(img_list,L_list,U_list,D_list)
else:
raster_scan_inv(img_list,L_list,U_list,D_list)
return np.array(D_list)
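# Illustrative sketch (toy input, not called anywhere): on a small constant image with a
# single bright pixel, the approximate MBD transform above keeps distance 0 on the seeded
# border and assigns the interior bright pixel a positive barrier distance.
def _example_mbd():
    toy = np.zeros((5, 5))
    toy[2, 2] = 1.0
    dist = mbd(toy, 3)
    # The bright interior pixel ends up with a larger barrier distance than the border
    return dist[2, 2] > dist[0, 0]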
def get_saliency_mbd(input,method='b'):
img_path_list = []
#we get either a filename or a list of filenames
if type(input) == type(str()):
img_path_list.append(input)
elif type(input) == type(list()):
img_path_list = input
else:
        print('Input type is neither a list nor a string')
return None
# Saliency map calculation based on: Minimum Barrier Salient Object Detection at 80 FPS
for img_path in img_path_list:
img = skimage.io.imread(img_path)
img_mean = np.mean(img,axis=(2))
sal = mbd(img_mean,3)
if method == 'b':
# get the background map
# paper uses 30px for an image of size 300px, so we use 10%
(n_rows,n_cols,n_channels) = img.shape
img_size = math.sqrt(n_rows * n_cols)
border_thickness = int(math.floor(0.1 * img_size))
img_lab = img_as_float(skimage.color.rgb2lab(img))
px_left = img_lab[0:border_thickness,:,:]
px_right = img_lab[n_rows - border_thickness-1:-1,:,:]
px_top = img_lab[:,0:border_thickness,:]
px_bottom = img_lab[:,n_cols - border_thickness-1:-1,:]
px_mean_left = np.mean(px_left,axis=(0,1))
px_mean_right = np.mean(px_right,axis=(0,1))
px_mean_top = np.mean(px_top,axis=(0,1))
px_mean_bottom = np.mean(px_bottom,axis=(0,1))
px_left = px_left.reshape((n_cols*border_thickness,3))
px_right = px_right.reshape((n_cols*border_thickness,3))
px_top = px_top.reshape((n_rows*border_thickness,3))
px_bottom = px_bottom.reshape((n_rows*border_thickness,3))
cov_left = np.cov(px_left.T)
cov_right = np.cov(px_right.T)
cov_top = np.cov(px_top.T)
cov_bottom = np.cov(px_bottom.T)
cov_left = np.linalg.inv(cov_left)
cov_right = np.linalg.inv(cov_right)
cov_top = np.linalg.inv(cov_top)
cov_bottom = np.linalg.inv(cov_bottom)
u_left = np.zeros(sal.shape)
u_right = np.zeros(sal.shape)
u_top = np.zeros(sal.shape)
u_bottom = np.zeros(sal.shape)
u_final = np.zeros(sal.shape)
img_lab_unrolled = img_lab.reshape(img_lab.shape[0]*img_lab.shape[1],3)
px_mean_left_2 = np.zeros((1,3))
px_mean_left_2[0,:] = px_mean_left
u_left = scipy.spatial.distance.cdist(img_lab_unrolled,px_mean_left_2,'mahalanobis', VI=cov_left)
u_left = u_left.reshape((img_lab.shape[0],img_lab.shape[1]))
px_mean_right_2 = np.zeros((1,3))
px_mean_right_2[0,:] = px_mean_right
u_right = scipy.spatial.distance.cdist(img_lab_unrolled,px_mean_right_2,'mahalanobis', VI=cov_right)
u_right = u_right.reshape((img_lab.shape[0],img_lab.shape[1]))
px_mean_top_2 = np.zeros((1,3))
px_mean_top_2[0,:] = px_mean_top
u_top = scipy.spatial.distance.cdist(img_lab_unrolled,px_mean_top_2,'mahalanobis', VI=cov_top)
u_top = u_top.reshape((img_lab.shape[0],img_lab.shape[1]))
px_mean_bottom_2 = np.zeros((1,3))
px_mean_bottom_2[0,:] = px_mean_bottom
u_bottom = scipy.spatial.distance.cdist(img_lab_unrolled,px_mean_bottom_2,'mahalanobis', VI=cov_bottom)
u_bottom = u_bottom.reshape((img_lab.shape[0],img_lab.shape[1]))
max_u_left = np.max(u_left)
max_u_right = np.max(u_right)
max_u_top = np.max(u_top)
max_u_bottom = np.max(u_bottom)
u_left = u_left / max_u_left
u_right = u_right / max_u_right
u_top = u_top / max_u_top
u_bottom = u_bottom / max_u_bottom
u_max = np.maximum(np.maximum(np.maximum(u_left,u_right),u_top),u_bottom)
u_final = (u_left + u_right + u_top + u_bottom) - u_max
u_max_final = np.max(u_final)
sal_max = np.max(sal)
sal = sal / sal_max + u_final / u_max_final
#postprocessing
# apply centredness map
sal = sal / np.max(sal)
s = np.mean(sal)
alpha = 50.0
delta = alpha * math.sqrt(s)
xv,yv = np.meshgrid(np.arange(sal.shape[1]),np.arange(sal.shape[0]))
(w,h) = sal.shape
w2 = w/2.0
h2 = h/2.0
C = 1 - np.sqrt(np.power(xv - h2,2) + np.power(yv - w2,2)) / math.sqrt(np.power(w2,2) + np.power(h2,2))
sal = sal * C
#increase bg/fg contrast
def f(x):
b = 10.0
return 1.0 / (1.0 + math.exp(-b*(x - 0.5)))
fv = np.vectorize(f)
sal = sal / np.max(sal)
sal = fv(sal)
return sal* 255.0
|
|
from __future__ import division, with_statement
import gzip
import mmap
import os
import shutil
import sys
import warnings
import zipfile
import numpy as np
from ..extern.six import BytesIO
import pyfits as fits
from ..convenience import _getext
from ..file import _File
from ..util import PyfitsDeprecationWarning
from . import PyfitsTestCase
from .util import catch_warnings, ignore_warnings, CaptureStdio
from nose.tools import assert_raises
class TestCore(PyfitsTestCase):
def test_with_statement(self):
with fits.open(self.data('ascii.fits')) as f:
pass
def test_missing_file(self):
assert_raises(IOError, fits.open, self.temp('does-not-exist.fits'))
def test_naxisj_check(self):
hdulist = fits.open(self.data('o4sp040b0_raw.fits'))
hdulist[1].header['NAXIS3'] = 500
assert 'NAXIS3' in hdulist[1].header
hdulist.verify('silentfix')
assert 'NAXIS3' not in hdulist[1].header
def test_byteswap(self):
p = fits.PrimaryHDU()
l = fits.HDUList()
n = np.zeros(3, dtype='i2')
n[0] = 1
n[1] = 60000
n[2] = 2
c = fits.Column(name='foo', format='i2', bscale=1, bzero=32768,
array=n)
t = fits.BinTableHDU.from_columns([c])
l.append(p)
l.append(t)
l.writeto(self.temp('test.fits'), clobber=True)
with fits.open(self.temp('test.fits')) as p:
assert p[1].data[1]['foo'] == 60000.0
def test_add_del_columns(self):
p = fits.ColDefs([])
p.add_col(fits.Column(name='FOO', format='3J'))
p.add_col(fits.Column(name='BAR', format='1I'))
assert p.names == ['FOO', 'BAR']
p.del_col('FOO')
assert p.names == ['BAR']
def test_add_del_columns2(self):
hdulist = fits.open(self.data('tb.fits'))
table = hdulist[1]
assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4')
assert table.columns.names == ['c1', 'c2', 'c3', 'c4']
table.columns.del_col('c1')
assert table.data.dtype.names == ('c2', 'c3', 'c4')
assert table.columns.names == ['c2', 'c3', 'c4']
table.columns.del_col('c3')
assert table.data.dtype.names == ('c2', 'c4')
assert table.columns.names == ['c2', 'c4']
table.columns.add_col(fits.Column('foo', '3J'))
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
hdulist.writeto(self.temp('test.fits'), clobber=True)
with ignore_warnings():
# TODO: The warning raised by this test is actually indication of a
# bug and should *not* be ignored. But as it is a known issue we
# hide it for now. See
# https://github.com/spacetelescope/PyFITS/issues/44
with fits.open(self.temp('test.fits')) as hdulist:
table = hdulist[1]
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
@ignore_warnings(PyfitsDeprecationWarning)
def test_update_header_card(self):
"""A very basic test for the Header.update method--I'd like to add a
few more cases to this at some point.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
header['BITPIX'] = (16, comment)
assert 'BITPIX' in header
assert header['BITPIX'] == 16
assert header.ascard['BITPIX'].comment == comment
# The new API doesn't support savecomment so leave this line here; at
# any rate good to have testing of the new API mixed with the old API
header.update('BITPIX', 32, savecomment=True)
# Make sure the value has been updated, but the comment was preserved
assert header['BITPIX'] == 32
assert header.ascard['BITPIX'].comment == comment
# The comment should still be preserved--savecomment only takes effect if
# a new comment is also specified
header['BITPIX'] = 16
assert header.ascard['BITPIX'].comment == comment
header.update('BITPIX', 16, 'foobarbaz', savecomment=True)
assert header.ascard['BITPIX'].comment == comment
@ignore_warnings(PyfitsDeprecationWarning)
    def test_set_card_value(self):
        """Similar to test_update_header_card(), but tests the
`header['FOO'] = 'bar'` method of updating card values.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
card = fits.Card.fromstring('BITPIX = 32 / %s' % comment)
header.ascard.append(card)
header['BITPIX'] = 32
assert 'BITPIX' in header
assert header['BITPIX'] == 32
assert header.ascard['BITPIX'].key == 'BITPIX'
assert header.ascard['BITPIX'].value == 32
assert header.ascard['BITPIX'].comment == comment
def test_uint(self):
hdulist_f = fits.open(self.data('o4sp040b0_raw.fits'))
hdulist_i = fits.open(self.data('o4sp040b0_raw.fits'), uint=True)
assert hdulist_f[1].data.dtype == np.float32
assert hdulist_i[1].data.dtype == np.uint16
assert np.all(hdulist_f[1].data == hdulist_i[1].data)
@ignore_warnings(PyfitsDeprecationWarning)
def test_fix_missing_card_append(self):
hdu = fits.ImageHDU()
errs = hdu.req_cards('TESTKW', None, None, 'foo', 'silentfix', [])
assert len(errs) == 1
assert 'TESTKW' in hdu.header
assert hdu.header['TESTKW'] == 'foo'
assert hdu.header.ascard[-1].key == 'TESTKW'
def test_fix_invalid_keyword_value(self):
hdu = fits.ImageHDU()
hdu.header['TESTKW'] = 'foo'
errs = hdu.req_cards('TESTKW', None,
lambda v: v == 'foo', 'foo', 'ignore', [])
assert len(errs) == 0
# Now try a test that will fail, and ensure that an error will be
# raised in 'exception' mode
errs = hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar',
'exception', [])
assert len(errs) == 1
assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
# See if fixing will work
hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'silentfix',
[])
assert hdu.header['TESTKW'] == 'bar'
def test_unfixable_missing_card(self):
class TestHDU(fits.hdu.base.NonstandardExtHDU):
def _verify(self, option='warn'):
errs = super(TestHDU, self)._verify(option)
hdu.req_cards('TESTKW', None, None, None, 'fix', errs)
return errs
hdu = TestHDU(header=fits.Header())
assert_raises(fits.VerifyError, hdu.verify, 'fix')
def test_exception_on_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header['XTENSION']
assert_raises(fits.VerifyError, hdu.verify, 'exception')
def test_ignore_verification_error(self):
hdu = fits.ImageHDU()
# The default here would be to issue a warning; ensure that no warnings
# or exceptions are raised
with catch_warnings():
warnings.simplefilter('error')
del hdu.header['NAXIS']
try:
hdu.verify('ignore')
except Exception:
exc = sys.exc_info()[1]
self.fail('An exception occurred when the verification error '
'should have been ignored: %s' % exc)
# Make sure the error wasn't fixed either, silently or otherwise
assert 'NAXIS' not in hdu.header
def test_unrecognized_verify_option(self):
hdu = fits.ImageHDU()
assert_raises(ValueError, hdu.verify, 'foobarbaz')
def test_combined_verify_options(self):
"""
Test verify options like fix+ignore.
"""
def make_invalid_hdu():
hdu = fits.ImageHDU()
# Add one keyword to the header that contains a fixable defect, and one
# with an unfixable defect
c1 = fits.Card.fromstring("test = ' test'")
c2 = fits.Card.fromstring("P.I. = ' Hubble'")
hdu.header.append(c1)
hdu.header.append(c2)
return hdu
# silentfix+ignore should be completely silent
hdu = make_invalid_hdu()
with catch_warnings():
warnings.simplefilter('error')
try:
hdu.verify('silentfix+ignore')
except Exception:
exc = sys.exc_info()[1]
self.fail('An exception occurred when the verification error '
'should have been ignored: %s' % exc)
# silentfix+warn should be quiet about the fixed HDU and only warn
# about the unfixable one
hdu = make_invalid_hdu()
with catch_warnings(record=True) as w:
hdu.verify('silentfix+warn')
assert len(w) == 4
assert 'Illegal keyword name' in str(w[2].message)
# silentfix+exception should only mention the unfixable error in the
# exception
hdu = make_invalid_hdu()
try:
hdu.verify('silentfix+exception')
except fits.VerifyError:
exc = sys.exc_info()[1]
assert 'Illegal keyword name' in str(exc)
assert 'not upper case' not in str(exc)
else:
self.fail('An exception should have been raised.')
# fix+ignore is not too useful, but it should warn about the fixed
# problems while saying nothing about the unfixable problems
hdu = make_invalid_hdu()
with catch_warnings(record=True) as w:
hdu.verify('fix+ignore')
assert len(w) == 4
assert 'not upper case' in str(w[2].message)
# fix+warn
hdu = make_invalid_hdu()
with catch_warnings(record=True) as w:
hdu.verify('fix+warn')
assert len(w) == 6
assert 'not upper case' in str(w[2].message)
assert 'Illegal keyword name' in str(w[4].message)
# fix+exception
hdu = make_invalid_hdu()
try:
hdu.verify('fix+exception')
except fits.VerifyError:
exc = sys.exc_info()[1]
assert 'Illegal keyword name' in str(exc)
assert 'not upper case' in str(exc)
else:
self.fail('An exception should have been raised.')
def test_getext(self):
"""
Test the various different ways of specifying an extension header in
the convenience functions.
"""
hl, ext = _getext(self.data('test0.fits'), 'readonly', 1)
assert ext == 1
assert_raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
1, 2)
assert_raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
(1, 2))
assert_raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
'sci', 'sci')
assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
1, 2, 3)
hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=1)
assert ext == 1
hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=('sci', 2))
assert ext == ('sci', 2)
assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
1, ext=('sci', 2), extver=3)
assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
ext=('sci', 2), extver=3)
hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci')
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci', 1)
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', ('sci', 1))
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci',
extver=1, do_not_scale_image_data=True)
assert ext == ('sci', 1)
assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
'sci', ext=1)
assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
'sci', 1, extver=2)
hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci')
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci',
extver=1)
assert ext == ('sci', 1)
assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
extver=1)
def test_extension_name_case_sensitive(self):
"""
Tests that setting fits.EXTENSION_NAME_CASE_SENSITIVE at runtime
works.
"""
if 'PYFITS_EXTENSION_NAME_CASE_SENSITIVE' in os.environ:
del os.environ['PYFITS_EXTENSION_NAME_CASE_SENSITIVE']
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
try:
fits.EXTENSION_NAME_CASE_SENSITIVE = True
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'sCi'
assert hdu.header['EXTNAME'] == 'sCi'
finally:
fits.EXTENSION_NAME_CASE_SENSITIVE = False
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
def test_hdu_fromstring(self):
"""
Tests creating a fully-formed HDU object from a string containing the
bytes of the HDU.
"""
dat = open(self.data('test0.fits'), 'rb').read()
offset = 0
with fits.open(self.data('test0.fits')) as hdul:
hdulen = hdul[0]._data_offset + hdul[0]._data_size
hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header == hdu.header
assert hdu.data is None
hdu.header['TEST'] = 'TEST'
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header[:-1] == hdu.header[:-1]
assert hdul[0].header['TEST'] == 'TEST'
assert hdu.data is None
        with fits.open(self.data('test0.fits')) as hdul:
for ext_hdu in hdul[1:]:
offset += hdulen
hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen])
assert isinstance(hdu, fits.ImageHDU)
assert ext_hdu.header == hdu.header
assert (ext_hdu.data == hdu.data).all()
def test_nonstandard_hdu(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
without prepending a superfluous and unwanted standard primary HDU.
"""
data = np.arange(100, dtype=np.uint8)
hdu = fits.PrimaryHDU(data=data)
hdu.header['SIMPLE'] = False
hdu.writeto(self.temp('test.fits'))
info = [(0, '', 'NonstandardHDU', 5, (), '', '')]
with fits.open(self.temp('test.fits')) as hdul:
assert hdul.info(output=False) == info
# NonstandardHDUs just treat the data as an unspecified array of
# bytes. The first 100 bytes should match the original data we
# passed in...the rest should be zeros padding out the rest of the
# FITS block
assert (hdul[0].data[:100] == data).all()
assert (hdul[0].data[100:] == 0).all()
def test_extname(self):
"""Test getting/setting the EXTNAME of an HDU."""
h1 = fits.PrimaryHDU()
assert h1.name == 'PRIMARY'
# Normally a PRIMARY HDU should not have an EXTNAME, though it should
# have a default .name attribute
assert 'EXTNAME' not in h1.header
# The current version of the FITS standard does allow PRIMARY HDUs to
# have an EXTNAME, however.
h1.name = 'NOTREAL'
assert h1.name == 'NOTREAL'
assert h1.header.get('EXTNAME') == 'NOTREAL'
# Updating the EXTNAME in the header should update the .name
h1.header['EXTNAME'] = 'TOOREAL'
assert h1.name == 'TOOREAL'
# If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
# to the default
del h1.header['EXTNAME']
assert h1.name == 'PRIMARY'
# For extension HDUs the situation is a bit simpler:
h2 = fits.ImageHDU()
assert h2.name == ''
assert 'EXTNAME' not in h2.header
h2.name = 'HELLO'
assert h2.name == 'HELLO'
assert h2.header.get('EXTNAME') == 'HELLO'
h2.header['EXTNAME'] = 'GOODBYE'
assert h2.name == 'GOODBYE'
    def test_extver_extlevel(self):
        """Test getting/setting the EXTVER and EXTLEVEL of an HDU."""
        # EXTVER and EXTLEVEL work exactly the same; their semantics are, for
# now, to be inferred by the user. Although they should never be less
# than 1, the standard does not explicitly forbid any value so long as
# it's an integer
h1 = fits.PrimaryHDU()
assert h1.ver == 1
assert h1.level == 1
assert 'EXTVER' not in h1.header
assert 'EXTLEVEL' not in h1.header
h1.ver = 2
assert h1.header.get('EXTVER') == 2
h1.header['EXTVER'] = 3
assert h1.ver == 3
del h1.header['EXTVER']
        assert h1.ver == 1
h1.level = 2
assert h1.header.get('EXTLEVEL') == 2
h1.header['EXTLEVEL'] = 3
assert h1.level == 3
del h1.header['EXTLEVEL']
assert h1.level == 1
assert_raises(TypeError, setattr, h1, 'ver', 'FOO')
assert_raises(TypeError, setattr, h1, 'level', 'BAR')
def test_consecutive_writeto(self):
"""
Regression test for an issue where calling writeto twice on the same
HDUList could write a corrupted file.
https://github.com/spacetelescope/PyFITS/issues/40 is actually a
        particular instance of this problem, though it isn't unique to sys.stdout.
"""
with fits.open(self.data('test0.fits')) as hdul1:
# Add a bunch of header keywords so that the data will be forced to
# new offsets within the file:
for idx in range(40):
hdul1[1].header['TEST%d' % idx] = 'test'
hdul1.writeto(self.temp('test1.fits'))
hdul1.writeto(self.temp('test2.fits'))
# Open a second handle to the original file and compare it to hdul1
# (We only compare part of the one header that was modified)
# Compare also with the second writeto output
with fits.open(self.data('test0.fits')) as hdul2:
with fits.open(self.temp('test2.fits')) as hdul3:
for hdul in (hdul1, hdul3):
for idx, hdus in enumerate(zip(hdul1, hdul)):
hdu1, hdu2 = hdus
if idx != 1:
assert hdu1.header == hdu2.header
else:
assert (hdu1.header ==
hdu2.header[:len(hdu1.header)])
assert np.all(hdu1.data == hdu2.data)
class TestConvenienceFunctions(PyfitsTestCase):
def test_writeto(self):
"""
Simple test for writing a trivial header and some data to a file
with the `writeto()` convenience function.
"""
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(self.temp('array.fits'), data, header=header,
clobber=True)
hdul = fits.open(self.temp('array.fits'))
assert len(hdul) == 1
assert (data == hdul[0].data).all()
@ignore_warnings(PyfitsDeprecationWarning)
def test_writeto_2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
Test of `writeto()` with a trivial header containing a single keyword.
"""
data = np.zeros((100, 100))
header = fits.Header()
header.update('CRPIX1', 1.)
fits.writeto(self.temp('array.fits'), data, header=header,
clobber=True, output_verify='silentfix')
hdul = fits.open(self.temp('array.fits'))
assert len(hdul) == 1
assert (data == hdul[0].data).all()
assert 'CRPIX1' in hdul[0].header
assert hdul[0].header['CRPIX1'] == 1.0
class TestFileFunctions(PyfitsTestCase):
"""
Tests various basic I/O operations, specifically in the pyfits.file._File
class.
"""
def test_open_nonexistent(self):
"""Test that trying to open a non-existent file results in an
IOError (and not some other arbitrary exception).
"""
try:
fits.open(self.temp('foobar.fits'))
except IOError:
exc = sys.exc_info()[1]
assert 'File does not exist' in str(exc)
except:
raise
# But opening in ostream or append mode should be okay, since they
# allow writing new files
for mode in ('ostream', 'append'):
with fits.open(self.temp('foobar.fits'), mode=mode) as h:
pass
assert os.path.exists(self.temp('foobar.fits'))
os.remove(self.temp('foobar.fits'))
def test_open_gzipped(self):
with ignore_warnings():
assert len(fits.open(self._make_gzip_file())) == 5
def test_detect_gzipped(self):
"""Test detection of a gzip file when the extension is not .gz."""
with ignore_warnings():
assert len(fits.open(self._make_gzip_file('test0.fz'))) == 5
def test_writeto_append_mode_gzip(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/33
Check that a new GzipFile opened in append mode can be used to write
out a new FITS file.
"""
# Note: when opening a GzipFile the 'b+' is superfluous, but this was
# still how the original test case looked
# Note: with statement not supported on GzipFile in older Python
# versions
fileobj = gzip.GzipFile(self.temp('test.fits.gz'), 'ab+')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.gz')) as hdul:
assert hdul[0].header == h.header
def test_open_zipped(self):
zf = self._make_zip_file()
with ignore_warnings():
assert len(fits.open(self._make_zip_file())) == 5
with ignore_warnings():
assert len(fits.open(zipfile.ZipFile(zf))) == 5
def test_detect_zipped(self):
"""Test detection of a zip file when the extension is not .zip."""
zf = self._make_zip_file(filename='test0.fz')
with ignore_warnings():
assert len(fits.open(zf)) == 5
def test_open_zipped_writeable(self):
"""Opening zipped files in a writeable mode should fail."""
zf = self._make_zip_file()
assert_raises(IOError, fits.open, zf, 'update')
assert_raises(IOError, fits.open, zf, 'append')
zf = zipfile.ZipFile(zf, 'a')
assert_raises(IOError, fits.open, zf, 'update')
assert_raises(IOError, fits.open, zf, 'append')
def test_open_multiple_member_zipfile(self):
"""
Opening zip files containing more than one member files should fail
as there's no obvious way to specify which file is the FITS file to
read.
"""
zfile = zipfile.ZipFile(self.temp('test0.zip'), 'w')
zfile.write(self.data('test0.fits'))
zfile.writestr('foo', 'bar')
zfile.close()
assert_raises(IOError, fits.open, zfile.filename)
def test_read_open_file(self):
"""Read from an existing file object."""
with open(self.data('test0.fits'), 'rb') as f:
assert len(fits.open(f)) == 5
def test_read_closed_file(self):
"""Read from an existing file object that's been closed."""
f = open(self.data('test0.fits'), 'rb')
f.close()
assert len(fits.open(f)) == 5
def test_read_open_gzip_file(self):
"""Read from an open gzip file object."""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_gzip_file_for_writing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
gf = self._make_gzip_file()
with fits.open(gf, mode='update') as h:
h[0].header['EXPFLAG'] = 'ABNORMAL'
with fits.open(gf) as h:
            # Just to make sure the update worked; if updates work
# normal writes should work too...
assert h[0].header['EXPFLAG'] == 'ABNORMAL'
def test_read_file_like_object(self):
"""Test reading a FITS file from a file-like object."""
filelike = BytesIO()
with open(self.data('test0.fits'), 'rb') as f:
filelike.write(f.read())
filelike.seek(0)
with ignore_warnings():
assert len(fits.open(filelike)) == 5
def test_updated_file_permissions(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
Tests that when a FITS file is modified in update mode, the file
permissions are preserved.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul.writeto(filename)
old_mode = os.stat(filename).st_mode
hdul = fits.open(filename, mode='update')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
hdul.close()
assert old_mode == os.stat(filename).st_mode
def test_fileobj_mode_guessing(self):
"""Tests whether a file opened without a specified pyfits mode
('readonly', etc.) is opened in a mode appropriate for the given file
object.
"""
self.copy_file('test0.fits')
# Opening in text mode should outright fail
for mode in ('r', 'w', 'a'):
with open(self.temp('test0.fits'), mode) as f:
assert_raises(ValueError, fits.HDUList.fromfile, f)
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'readonly'
for mode in ('wb', 'ab'):
with open(self.temp('test0.fits'), mode) as f:
with fits.HDUList.fromfile(f) as h:
# Basically opening empty files for output streaming
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'wb+') as f:
with fits.HDUList.fromfile(f) as h:
# wb+ still causes an existing file to be overwritten so there
# are no HDUs
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'update'
with open(self.temp('test0.fits'), 'ab+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'append'
if sys.version_info[:2] > (2, 5):
# After a fair bit of experimentation I found that it's more difficult
# than it's worth to wrap mmap in Python 2.5.
def test_mmap_unwriteable(self):
"""Regression test for
https://github.com/astropy/astropy/issues/968
Temporarily patches mmap.mmap to exhibit platform-specific bad
behavior.
"""
class MockMmap(mmap.mmap):
def flush(self):
raise mmap.error('flush is broken on this platform')
old_mmap = mmap.mmap
mmap.mmap = MockMmap
# Force the mmap test to be rerun
_File._mmap_available = None
try:
# TODO: Use self.copy_file once it's merged into Astropy
shutil.copy(self.data('test0.fits'), self.temp('test0.fits'))
with catch_warnings(record=True) as w:
with fits.open(self.temp('test0.fits'), mode='update',
memmap=True) as h:
h[1].data[0, 0] = 999
assert len(w) == 1
assert 'mmap.flush is unavailable' in str(w[0].message)
# Double check that writing without mmap still worked
with fits.open(self.temp('test0.fits')) as h:
assert h[1].data[0, 0] == 999
finally:
mmap.mmap = old_mmap
_File._mmap_available = None
def test_uncloseable_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2356
Demonstrates that FITS files can still be read from "file-like" objects
that don't have an obvious "open" or "closed" state.
"""
class MyFileLike(object):
def __init__(self, foobar):
self._foobar = foobar
def read(self, n):
return self._foobar.read(n)
def seek(self, offset, whence=os.SEEK_SET):
self._foobar.seek(offset, whence)
def tell(self):
return self._foobar.tell()
with open(self.data('test0.fits'), 'rb') as f:
fileobj = MyFileLike(f)
with fits.open(fileobj) as hdul1:
with fits.open(self.data('test0.fits')) as hdul2:
assert hdul1.info(output=False) == hdul2.info(output=False)
for hdu1, hdu2 in zip(hdul1, hdul2):
assert hdu1.header == hdu2.header
if hdu1.data is not None and hdu2.data is not None:
assert np.all(hdu1.data == hdu2.data)
def _make_gzip_file(self, filename='test0.fits.gz'):
gzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
gz = gzip.open(gzfile, 'wb')
gz.write(f.read())
gz.close()
return gzfile
def _make_zip_file(self, mode='copyonwrite', filename='test0.fits.zip'):
zfile = zipfile.ZipFile(self.temp(filename), 'w')
zfile.write(self.data('test0.fits'))
zfile.close()
return zfile.filename
class TestStreamingFunctions(PyfitsTestCase):
"""Test functionality of the StreamingHDU class."""
def test_streaming_hdu(self):
shdu = self._make_streaming_hdu(self.temp('new.fits'))
assert isinstance(shdu.size, int)
assert shdu.size == 100
def test_streaming_hdu_file_wrong_mode(self):
"""
Test that streaming an HDU to a file opened in the wrong mode fails as
expected (any writeable mode is acceptable; any read-only mode should
fail).
"""
# touch new.fits
with open(self.temp('new.fits'), 'wb'):
pass
with open(self.temp('new.fits'), 'rb') as f:
header = fits.Header()
assert_raises(ValueError, fits.StreamingHDU, f, header)
def test_streaming_hdu_write_file(self):
"""Test streaming an HDU to an open file object."""
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
hdul = fits.open(self.temp('new.fits'))
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_write_file_like(self):
"""Test streaming an HDU to an open file-like object."""
arr = np.zeros((5, 5), dtype=np.int32)
# The file-like object underlying a StreamingHDU must be in binary mode
sf = BytesIO()
shdu = self._make_streaming_hdu(sf)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_append_extension(self):
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
# Doing this again should update the file with an extension
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
def test_fix_invalid_extname(self):
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU()
ihdu.header['EXTNAME'] = 12345678
hdul = fits.HDUList([phdu, ihdu])
assert_raises(fits.VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
with CaptureStdio():
hdul.writeto(self.temp('temp.fits'), output_verify='fix')
with fits.open(self.temp('temp.fits')):
assert hdul[1].name == '12345678'
assert hdul[1].header['EXTNAME'] == '12345678'
def _make_streaming_hdu(self, fileobj):
hd = fits.Header()
hd['SIMPLE'] = (True, 'conforms to FITS standard')
hd['BITPIX'] = (32, 'array data type')
hd['NAXIS'] = (2, 'number of array dimensions')
hd['NAXIS1'] = 5
hd['NAXIS2'] = 5
hd['EXTEND'] = True
return fits.StreamingHDU(fileobj, hd)
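# Illustrative sketch (not part of the test suite above): the basic StreamingHDU
# workflow these tests exercise. Build the full header first, then write the data
# in one or more chunks; the path and array below are hypothetical.
#
#     hd = fits.Header()
#     hd['SIMPLE'] = (True, 'conforms to FITS standard')
#     hd['BITPIX'] = (32, 'array data type')
#     hd['NAXIS'] = (2, 'number of array dimensions')
#     hd['NAXIS1'] = 5
#     hd['NAXIS2'] = 5
#     shdu = fits.StreamingHDU('streamed.fits', hd)
#     shdu.write(np.zeros((5, 5), dtype=np.int32))
#     shdu.close()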
|
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import re
import time
import logging
import subprocess
import signal
import flask
import gevent.event
from . import utils
import digits.log
from config import config_value
from status import Status, StatusCls
import platform
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 1
class Task(StatusCls):
"""
Base class for Tasks
A Task is a compute-heavy operation that runs in a separate executable
Communication is done by processing the stdout of the executable
"""
def __init__(self, job_dir, parents=None):
super(Task, self).__init__()
self.pickver_task = PICKLE_VERSION
self.job_dir = job_dir
self.job_id = os.path.basename(job_dir)
if parents is None:
self.parents = None
elif isinstance(parents, (list, tuple)):
self.parents = parents
elif isinstance(parents, Task):
self.parents = [parents]
else:
raise TypeError('parents is %s' % type(parents))
self.exception = None
self.traceback = None
self.aborted = gevent.event.Event()
self.set_logger()
def __getstate__(self):
d = self.__dict__.copy()
if 'aborted' in d:
del d['aborted']
if 'logger' in d:
del d['logger']
return d
def __setstate__(self, state):
self.__dict__ = state
self.aborted = gevent.event.Event()
self.set_logger()
def set_logger(self):
self.logger = digits.log.JobIdLoggerAdapter(
logging.getLogger('digits.webapp'),
{'job_id': self.job_id},
)
def name(self):
"""
Returns a string
"""
raise NotImplementedError
def html_id(self):
"""
Returns a string
"""
return 'task-%s' % id(self)
def on_status_update(self):
"""
Called when StatusCls.status.setter is used
"""
from digits.webapp import app, socketio
# Send socketio updates
message = {
'task': self.html_id(),
'update': 'status',
'status': self.status.name,
'css': self.status.css,
'show': (self.status in [Status.RUN, Status.ERROR]),
'running': self.status.is_running(),
}
with app.app_context():
message['html'] = flask.render_template('status_updates.html',
updates = self.status_history,
exception = self.exception,
traceback = self.traceback,
)
socketio.emit('task update',
message,
namespace='/jobs',
room=self.job_id,
)
def path(self, filename, relative=False):
"""
Returns a path to the given file
Arguments:
filename -- the requested file
Keyword arguments:
relative -- If False, return an absolute path to the file
If True, return a path relative to the jobs directory
"""
if not filename:
return None
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self.job_dir, filename)
if relative:
path = os.path.relpath(path, config_value('jobs_dir'))
return str(path).replace("\\","/")
def ready_to_queue(self):
"""
Returns True if all parents are done
"""
if not self.parents:
return True
for parent in self.parents:
if parent.status != Status.DONE:
return False
return True
def offer_resources(self, resources):
"""
Check the available resources and return a set of requested resources
Arguments:
resources -- a copy of scheduler.resources
"""
raise NotImplementedError
def task_arguments(self, resources):
"""
Returns args used by subprocess.Popen to execute the task
Returns False if the args cannot be set properly
Arguments:
resources -- the resources assigned by the scheduler for this task
"""
raise NotImplementedError
def before_run(self):
"""
Called before run() executes
Raises exceptions
"""
pass
def run(self, resources):
"""
Execute the task
Arguments:
resources -- the resources assigned by the scheduler for this task
"""
self.before_run()
args = self.task_arguments(resources)
if not args:
self.logger.error('Could not create the arguments for Popen')
self.status = Status.ERROR
return False
# Convert them all to strings
args = [str(x) for x in args]
self.logger.info('%s task started.' % self.name())
self.status = Status.RUN
unrecognized_output = []
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=False if platform.system() == 'Windows' else True,
)
try:
sigterm_time = None # When was the SIGTERM signal sent
sigterm_timeout = 2 # When should the SIGKILL signal be sent
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
if sigterm_time is None:
# Attempt graceful shutdown
p.send_signal(signal.SIGTERM)
sigterm_time = time.time()
self.status = Status.ABORT
break
if line is not None:
# Remove whitespace
line = line.strip()
if line:
if not self.process_output(line):
self.logger.warning('%s unrecognized output: %s' % (self.name(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
if sigterm_time is not None and (time.time() - sigterm_time > sigterm_timeout):
p.send_signal(signal.SIGKILL)
self.logger.warning('Sent SIGKILL to task "%s"' % self.name())
time.sleep(0.1)
except:
p.terminate()
self.after_run()
raise
self.after_run()
if self.status != Status.RUN:
return False
elif p.returncode != 0:
self.logger.error('%s task failed with error code %d' % (self.name(), p.returncode))
if self.exception is None:
self.exception = 'error code %d' % p.returncode
if unrecognized_output:
if self.traceback is None:
self.traceback = '\n'.join(unrecognized_output)
else:
self.traceback = self.traceback + ('\n'.join(unrecognized_output))
self.after_runtime_error()
self.status = Status.ERROR
return False
else:
self.logger.info('%s task completed.' % self.name())
self.status = Status.DONE
return True
def abort(self):
"""
Abort the Task
"""
if self.status.is_running():
self.aborted.set()
def preprocess_output_digits(self, line):
"""
Takes line of output and parses it according to DIGITS's log format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL] message
match = re.match(r'(\S{10} \S{8}) \[(\w+)\s*\] (.*)$', line)
if match:
timestr = match.group(1)
timestamp = time.mktime(time.strptime(timestr, digits.log.DATE_FORMAT))
level = match.group(2)
message = match.group(3)
if level.startswith('DEB'):
level = 'debug'
elif level.startswith('INF'):
level = 'info'
elif level.startswith('WAR'):
level = 'warning'
elif level.startswith('ERR'):
level = 'error'
elif level.startswith('CRI'):
level = 'critical'
return (timestamp, level, message)
else:
return (None, None, None)
def process_output(self, line):
"""
Process a line of output from the task
Returns True if the output was able to be processed
Arguments:
line -- a line of output
"""
raise NotImplementedError
def est_done(self):
"""
Returns the estimated time in seconds until the task is done
"""
if self.status != Status.RUN or self.progress == 0:
return None
elapsed = time.time() - self.status_history[-1][1]
return (1 - self.progress) * elapsed // self.progress
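    # Worked example (illustrative): with progress = 0.25 and 30 s elapsed since the
    # task entered RUN, est_done() returns (1 - 0.25) * 30 // 0.25 = 90.0 seconds.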
def after_run(self):
"""
Called after run() executes
"""
pass
def after_runtime_error(self):
"""
Called after a runtime error during run()
"""
pass
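# Illustrative sketch (not part of DIGITS): a minimal Task subclass showing how the
# hooks above fit together. The command, resource handling, and output handling are
# hypothetical examples, not taken from DIGITS itself.
class EchoTask(Task):

    def name(self):
        return 'Echo'

    def offer_resources(self, resources):
        # Request nothing; real tasks would reserve GPUs, ports, etc. here.
        return {}

    def task_arguments(self, resources):
        # Any executable whose stdout can be parsed line by line will do.
        return ['echo', 'hello from EchoTask']

    def process_output(self, line):
        # Try the standard DIGITS log format first, then fall back to raw text.
        timestamp, level, message = self.preprocess_output_digits(line)
        self.logger.info(message if message is not None else line)
        return True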
|
|
# -*- coding: utf-8 -*-
"""
Smoothers.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import scipy.ndimage
import scipy.stats
from bruges.bruges import BrugesError
from bruges.util import nearest
from bruges.util import rms as rms_
# TODO:
# - 1D and 2D Gaussian (or, better, n-D)
# - See how these handle Nans, consider removing, interpolating, replacing.
def mean(arr, size=5):
"""
A linear n-D smoothing filter. Can be used as a moving average on 1D data.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
arr = np.array(arr, dtype=np.float)
if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, np.mean, size=size)
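# Usage sketch (illustrative only): a 3-point moving average of a short 1D horizon.
# Boundary samples use scipy's default 'reflect' edge handling.
# >>> mean(np.array([1., 2., 3., 4., 5.]), size=3)   # doctest: +SKIP
# array([1.33, 2., 3., 4., 4.67])   # values rounded for display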
def rms(arr, size=5):
"""
A root-mean-square (RMS) n-D smoothing filter. Can be used as a moving-window RMS on 1D data.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
arr = np.array(arr, dtype=np.float)
if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, rms_, size=size)
def median(arr, size=5):
"""
A nonlinear n-D edge-preserving smoothing filter.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
arr = np.array(arr, dtype=np.float)
if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, np.median, size=size)
def mode(arr, size=5, tie='smallest'):
"""
A nonlinear n-D categorical smoothing filter. Use this to filter non-
continuous variables, such as categorical integers, e.g. to label facies.
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
tie (str): `'smallest'` or `'largest'`. In the event of a tie (i.e. two
or more values having the same count in the kernel), whether to
give back the smallest of the tying values, or the largest.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, tie):
if tie == 'smallest':
m, _ = scipy.stats.mode(this)
else:
# take the largest tying value: negate the input and the returned mode,
# since scipy's ModeResult does not support unary minus directly
neg_mode, _ = scipy.stats.mode(-this)
m = -neg_mode
return np.squeeze(m)
arr = np.array(arr, dtype=np.float)
if not size % 2:
size += 1
return scipy.ndimage.generic_filter(arr, func, size=size,
extra_keywords={'tie': tie}
)
def snn(arr, size=5, include=True):
"""
Symmetric nearest neighbour, a nonlinear 2D smoothing filter.
http://subsurfwiki.org/wiki/Symmetric_nearest_neighbour_filter
Args:
arr (ndarray): a 2D array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
include (bool): whether to include the central pixel itself.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, pairs, include):
"""
Deal with this patch.
"""
centre = this[this.size // 2]
select = [nearest(this[p], centre) for p in pairs]
if include:
select += [centre]
return np.mean(select)
arr = np.array(arr, dtype=np.float)
if arr.ndim != 2:
raise BrugesError("arr must be 2-dimensional")
if not size % 2:
size += 1
pairs = [[i, size**2-1 - i] for i in range(size**2 // 2)]
return scipy.ndimage.generic_filter(arr,
func,
size=size,
extra_keywords={'pairs': pairs,
'include': include}
)
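# Worked instance of the pair construction used in snn() above (illustrative): for
# size=3 the flattened 3x3 kernel has indices 0..8 with the centre at index 4, and
#     [[i, 8 - i] for i in range(4)] == [[0, 8], [1, 7], [2, 6], [3, 5]]
# i.e. each pair is a pixel and its mirror image through the centre cell.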
def kuwahara(arr, size=5):
"""
Kuwahara, a nonlinear 2D smoothing filter.
http://subsurfwiki.org/wiki/Kuwahara_filter
Args:
arr (ndarray): a 2D array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5. Should be odd,
rounded up if not.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, s, k):
"""
Deal with this patch.
"""
t = this.reshape((s, s))
sub = np.array([t[:k, :k].flatten(),
t[:k, k-1:].flatten(),
t[k-1:, :k].flatten(),
t[k-1:, k-1:].flatten()]
)
select = sub[np.argmin(np.var(sub, axis=1))]
return np.mean(select)
arr = np.array(arr, dtype=np.float)
if arr.ndim != 2:
raise BrugesError("arr must be 2-dimensional")
if not size % 2:
size += 1
k = int(np.ceil(size / 2))
return scipy.ndimage.generic_filter(arr,
func,
size=size,
extra_keywords={'s': size,
'k': k,
}
)
def conservative(arr, size=5, supercon=False):
"""
Conservative, a nonlinear n-D despiking filter. Very conservative! Only
changes centre value if it is outside the range of all the other values
in the kernel. Read http://subsurfwiki.org/wiki/Conservative_filter
Args:
arr (ndarray): an n-dimensional array, such as a seismic horizon.
size (int): the kernel size, e.g. 5 for 5x5 (in a 2D arr). Should be
odd, rounded up if not.
supercon (bool): whether to be superconservative. If True, replaces
pixel with min or max of kernel. If False (default), replaces pixel
with mean of kernel.
Returns:
ndarray: the resulting smoothed array.
"""
def func(this, k, supercon):
this = this.flatten()
centre = this[k]
rest = [this[:k], this[-k:]]
mi, ma = np.nanmin(rest), np.nanmax(rest)
if centre < mi:
return mi if supercon else np.mean(rest)
elif centre > ma:
return ma if supercon else np.mean(rest)
else:
return centre
arr = np.array(arr, dtype=np.float)
if not size % 2:
size += 1
k = int(np.floor(size**arr.ndim / 2))
return scipy.ndimage.generic_filter(arr,
func,
size=size,
extra_keywords={'k': k,
'supercon': supercon,
}
)
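# Illustrative despiking sketch (not part of bruges): the conservative filter above
# removes a lone spike but leaves the surrounding values untouched.
#
#     >>> horizon = np.array([[1., 1., 1.],
#     ...                     [1., 9., 1.],
#     ...                     [1., 1., 1.]])
#     >>> conservative(horizon, size=3)   # doctest: +SKIP
#     array([[1., 1., 1.],
#            [1., 1., 1.],
#            [1., 1., 1.]])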
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.session_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SessionOpsTest(test.TestCase):
def testHandleBasic(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = sess.run(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
def testHandleEval(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = sess.run(h)
# Get the tensor from its handle.
self.assertEqual(50, h.eval())
def testHandleAndValue(self):
with self.cached_session() as sess:
# Return a handle and a value.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
v = math_ops.multiply(a, c)
h, v = sess.run([h, v])
self.assertEqual(50, h.eval())
self.assertEqual(500, v)
def testHandleCond(self):
with self.cached_session() as sess:
# Return a handle and a value
a = constant_op.constant(10)
b = constant_op.constant(5)
p = math_ops.less(a, b)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
p, h = sess.run([p, h])
# Run by feeding a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
if p:
y = math_ops.multiply(x, 10)
else:
y = math_ops.multiply(x, 100)
result = sess.run(y, feed_dict={f: h.handle})
self.assertEqual(5000, result)
def testHandleForLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = sess.run(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
for _ in range(100):
# This exercises garbage collection.
h = sess.run(h_x, feed_dict={f: h.handle})
self.assertEqual(100, h.eval())
def testHandleWhileLoop(self):
with self.cached_session() as sess:
# Initialize a handle.
a = constant_op.constant(0)
h = session_ops.get_session_handle(a)
h = sess.run(h)
# Do some computation.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
b = constant_op.constant(100)
p = math_ops.less(x, b)
# Must define the loop body outside the loop.
h_x = session_ops.get_session_handle(math_ops.add(x, 1))
while True:
rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
if not rp:
break
self.assertEqual(101, h.eval())
def testHandleMover(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = sess.run(h)
# Feed a tensor handle.
f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
y = math_ops.multiply(x, 10)
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
# Feed another tensor handle.
with ops.device(test.gpu_device_name()):
a = constant_op.constant(10)
h = session_ops.get_session_handle(a)
h = sess.run(h)
self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
def testHandleDelete(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
sess.run(h).delete()
def testHandleDeleteRaw(self):
with self.cached_session() as sess:
# Return a handle.
a = constant_op.constant(10)
b = constant_op.constant(5)
c = math_ops.multiply(a, b)
h = session_ops.get_session_handle(c)
h = sess.run(h)
# Delete using a raw tensor handle.
raw_h = h.get_raw_handle()
f, x = session_ops.delete_session_tensor(raw_h)
sess.run(x, feed_dict={f: raw_h})
def testMultiDevices(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
a = constant_op.constant(1.0)
a_handle = sess.run(session_ops.get_session_handle(a))
with ops.device("/cpu:0"):
b = constant_op.constant(2.0)
b_handle = sess.run(session_ops.get_session_handle(b))
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
def testHandleGC(self):
with self.cached_session() as sess:
# initial values live on CPU
with ops.device("/cpu:0"):
one = constant_op.constant(1, dtype=dtypes.float32)
one_handle = sess.run(session_ops.get_session_handle(one))
x_handle = sess.run(session_ops.get_session_handle(one))
# addition lives on GPU
with ops.device(test.gpu_device_name()):
add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
dtypes.float32)
add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
dtypes.float32)
add_op = math_ops.add(add_t1, add_t2)
add_output = session_ops.get_session_handle(add_op)
# add 1 to tensor 20 times
for _ in range(20):
x_handle = sess.run(
add_output,
feed_dict={add_h1: one_handle.handle,
add_h2: x_handle.handle})
def testHandlePlacement(self):
with self.cached_session() as sess:
a = constant_op.constant(1.0)
a_handle_op = session_ops.get_session_handle(a)
b = constant_op.constant(2.0)
b_handle_op = session_ops.get_session_handle(b)
a_handle = sess.run(a_handle_op)
b_handle = sess.run(b_handle_op)
a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
c = math_ops.add(a_t, b_t)
c_handle = sess.run(
session_ops.get_session_handle(c),
feed_dict={a_p: a_handle.handle,
b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval())
def testFeedOneHandleDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.multiply(c, c)
h_c = sess.run(session_ops.get_session_handle(c))
self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c}))
def testDirectHandleFeedOverlappingWithFetches(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
h_c = sess.run(session_ops.get_session_handle(c))
d = array_ops.identity(c)
c_val = sess.run(c, feed_dict={c: h_c})
self.assertAllClose(50.0, c_val)
d_val = sess.run(d, feed_dict={c: h_c})
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: 60.0})
self.assertAllClose(50.0, c_val)
self.assertAllClose(60.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: 60.0, d: h_c})
self.assertAllClose(60.0, c_val)
self.assertAllClose(50.0, d_val)
c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: h_c})
self.assertAllClose(50.0, c_val)
self.assertAllClose(50.0, d_val)
def testFeedTwoHandlesDirectly(self):
with self.cached_session() as sess:
a = constant_op.constant(10.0)
b = constant_op.constant(5.0)
c = math_ops.multiply(a, b)
d = math_ops.div(a, b)
e = math_ops.subtract(c, d)
h_c = sess.run(session_ops.get_session_handle(c))
h_d = sess.run(session_ops.get_session_handle(d))
self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d}))
self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c}))
def testFeedHandleToVariableDirectly(self):
with self.cached_session() as sess:
a = variables.Variable(12.0)
inc_a = state_ops.assign_add(a, 2.0)
b = math_ops.add(a, 5.0)
sess.run(a.initializer)
h_a_read = sess.run(session_ops.get_session_handle(a.read_value()))
self.assertAllClose(12.0, sess.run(a))
self.assertAllClose(17.0, sess.run(b, feed_dict={a: h_a_read}))
sess.run(inc_a)
self.assertAllClose(19.0, sess.run(b, feed_dict={a: h_a_read}))
if __name__ == "__main__":
test.main()
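# Summary sketch (illustrative; it mirrors the pattern exercised by the tests above):
# every session-handle round trip has the same three steps,
#
#     h = sess.run(session_ops.get_session_handle(tensor))        # persist the value
#     feed, t = session_ops.get_session_tensor(h.handle, dtype)   # placeholder + tensor
#     sess.run(graph_built_on(t), feed_dict={feed: h.handle})     # reuse it later
#
# where `graph_built_on` stands for any ops constructed on the recovered tensor `t`.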
|
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple implementation of Bootstrapped DQN with prior networks.
References:
1. "Deep Exploration via Bootstrapped DQN" (Osband et al., 2016)
2. "Deep Exploration via Randomized Value Functions" (Osband et al., 2017)
3. "Randomized Prior Functions for Deep RL" (Osband et al, 2018)
Links:
1. https://arxiv.org/abs/1602.04621
2. https://arxiv.org/abs/1703.07608
3. https://arxiv.org/abs/1806.03335
Notes:
- This agent is implemented with JAX, Haiku, Optax, and Rlax. For installation
instructions for these libraries, see the README.md in the parent folder.
- For simplicity and readability, this implementation does not parallelise
computation across the ensemble, so it is potentially inefficient.
"""
from typing import Any, Callable, NamedTuple, Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import replay
import dm_env
from dm_env import specs
import haiku as hk
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
class TrainingState(NamedTuple):
params: hk.Params
target_params: hk.Params
opt_state: Any
step: int
class BootstrappedDqn(base.Agent):
"""Bootstrapped DQN with randomized prior functions."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
network: Callable[[jnp.ndarray], jnp.ndarray],
num_ensemble: int,
batch_size: int,
discount: float,
replay_capacity: int,
min_replay_size: int,
sgd_period: int,
target_update_period: int,
optimizer: optax.GradientTransformation,
mask_prob: float,
noise_scale: float,
epsilon_fn: Callable[[int], float] = lambda _: 0.,
seed: int = 1,
):
# Transform the (impure) network into a pure function.
network = hk.without_apply_rng(hk.transform(network))
# Define loss function, including bootstrap mask `m_t` & reward noise `z_t`.
def loss(params: hk.Params, target_params: hk.Params,
transitions: Sequence[jnp.ndarray]) -> jnp.ndarray:
"""Q-learning loss with added reward noise + half-in bootstrap."""
o_tm1, a_tm1, r_t, d_t, o_t, m_t, z_t = transitions
q_tm1 = network.apply(params, o_tm1)
q_t = network.apply(target_params, o_t)
r_t += noise_scale * z_t
batch_q_learning = jax.vmap(rlax.q_learning)
td_error = batch_q_learning(q_tm1, a_tm1, r_t, discount * d_t, q_t)
return jnp.mean(m_t * td_error**2)
# Define the update function for each member of the ensemble.
@jax.jit
def sgd_step(state: TrainingState,
transitions: Sequence[jnp.ndarray]) -> TrainingState:
"""Does a step of SGD for the whole ensemble over `transitions`."""
gradients = jax.grad(loss)(state.params, state.target_params, transitions)
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(
params=new_params,
target_params=state.target_params,
opt_state=new_opt_state,
step=state.step + 1)
# Initialize parameters and optimizer state for an ensemble of Q-networks.
rng = hk.PRNGSequence(seed)
dummy_obs = np.zeros((1, *obs_spec.shape), jnp.float32)
initial_params = [
network.init(next(rng), dummy_obs) for _ in range(num_ensemble)
]
initial_target_params = [
network.init(next(rng), dummy_obs) for _ in range(num_ensemble)
]
initial_opt_state = [optimizer.init(p) for p in initial_params]
# Internalize state.
self._ensemble = [
TrainingState(p, tp, o, step=0) for p, tp, o in zip(
initial_params, initial_target_params, initial_opt_state)
]
self._forward = jax.jit(network.apply)
self._sgd_step = sgd_step
self._num_ensemble = num_ensemble
self._optimizer = optimizer
self._replay = replay.Replay(capacity=replay_capacity)
# Agent hyperparameters.
self._num_actions = action_spec.num_values
self._batch_size = batch_size
self._sgd_period = sgd_period
self._target_update_period = target_update_period
self._min_replay_size = min_replay_size
self._epsilon_fn = epsilon_fn
self._mask_prob = mask_prob
# Agent state.
self._active_head = self._ensemble[0]
self._total_steps = 0
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Select values via Thompson sampling, then use epsilon-greedy policy."""
self._total_steps += 1
if np.random.rand() < self._epsilon_fn(self._total_steps):
return np.random.randint(self._num_actions)
# Greedy policy, breaking ties uniformly at random.
batched_obs = timestep.observation[None, ...]
q_values = self._forward(self._active_head.params, batched_obs)
action = np.random.choice(np.flatnonzero(q_values == q_values.max()))
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Update the agent: add transition to replay and periodically do SGD."""
# Thompson sampling: every episode pick a new Q-network as the policy.
if new_timestep.last():
k = np.random.randint(self._num_ensemble)
self._active_head = self._ensemble[k]
# Generate bootstrapping mask & reward noise.
mask = np.random.binomial(1, self._mask_prob, self._num_ensemble)
noise = np.random.randn(self._num_ensemble)
# Make transition and add to replay.
transition = [
timestep.observation,
action,
np.float32(new_timestep.reward),
np.float32(new_timestep.discount),
new_timestep.observation,
mask,
noise,
]
self._replay.add(transition)
if self._replay.size < self._min_replay_size:
return
# Periodically sample from replay and do SGD for the whole ensemble.
if self._total_steps % self._sgd_period == 0:
transitions = self._replay.sample(self._batch_size)
o_tm1, a_tm1, r_t, d_t, o_t, m_t, z_t = transitions
for k, state in enumerate(self._ensemble):
transitions = [o_tm1, a_tm1, r_t, d_t, o_t, m_t[:, k], z_t[:, k]]
self._ensemble[k] = self._sgd_step(state, transitions)
# Periodically update target parameters.
for k, state in enumerate(self._ensemble):
if state.step % self._target_update_period == 0:
self._ensemble[k] = state._replace(target_params=state.params)
def default_agent(
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
seed: int = 0,
num_ensemble: int = 20,
) -> BootstrappedDqn:
"""Initialize a Bootstrapped DQN agent with default parameters."""
# Define network.
prior_scale = 5.
hidden_sizes = [50, 50]
def network(inputs: jnp.ndarray) -> jnp.ndarray:
"""Simple Q-network with randomized prior function."""
net = hk.nets.MLP([*hidden_sizes, action_spec.num_values])
prior_net = hk.nets.MLP([*hidden_sizes, action_spec.num_values])
x = hk.Flatten()(inputs)
return net(x) + prior_scale * lax.stop_gradient(prior_net(x))
optimizer = optax.adam(learning_rate=1e-3)
return BootstrappedDqn(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
batch_size=128,
discount=.99,
num_ensemble=num_ensemble,
replay_capacity=10000,
min_replay_size=128,
sgd_period=1,
target_update_period=4,
optimizer=optimizer,
mask_prob=1.,
noise_scale=0.,
epsilon_fn=lambda _: 0.,
seed=seed,
)
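# Illustrative usage sketch (assumptions: bsuite is installed; the environment id
# 'catch/0' and the episode count are arbitrary choices, not taken from this file).
if __name__ == '__main__':
  import bsuite
  from bsuite.baselines import experiment

  env = bsuite.load_from_id('catch/0')
  agent = default_agent(env.observation_spec(), env.action_spec())
  experiment.run(agent=agent, environment=env, num_episodes=10)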
|
|
from __future__ import print_function
import unittest
import argparse
import re
import tempfile
import subprocess
import threading
import os
import sys
import inspect
lldb = ''
clrdir = ''
workdir = ''
corerun = ''
sosplugin = ''
assembly = ''
fail_flag = ''
fail_flag_lldb = ''
summary_file = ''
timeout = 0
regex = ''
repeat = 0
def runWithTimeout(cmd):
p = None
def run():
global p
p = subprocess.Popen(cmd, shell=True)
p.communicate()
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
if thread.is_alive():
with open(summary_file, 'a+') as summary:
print('Timeout!', file=summary)
p.kill()
thread.join()
class TestSosCommands(unittest.TestCase):
def do_test(self, command):
open(fail_flag, 'a').close()
try:
os.unlink(fail_flag_lldb)
except:
pass
cmd = (('%s -b ' % lldb) +
("-k \"script open('%s', 'a').close()\" " % fail_flag_lldb) +
("-k 'quit' ") +
("--no-lldbinit ") +
("-O \"plugin load %s \" " % sosplugin) +
("-o \"script import testutils as test\" ") +
("-o \"script test.fail_flag = '%s'\" " % fail_flag) +
("-o \"script test.summary_file = '%s'\" " % summary_file) +
("-o \"script test.run('%s', '%s')\" " % (assembly, command)) +
("-o \"quit\" ") +
(" -- %s %s > %s.log 2> %s.log.2" % (corerun, assembly,
command, command)))
runWithTimeout(cmd)
self.assertFalse(os.path.isfile(fail_flag))
self.assertFalse(os.path.isfile(fail_flag_lldb))
try:
os.unlink(fail_flag)
except:
pass
try:
os.unlink(fail_flag_lldb)
except:
pass
def t_cmd_bpmd_nofuturemodule_module_function(self):
self.do_test('t_cmd_bpmd_nofuturemodule_module_function')
def t_cmd_bpmd_module_function(self):
self.do_test('t_cmd_bpmd_module_function')
def t_cmd_bpmd_module_function_iloffset(self):
self.do_test('t_cmd_bpmd_module_function_iloffset')
def t_cmd_bpmd_methoddesc(self):
self.do_test('t_cmd_bpmd_methoddesc')
def t_cmd_bpmd_clearall(self):
self.do_test('t_cmd_bpmd_clearall')
def t_cmd_clrstack(self):
self.do_test('t_cmd_clrstack')
def t_cmd_clrthreads(self):
self.do_test('t_cmd_clrthreads')
def t_cmd_clru(self):
self.do_test('t_cmd_clru')
def t_cmd_dumpclass(self):
self.do_test('t_cmd_dumpclass')
def t_cmd_dumpheap(self):
self.do_test('t_cmd_dumpheap')
def t_cmd_dumpil(self):
self.do_test('t_cmd_dumpil')
def t_cmd_dumplog(self):
self.do_test('t_cmd_dumplog')
def t_cmd_dumpmd(self):
self.do_test('t_cmd_dumpmd')
def t_cmd_dumpmodule(self):
self.do_test('t_cmd_dumpmodule')
def t_cmd_dumpmt(self):
self.do_test('t_cmd_dumpmt')
def t_cmd_dumpobj(self):
self.do_test('t_cmd_dumpobj')
def t_cmd_dumpstack(self):
self.do_test('t_cmd_dumpstack')
def t_cmd_dso(self):
self.do_test('t_cmd_dso')
def t_cmd_eeheap(self):
self.do_test('t_cmd_eeheap')
def t_cmd_eestack(self):
self.do_test('t_cmd_eestack')
def t_cmd_gcroot(self):
self.do_test('t_cmd_gcroot')
def t_cmd_ip2md(self):
self.do_test('t_cmd_ip2md')
def t_cmd_name2ee(self):
self.do_test('t_cmd_name2ee')
def t_cmd_pe(self):
self.do_test('t_cmd_pe')
def t_cmd_histclear(self):
self.do_test('t_cmd_histclear')
def t_cmd_histinit(self):
self.do_test('t_cmd_histinit')
def t_cmd_histobj(self):
self.do_test('t_cmd_histobj')
def t_cmd_histobjfind(self):
self.do_test('t_cmd_histobjfind')
def t_cmd_histroot(self):
self.do_test('t_cmd_histroot')
def t_cmd_sos(self):
self.do_test('t_cmd_sos')
def t_cmd_soshelp(self):
self.do_test('t_cmd_soshelp')
def generate_report():
report = [{'name': 'TOTAL', True: 0, False: 0, 'completed': True}]
fail_messages = []
if not os.path.isfile(summary_file):
print('No summary file to process!')
return
with open(summary_file, 'r') as summary:
for line in summary:
if line.startswith('new_suite: '):
report.append({'name': line.split()[-1], True: 0, False: 0,
'completed': False, 'timeout': False})
elif line.startswith('True'):
report[-1][True] += 1
elif line.startswith('False'):
report[-1][False] += 1
elif line.startswith('Completed!'):
report[-1]['completed'] = True
elif line.startswith('Timeout!'):
report[-1]['timeout'] = True
elif line.startswith('!!! '):
fail_messages.append(line.rstrip('\n'))
for suite in report[1:]:
report[0][True] += suite[True]
report[0][False] += suite[False]
report[0]['completed'] &= suite['completed']
for line in fail_messages:
print(line)
print()
print('=' * 79)
print('{:72} {:6}'.format('Test suite', 'Result'))
print('-' * 79)
for suite in report[1:]:
if suite['timeout']:
result = 'Timeout'
elif suite[False]:
result = 'Fail'
elif not suite['completed']:
result = 'Crash'
elif suite[True]:
result = 'Success'
else:
result = 'Please, report'
print('{:68} {:>10}'.format(suite['name'], result))
print('=' * 79)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lldb', default='lldb')
parser.add_argument('--clrdir', default='.')
parser.add_argument('--workdir', default='.')
parser.add_argument('--assembly', default='Test.exe')
parser.add_argument('--timeout', default=90)
parser.add_argument('--regex', default='t_cmd_')
parser.add_argument('--repeat', default=1)
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
lldb = args.lldb
clrdir = args.clrdir
workdir = args.workdir
assembly = args.assembly
timeout = int(args.timeout)
regex = args.regex
repeat = int(args.repeat)
print("lldb: %s" % lldb)
print("clrdir: %s" % clrdir)
print("workdir: %s" % workdir)
print("assembly: %s" % assembly)
print("timeout: %i" % timeout)
print("regex: %s" % regex)
print("repeat: %i" % repeat)
corerun = os.path.join(clrdir, 'corerun')
sosplugin = os.path.join(clrdir, 'libsosplugin.so')
if os.name != 'posix':
print('Not implemented: corerun.exe, sosplugin.dll?')
exit(1)
print("corerun: %s" % corerun)
print("sosplugin: %s" % sosplugin)
fail_flag = os.path.join(workdir, 'fail_flag')
fail_flag_lldb = os.path.join(workdir, 'fail_flag.lldb')
print("fail_flag: %s" % fail_flag)
print("fail_flag_lldb: %s" % fail_flag_lldb)
summary_file = os.path.join(workdir, 'summary')
print("summary_file: %s" % summary_file)
try:
os.unlink(summary_file)
except:
pass
sys.argv[1:] = args.unittest_args
suite = unittest.TestSuite()
all_tests = inspect.getmembers(TestSosCommands, predicate=inspect.ismethod)
for (test_name, test_func) in all_tests:
if re.match(regex, test_name):
suite.addTest(TestSosCommands(test_name))
unittest.TextTestRunner(verbosity=1).run(suite)
generate_report()
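# Illustrative note (format inferred from the parser in generate_report() above, not
# from any SOS documentation): the summary file is a plain-text log such as
#
#     new_suite: t_cmd_clrstack
#     True
#     True
#     Completed!
#     new_suite: t_cmd_dumpheap
#     False
#     !!! dumpheap assertion failed
#
# which the report would show as Success for t_cmd_clrstack and Fail for t_cmd_dumpheap.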
|
|
#!/usr/bin/env python
"""
Update an ROI IR timeseries CSV file.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import os
import sys
from configparser import ConfigParser as configparser
from datetime import timedelta
# use this because numpy/openblas is automatically multi-threaded.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
import numpy as np
from PIL import Image
import vegindex as vi
from vegindex.ir_roitimeseries import IRROITimeSeries
from vegindex.vegindex import get_roi_list
from . import utils
# use this because numpy/openblas is automatically multi-threaded.
os.environ["OMP_NUM_THREADS"] = "1"
# set vars
# you can set the archive directory to somewhere else for testing by
# using the env variable, PHENOCAM_ARCHIVE_DIR.
archive_dir = vi.config.archive_dir
debug = False
default_resize = vi.config.RESIZE
# if __name__ == "__main__":
def main():
# set up command line argument processing
parser = argparse.ArgumentParser()
# options
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true",
default=False,
)
parser.add_argument(
"-n",
"--dry-run",
help="Process data but don't save results",
action="store_true",
default=False,
)
# positional arguments
parser.add_argument("site", help="PhenoCam site name")
parser.add_argument("roiname", help="ROI name, e.g. canopy_0001")
# get args
args = parser.parse_args()
sitename = args.site
roiname = args.roiname
verbose = args.verbose
dryrun = args.dry_run
if verbose:
print("site: {0}".format(sitename))
print("roiname: {0}".format(roiname))
print("verbose: {0}".format(verbose))
print("dryrun: {0}".format(dryrun))
# set input/output filename
inname = "%s_%s_IR_roistats.csv" % (sitename, roiname)
outname = inname
inpath = os.path.join(archive_dir, sitename, "ROI", outname)
outpath = inpath
if verbose:
print("output file: {0}".format(outname))
# get ROI list
roi_list = get_roi_list(sitename, roiname)
# read existing CSV file - since this is an update throw
# exception if the file doesn't already exist
try:
roits = IRROITimeSeries(site=sitename, ROIListID=roiname)
roits.readCSV(inpath)
except IOError:
errmsg = "Unable to read IR CSV file: {0}\n".format(outpath)
sys.stderr.write(errmsg)
sys.exit(1)
# read in config file for this site if it exists
config_file = "{0}_{1}.cfg".format(sitename, roiname)
config_path = os.path.join(archive_dir, sitename, "ROI", config_file)
if os.path.exists(config_path):
cfgparser = configparser(defaults={"resize": str(default_resize)})
cfgparser.read(config_path)
if cfgparser.has_section("roi_timeseries"):
resizeFlg = cfgparser.getboolean("roi_timeseries", "resize")
else:
resizeFlg = default_resize
# verify that config matches CSV header!
if resizeFlg != roits.resizeFlg:
errmsg = "resize flag from config doesn't match CSV header\n"
sys.stderr.write(errmsg)
sys.exit(1)
else:
resizeFlg = default_resize
# print config values
if verbose:
print("")
print("ROI timeseries config:")
print("======================")
print("roi_list: ", "{0}_{1}_roi.csv".format(sitename, roiname))
if os.path.exists(config_path):
print("config file: {0}".format(config_file))
else:
print("config file: None")
print("Resize Flag: ", resizeFlg)
# get list of images already in CSV
old_imglist = roits.get_image_list()
# find last dt in current timeseries CSV
nlast = len(roits.rows) - 1
dt_last = roits.rows[nlast]["datetime"]
# add five seconds so that we don't reprocess last image
dt_last = dt_last + timedelta(seconds=5)
# start with images newer than last dt
dt_start = dt_last
if verbose:
print("last image at: {0}".format(dt_last))
# loop over mask entries in ROI list
nimage = 0
nupdate = 0
for imask, roimask in enumerate(roi_list.masks):
roi_startDT = roimask["start_dt"]
roi_endDT = roimask["end_dt"]
# skip this ROI maskfile if its validity interval ends
# before the last date prior to this update
if roi_endDT < dt_start:
continue
# start_date = roi_startDT.date()
# end_date = roi_endDT.date()
# start_time = roi_startDT.time()
# end_time = roi_endDT.time()
maskfile = roimask["maskfile"]
# okay set the start datetime to the larger of dt_start (from
# last row of existing timeseries CSV) and the beginning of
# the ROI validity. We need to do this for the case where
# there is a gap between last row of CSV and beginning of next
# validity interval. This will often be the case when there
# are a series of "transitional images" between two
# stable/useful camera positions.
if dt_start < roi_startDT:
dt_start = roi_startDT
mask_path = os.path.join(archive_dir, sitename, "ROI", maskfile)
# print roi_path
try:
mask_img = Image.open(mask_path)
except Exception:
sys.stderr.write("Unable to open ROI mask file\n")
sys.exit(1)
# check that mask_img is in expected form
mask_mode = mask_img.mode
if mask_mode != "L":
# convert to 8-bit mask
mask_img = mask_img.convert("L")
# make a numpy mask
roimask = np.asarray(mask_img, dtype=np.bool8)
# get list of images for this timeperiod
imglist = utils.getsiteimglist(
sitename, getIR=True, startDT=dt_start, endDT=roi_endDT
)
nimage += len(imglist)
for impath in imglist:
if debug:
print(maskfile, impath)
# check if image already exists in list -- just to be
# sure!
fn = os.path.basename(impath)
try:
row_index = old_imglist.index(fn)
except Exception:
row_index = None
# insert the row if this image is somehow already in the CSV (shouldn't
# happen, but handle it to be safe); otherwise append a new row
if row_index is not None:
roits_row = roits.insert_row(impath, roimask, imask + 1)
else:
roits_row = roits.append_row(impath, roimask, imask + 1)
# check that we could append/insert a row
if roits_row:
nupdate += 1
else:
continue
if verbose:
csvstr = roits.format_csvrow(roits_row)
print(csvstr)
if debug:
if nupdate == 10:
break
# output CSV file
if dryrun:
nout = 0
else:
nout = roits.writeCSV(outpath)
print("Images processed: %d" % (nimage,))
print("Images added to CSV: %d" % (nupdate,))
print("Total: %d" % (nout,))
|
|
"""
Templating for ops docstrings
"""
from typing import Dict, Optional
def _make_flex_doc(op_name, typ):
"""
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {'series', 'dataframe'}
Returns
-------
doc : str
"""
op_name = op_name.replace("__", "")
op_desc = _op_descriptions[op_name]
if op_name.startswith("r"):
equiv = "other " + op_desc["op"] + " " + typ
else:
equiv = typ + " " + op_desc["op"] + " other"
if typ == "series":
base_doc = _flex_doc_SERIES
doc_no_examples = base_doc.format(
desc=op_desc["desc"],
op_name=op_name,
equiv=equiv,
reverse=op_desc["reverse"],
)
if op_desc["series_examples"]:
doc = doc_no_examples + op_desc["series_examples"]
else:
doc = doc_no_examples
elif typ == "dataframe":
base_doc = _flex_doc_FRAME
doc = base_doc.format(
desc=op_desc["desc"],
op_name=op_name,
equiv=equiv,
reverse=op_desc["reverse"],
)
else:
raise AssertionError("Invalid typ argument.")
return doc
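# Illustrative example (not executed here; it relies on the templates and the
# reverse-op registration defined later in this module):
#
#     >>> _make_flex_doc("__radd__", "series").splitlines()[1]   # doctest: +SKIP
#     'Return Addition of series and other, element-wise (binary operator `radd`).'
#
# For an "r"-prefixed (reversed) op the ``{equiv}`` text becomes "other + series".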
_add_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.add(b, fill_value=0)
a 2.0
b 1.0
c 1.0
d 1.0
e NaN
dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.subtract(b, fill_value=0)
a 0.0
b 1.0
c 1.0
d -1.0
e NaN
dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.multiply(b, fill_value=0)
a 1.0
b 0.0
c 0.0
d 0.0
e NaN
dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.divide(b, fill_value=0)
a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.floordiv(b, fill_value=0)
a 1.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.mod(b, fill_value=0)
a 0.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.pow(b, fill_value=0)
a 1.0
b 1.0
c 1.0
d 0.0
e NaN
dtype: float64
"""
_op_descriptions = {
# Arithmetic Operators
"add": {
"op": "+",
"desc": "Addition",
"reverse": "radd",
"series_examples": _add_example_SERIES,
},
"sub": {
"op": "-",
"desc": "Subtraction",
"reverse": "rsub",
"series_examples": _sub_example_SERIES,
},
"mul": {
"op": "*",
"desc": "Multiplication",
"reverse": "rmul",
"series_examples": _mul_example_SERIES,
"df_examples": None,
},
"mod": {
"op": "%",
"desc": "Modulo",
"reverse": "rmod",
"series_examples": _mod_example_SERIES,
},
"pow": {
"op": "**",
"desc": "Exponential power",
"reverse": "rpow",
"series_examples": _pow_example_SERIES,
"df_examples": None,
},
"truediv": {
"op": "/",
"desc": "Floating division",
"reverse": "rtruediv",
"series_examples": _div_example_SERIES,
"df_examples": None,
},
"floordiv": {
"op": "//",
"desc": "Integer division",
"reverse": "rfloordiv",
"series_examples": _floordiv_example_SERIES,
"df_examples": None,
},
"divmod": {
"op": "divmod",
"desc": "Integer division and modulo",
"reverse": "rdivmod",
"series_examples": None,
"df_examples": None,
},
# Comparison Operators
"eq": {"op": "==", "desc": "Equal to", "reverse": None, "series_examples": None},
"ne": {
"op": "!=",
"desc": "Not equal to",
"reverse": None,
"series_examples": None,
},
"lt": {"op": "<", "desc": "Less than", "reverse": None, "series_examples": None},
"le": {
"op": "<=",
"desc": "Less than or equal to",
"reverse": None,
"series_examples": None,
},
"gt": {"op": ">", "desc": "Greater than", "reverse": None, "series_examples": None},
"ge": {
"op": ">=",
"desc": "Greater than or equal to",
"reverse": None,
"series_examples": None,
},
} # type: Dict[str, Dict[str, Optional[str]]]
_op_names = list(_op_descriptions.keys())
for key in _op_names:
reverse_op = _op_descriptions[key]["reverse"]
if reverse_op is not None:
_op_descriptions[reverse_op] = _op_descriptions[key].copy()
_op_descriptions[reverse_op]["reverse"] = key
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result will be missing.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
"""
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill existing missing (NaN) values, and any new element needed for
successful DataFrame alignment, with this value before computation.
If data in both corresponding DataFrame locations is missing
the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : DataFrame
Notes
-----
Mismatched indices will be unioned together
"""
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``, but with support to substitute a fill_value
for missing data in one of the inputs. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}
Whether to compare by the index (0 or 'index') or columns
(1 or 'columns'). For Series input, axis to match Series index on.
level : int or label
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed for
successful DataFrame alignment, with this value before computation.
If data in both corresponding DataFrame locations is missing
the result will be missing.
Returns
-------
DataFrame
Result of the arithmetic operation.
See Also
--------
DataFrame.add : Add DataFrames.
DataFrame.sub : Subtract DataFrames.
DataFrame.mul : Multiply DataFrames.
DataFrame.div : Divide DataFrames (float division).
DataFrame.truediv : Divide DataFrames (float division).
DataFrame.floordiv : Divide DataFrames (integer division).
DataFrame.mod : Calculate modulo (remainder after division).
DataFrame.pow : Calculate exponential power.
Notes
-----
Mismatched indices will be unioned together.
Examples
--------
>>> df = pd.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with the operator version, which returns the same
results.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide by constant with reverse version.
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract a list and Series by axis with operator version.
>>> df - [1, 2]
angles degrees
circle -1 358
triangle 2 178
rectangle 3 358
>>> df.sub([1, 2], axis='columns')
angles degrees
circle -1 358
triangle 2 178
rectangle 3 358
>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']),
... axis='index')
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
Multiply a DataFrame of different shape with operator version.
>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},
... index=['circle', 'triangle', 'rectangle'])
>>> other
angles
circle 0
triangle 3
rectangle 4
>>> df * other
angles degrees
circle 0 NaN
triangle 9 NaN
rectangle 16 NaN
>>> df.mul(other, fill_value=0)
angles degrees
circle 0 0.0
triangle 9 0.0
rectangle 16 0.0
Divide by a MultiIndex by level.
>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6],
... 'degrees': [360, 180, 360, 360, 540, 720]}},
... index=[['A', 'A', 'A', 'B', 'B', 'B'],
... ['circle', 'triangle', 'rectangle',
... 'square', 'pentagon', 'hexagon']])
>>> df_multindex
angles degrees
A circle 0 360
triangle 3 180
rectangle 4 360
B square 4 360
pentagon 5 540
hexagon 6 720
>>> df.div(df_multindex, level=1, fill_value=0)
angles degrees
A circle NaN 1.0
triangle 1.0 1.0
rectangle 1.0 1.0
B square 0.0 0.0
pentagon 0.0 0.0
hexagon 0.0 0.0
"""
_flex_comp_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison
operators.
Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis
(rows or columns) and level for comparison.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}, default 'columns'
Whether to compare by the index (0 or 'index') or columns
(1 or 'columns').
level : int or label
Broadcast across a level, matching Index values on the passed
MultiIndex level.
Returns
-------
DataFrame of bool
Result of the comparison.
See Also
--------
DataFrame.eq : Compare DataFrames for equality elementwise.
DataFrame.ne : Compare DataFrames for inequality elementwise.
DataFrame.le : Compare DataFrames for less than inequality
or equality elementwise.
DataFrame.lt : Compare DataFrames for strictly less than
inequality elementwise.
DataFrame.ge : Compare DataFrames for greater than inequality
or equality elementwise.
DataFrame.gt : Compare DataFrames for strictly greater than
inequality elementwise.
Notes
-----
Mismatched indices will be unioned together.
`NaN` values are considered different (i.e. `NaN` != `NaN`).
Examples
--------
>>> df = pd.DataFrame({{'cost': [250, 150, 100],
... 'revenue': [100, 250, 300]}},
... index=['A', 'B', 'C'])
>>> df
cost revenue
A 250 100
B 150 250
C 100 300
Comparison with a scalar, using either the operator or method:
>>> df == 100
cost revenue
A False True
B False False
C True False
>>> df.eq(100)
cost revenue
A False True
B False False
C True False
When `other` is a :class:`Series`, the columns of a DataFrame are aligned
with the index of `other` and broadcast:
>>> df != pd.Series([100, 250], index=["cost", "revenue"])
cost revenue
A True True
B True False
C False True
Use the method to control the broadcast axis:
>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index')
cost revenue
A True False
B True True
C True True
D True True
When comparing to an arbitrary sequence, the number of columns must
match the number of elements in `other`:
>>> df == [250, 100]
cost revenue
A True True
B False False
C False False
Use the method to control the axis:
>>> df.eq([250, 250, 100], axis='index')
cost revenue
A True False
B False True
C True False
Compare to a DataFrame of different shape.
>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}},
... index=['A', 'B', 'C', 'D'])
>>> other
revenue
A 300
B 250
C 100
D 150
>>> df.gt(other)
cost revenue
A False False
B False False
C False True
D False False
Compare to a MultiIndex by level.
>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220],
... 'revenue': [100, 250, 300, 200, 175, 225]}},
... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'],
... ['A', 'B', 'C', 'A', 'B', 'C']])
>>> df_multindex
cost revenue
Q1 A 250 100
B 150 250
C 100 300
Q2 A 150 200
B 300 175
C 220 225
>>> df.le(df_multindex, level=1)
cost revenue
Q1 A True True
B True True
C True True
Q2 A False True
B True False
C True False
"""
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Inverse Cloze Task model."""
import functools
from bert import optimization
from language.common.utils import tensor_utils
from language.common.utils import tpu_utils
from language.orqa.datasets import ict_dataset
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
def module_fn(is_training, params):
"""Module function."""
input_ids = tf.placeholder(tf.int32, [None, None], "input_ids")
input_mask = tf.placeholder(tf.int32, [None, None], "input_mask")
segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids")
bert_module = hub.Module(
params["bert_hub_module_path"],
tags={"train"} if is_training else {},
trainable=True)
output_layer = bert_module(
inputs=dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids),
signature="tokens",
as_dict=True)["pooled_output"]
projected_emb = tf.layers.dense(output_layer, params["projection_size"])
projected_emb = tf.keras.layers.LayerNormalization(axis=-1)(projected_emb)
if is_training:
projected_emb = tf.nn.dropout(projected_emb, rate=0.1)
hub.add_signature(
name="projected",
inputs=dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids),
outputs=projected_emb)
hub.add_signature(
name="tokenization_info",
inputs={},
outputs=bert_module(signature="tokenization_info", as_dict=True))
def create_ict_module(params, mode):
"""Create hub module."""
tags_and_args = []
for is_training in (True, False):
tags = set()
if is_training:
tags.add("train")
tags_and_args.append((tags, dict(is_training=is_training)))
ict_module_spec = hub.create_module_spec(
functools.partial(module_fn, params=params),
tags_and_args=tags_and_args)
ict_module = hub.Module(
ict_module_spec,
tags={"train"} if mode == tf.estimator.ModeKeys.TRAIN else {},
trainable=True)
hub.register_module_for_export(ict_module, "ict")
return ict_module
def model_fn(features, labels, mode, params):
"""Model function."""
del labels
# [local_batch_size, block_seq_len]
block_ids = features["block_ids"]
block_mask = features["block_mask"]
block_segment_ids = features["block_segment_ids"]
# [local_batch_size, query_seq_len]
query_ids = features["query_ids"]
query_mask = features["query_mask"]
local_batch_size = tensor_utils.shape(block_ids, 0)
tf.logging.info("Model batch size: %d", local_batch_size)
ict_module = create_ict_module(params, mode)
query_emb = ict_module(
inputs=dict(
input_ids=query_ids,
input_mask=query_mask,
segment_ids=tf.zeros_like(query_ids)),
signature="projected")
block_emb = ict_module(
inputs=dict(
input_ids=block_ids,
input_mask=block_mask,
segment_ids=block_segment_ids),
signature="projected")
if params["use_tpu"]:
# [global_batch_size, hidden_size]
block_emb = tpu_utils.cross_shard_concat(block_emb)
# [global_batch_size, local_batch_size]
labels = tpu_utils.cross_shard_pad(tf.eye(local_batch_size))
# [local_batch_size]
labels = tf.argmax(labels, 0)
else:
# [local_batch_size]
labels = tf.range(local_batch_size)
tf.logging.info("Global batch size: %s", tensor_utils.shape(block_emb, 0))
# [batch_size, global_batch_size]
logits = tf.matmul(query_emb, block_emb, transpose_b=True)
# []
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
train_op = optimization.create_optimizer(
loss=loss,
init_lr=params["learning_rate"],
num_train_steps=params["num_train_steps"],
num_warmup_steps=min(10000, max(100, int(params["num_train_steps"]/10))),
use_tpu=params["use_tpu"] if "use_tpu" in params else False)
predictions = tf.argmax(logits, -1)
metric_args = [query_mask, block_mask, labels, predictions,
features["mask_query"]]
def metric_fn(query_mask, block_mask, labels, predictions, mask_query):
masked_accuracy = tf.metrics.accuracy(
labels=labels,
predictions=predictions,
weights=mask_query)
unmasked_accuracy = tf.metrics.accuracy(
labels=labels,
predictions=predictions,
weights=tf.logical_not(mask_query))
return dict(
query_non_padding=tf.metrics.mean(query_mask),
block_non_padding=tf.metrics.mean(block_mask),
actual_mask_ratio=tf.metrics.mean(mask_query),
masked_accuracy=masked_accuracy,
unmasked_accuracy=unmasked_accuracy)
if params["use_tpu"]:
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metrics=(metric_fn, metric_args))
else:
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=metric_fn(*metric_args),
predictions=predictions)
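# A minimal standalone sketch (not used by model_fn above) of the in-batch
# softmax objective it builds: each query's positive is the block at the same
# row index, and every other block in the (global) batch acts as a negative.
# The batch size of 3, hidden size of 4, and the use of numpy here are
# assumptions purely for illustration.
def _in_batch_softmax_sketch():
  import numpy as np
  query_emb = np.random.rand(3, 4)   # [local_batch_size, hidden]
  block_emb = np.random.rand(3, 4)   # [global_batch_size, hidden]
  logits = query_emb @ block_emb.T   # [3, 3] retrieval scores
  labels = np.arange(3)              # block i is the positive for query i
  log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
  return -log_probs[np.arange(3), labels].mean()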
def input_fn(params, is_train):
"""An input function satisfying the tf.estimator API."""
dataset = ict_dataset.get_dataset(
examples_path=params["examples_path"],
mask_rate=params["mask_rate"],
bert_hub_module_path=params["bert_hub_module_path"],
query_seq_len=params["query_seq_len"],
block_seq_len=params["block_seq_len"],
num_block_records=params["num_block_records"],
num_input_threads=params["num_input_threads"])
batch_size = params["batch_size"] if is_train else params["eval_batch_size"]
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
def exporter():
"""Create exporters."""
serving_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
features=dict(
block_ids=tf.placeholder(tf.int32, [None, None]),
block_mask=tf.placeholder(tf.int32, [None, None]),
block_segment_ids=tf.placeholder(tf.int32, [None, None]),
query_ids=tf.placeholder(tf.int32, [None, None]),
query_mask=tf.placeholder(tf.int32, [None, None]),
mask_query=tf.placeholder(tf.bool, [None])),
default_batch_size=8)
return hub.LatestModuleExporter("tf_hub", serving_input_fn, exports_to_keep=1)
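# Hedged sketch of wiring the pieces above into the tf.estimator API. The
# model_dir argument and the contents of `params` (which must supply every key
# read by model_fn and input_fn, with use_tpu=False for this non-TPU sketch)
# are assumptions; the real training binary may configure this differently,
# e.g. through TPUEstimator.
def _train_ict_sketch(params, model_dir):
  estimator = tf.estimator.Estimator(
      model_fn=model_fn, model_dir=model_dir, params=params)
  estimator.train(
      input_fn=functools.partial(input_fn, is_train=True),
      max_steps=params["num_train_steps"])
  return estimator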
|
|
from __future__ import print_function
from io import open
import struct
import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101
PCAP_LINKTYPE_LINUX_SLL = 113
PCAP_DLT_RAW = 12
PCAPNG_BOM = 0x1A2B3C4D
OPT_ENDOFOPT = 0
OPT_COMMENT = 1
#PCAPNG_BLOCKTYPE_SECTION_HEADER options
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4
#PCAPNG_INTERFACE_DESCRIPTION_BLOCK options
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14
# options for PCAPNG_ENHANCED_PACKET_BLOCK
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4
# values used in the blocktype field
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a
def pad4bytes(size):
if (size % 4) == 0:
return size
    return size + (4 - (size % 4))
class PCAP_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32()
self.vers_maj = v_uint16()
self.vers_min = v_uint16()
self.thiszone = v_uint32()
self.sigfigs = v_uint32()
self.snaplen = v_uint32()
self.linktype = v_uint32()
class PCAP_PACKET_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.tvsec = v_uint32()
self.tvusec = v_uint32()
self.caplen = v_uint32()
self.len = v_uint32()
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
'''
Used to read the block type & size when parsing the file
'''
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
'''
    Base class for the shared pcapng parsing style: variable-length options at
    the end of the block, followed by a duplicate copy of the block total length
'''
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
        #non-vstruct field; the section header sets this in pcb_bom(), other
        #block types take the endianness handed in by the caller
        self.bigend = bigend
def vsParse(self, bytez, offset=0):
startoff = offset
roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
#(blocksize-4): because we still need the trailing blocksize2
# apparently blocks can completely omit the options list and not
# even have the OPT_ENDOFOPT entry
while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
opt = PCAPNG_OPTION(bigend=self.bigend)
roff = opt.vsParse(bytez, roff)
if opt.code == OPT_ENDOFOPT:
break
self.options.vsAddElement(opt)
# append trailing blocksize2
bs2 = v_uint32(bigend=self.bigend)
self.vsAddField('blocksize2', bs2)
roff = bs2.vsParse(bytez, roff)
#pad, plus we skip
return pad4bytes(roff)
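# For reference, the generic on-disk layout that PCAPNG_BLOCK_PARENT.vsParse()
# above walks (per the pcapng spec) is:
#
#   +0     blocktype   (4 bytes)
#   +4     blocksize   (4 bytes, total length including both length fields)
#   +8     fixed body fields declared by the subclass
#   ...    zero or more options: code (2), optsize (2), value padded to 4
#          bytes, optionally terminated by an OPT_ENDOFOPT (code 0) entry
#   end-4  blocksize2  (4 bytes, duplicate of blocksize)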
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.bom = v_uint32(bigend=bigend)
self.vers_maj = v_uint16(bigend=bigend)
self.vers_min = v_uint16(bigend=bigend)
self.sectionsize = v_uint64(bigend=bigend)
self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
#self.blocksize2 = v_uint32(bigend=bigend)
def pcb_bom(self):
bom = self.vsGetField('bom')
if self.bom == PCAPNG_BOM:
#if it matches, then the endian of bom is correct
self.bigend = bom._vs_bigend
else:
self.bigend = not bom._vs_bigend
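        # Worked example of the check above: the section header stores the
        # byte-order magic 0x1A2B3C4D. If the field was parsed with the wrong
        # endianness it reads back as 0x4D3C2B1A, so a mismatch means the file
        # uses the opposite byte order from the one we parsed with.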
class PCAPNG_OPTION(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.code = v_uint16(bigend=bigend)
self.optsize = v_uint16(bigend=bigend)
self.bytes = v_bytes(0)
def pcb_optsize(self):
size = pad4bytes(self.optsize)
self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.linktype = v_uint16(bigend=bigend)
self.reserved = v_uint16(bigend=bigend)
self.snaplen = v_uint32(bigend=bigend)
self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
#self.blocksize2 = v_uint32(bigend=bigend)
def vsParse(self, bytez, offset=0):
'''
We need the tsresol value to adjust timestamp values, so pull it
out here
'''
        ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=offset)
self.tsresol = None
#default offset is 0
self.tsoffset = 0
#sys.stderr.write('PCAPNG_INTERFACE_DESCRIPTION_BLOCK: searching options')
for i, opt in self.options:
if opt.code == OPT_IF_TSRESOL:
self.tsresol = opt.bytes[0]
#sys.stderr.write('Got tsresol: 0x%x\n' % self.tsresol)
elif opt.code == OPT_IF_TSOFFSET:
fmt = '<Q'
if self.bigend:
fmt = '>Q'
self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
#sys.stderr.write('Got tsoffset: 0x%x\n' % self.tsoffset)
return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.interfaceid = v_uint32(bigend=bigend)
self.tstamphi = v_uint32(bigend=bigend)
self.tstamplow = v_uint32(bigend=bigend)
self.caplen = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
#self.blocksize2 = v_uint32(bigend=bigend)
def pcb_caplen(self):
size = pad4bytes(self.caplen)
self.vsGetField('data').vsSetLength(size)
def setPcapTimestamp(self, idb):
'''
        Adds libpcap-compatible tvsec and tvusec fields, based on the pcapng timestamp
'''
        #mirror the interface snaplen onto the packet for libpcap compatibility
        self.snaplen = idb.snaplen
tstamp = (self.tstamphi << 32) | self.tstamplow
scale = 1000000
if idb.tsresol is None:
            #if not set, assume the default 1e-6 (microsecond) resolution
pass
elif (0x80 & idb.tsresol) == 0:
# remaining bits are resolution, to a negative power of 10
scale = 10**(idb.tsresol & 0x7f)
else:
# remaining bits are resolution, to a negative power of 2
scale = 1 << (idb.tsresol & 0x7f)
        self.tvsec = (tstamp // scale) + idb.tsoffset
self.tvusec = tstamp % scale
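        # Worked examples of the scaling above (values are illustrative):
        #   tsresol = 6    -> high bit clear, scale = 10**6 (microsecond ticks)
        #   tsresol = 9    -> high bit clear, scale = 10**9 (nanosecond ticks)
        #   tsresol = 0x89 -> high bit set,   scale = 2**9 = 512 ticks/second
        # With tstamp = (tstamphi << 32) | tstamplow, the packet time is then
        # tvsec = tstamp // scale (+ tsoffset) and tvusec = tstamp % scale.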
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
'''
Note: no variable length options fields, so inheriting from vstruct directly
'''
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.blocksize2 = v_uint32(bigend=bigend)
def pcb_blocksize(self):
self.caplen = pad4bytes(self.blocksize - 16)
self.vsGetField('data').vsSetLength(self.caplen)
def setPcapTimestamp(self, idb):
#no timestamp in this type of block :(
self.tvsec = idb.tsoffset
self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
fd = open(filename, 'rb')
for x in iterPcapFile(fd, reuse=reuse):
yield x
def iterPcapFile(fd, reuse=False):
    '''
    Figure out whether the file is classic libpcap (tcpdump) format or pcapng,
    and dispatch to the matching iterator
    '''
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
fd.seek(0)
if h.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
return _iterPcapNgFile(fd, reuse)
return _iterPcapFile(fd, reuse)
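# Minimal usage sketch for the iterators above; 'capture.pcapng' is a
# placeholder path, and any pcap/pcapng file containing IPv4 traffic would do:
#
#   for pkt, ipv4, l4hdr, payload in iterPcapFileName('capture.pcapng'):
#       # l4hdr is a TCP, UDP, or ICMP vstruct; payload is the remaining bytes
#       print(pkt.tvsec, ipv4.proto, len(payload))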
def _iterPcapFile(fd, reuse=False):
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
linktype = h.linktype
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
pktsize = len(pkt)
eIIsize = len(eII)
ipv4 = vs_inet.IPv4()
ipv4size = 20
tcp_hdr = vs_inet.TCP()
udp_hdr = vs_inet.UDP()
icmp_hdr = vs_inet.ICMP()
go = True
while go:
hdr = fd.read(pktsize)
if len(hdr) != pktsize:
break
pkt.vsParse(hdr, fast=True)
b = fd.read(pkt.caplen)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(b) < eIIsize:
continue
eII.vsParse(b, 0, fast=True)
# No support for non-ip protocol yet...
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
continue
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
#print(eII.tree())
if not reuse:
ipv4 = vs_inet.IPv4()
if (len(b) - offset) < ipv4size:
continue
ipv4.vsParse(b, offset, fast=True)
# Make b *only* the IP datagram bytes...
b = b[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
continue
if not reuse:
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
yield pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
continue
if not reuse:
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
yield pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
continue
if not reuse:
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
yield pkt,ipv4,icmp_hdr,pdata
else:
pass
#print('UNHANDLED IP PROTOCOL: %d' % ipv4.proto)
def _iterPcapNgFile(fd, reuse=False):
header = PCAPNG_GENERIC_BLOCK_HEADER()
ifaceidx = 0
ifacedict = {}
roff = 0
bigend = False
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
while len(b0) == len(header):
header.vsParse(b0, fast=True)
body = fd.read(header.blocksize)
if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
shb = PCAPNG_SECTION_HEADER_BLOCK()
roff = shb.vsParse(body)
bigend = shb.bigend
#reset interface stuff since we're in a new section
ifaceidx = 0
ifacedict = {}
elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
roff = idb.vsParse(body)
#save off the interface for later reference
ifacedict[ifaceidx] = idb
ifaceidx += 1
        elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
            spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
            roff = spb.vsParse(body)
            #simple packet blocks implicitly belong to interface 0
            iface = ifacedict.get(0)
            spb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, spb)
            if tup is not None:
                #if it is None, just fall through & read next block
                yield tup
elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
roff = epb.vsParse(body)
iface = ifacedict.get(epb.interfaceid)
epb.setPcapTimestamp(iface)
tup = _parsePcapngPacketBytes(iface.linktype, epb)
if tup is not None:
#if tup is None, just fall through & read next block
yield tup
#TODO: other blocks needed?
#PCAPNG_BLOCKTYPE_PACKET (obsolete)
#PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
#PCAPNG_BLOCKTYPE_INTERFACE_STATS:
else:
#print('Unknown block type: 0x%08x: 0x%08x 0x%08x bytes' % (roff, header.blocktype, header.blocksize))
pass
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
'''
pkt is either a parsed PCAPNG_SIMPLE_PACKET_BLOCK or PCAPNG_ENHANCED_PACKET_BLOCK
    On success, returns a tuple (pcapng_pkt, ipv4_vstruct, transport_vstruct, pdata)
Returns None if the packet can't be parsed
'''
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
#pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
eIIsize = len(eII)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(pkt.data) < eIIsize:
return None
eII.vsParse(pkt.data, 0, fast=True)
# No support for non-ip protocol yet...
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
return None
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
ipv4 = vs_inet.IPv4()
if (len(pkt.data) - offset) < len(ipv4):
return None
ipv4.vsParse(pkt.data, offset, fast=True)
# Make b *only* the IP datagram bytes...
b = pkt.data[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
return None
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
return pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
return None
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
return pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
return None
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
return pkt,ipv4,icmp_hdr,pdata
else:
pass
#print('UNHANDLED IP PROTOCOL: %d' % ipv4.proto)
return None
|
|
from __future__ import absolute_import, division, print_function
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import ssl
import sys
from tornado.escape import to_unicode, utf8
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders, ResponseStartLine
from tornado.ioloop import IOLoop
from tornado.iostream import UnsatisfiableReadError
from tornado.locks import Event
from tornado.log import gen_log
from tornado.concurrent import Future
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, HTTPStreamClosedError, HTTPTimeoutError
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler, RedirectHandler # noqa: E501
from tornado.test import httpclient_test
from tornado.testing import (AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase,
ExpectLog, gen_test)
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, skipBefore35, exec_test
from tornado.web import RequestHandler, Application, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@gen.coroutine
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
never_finish = Event()
yield never_finish.wait()
class HangHandler(RequestHandler):
@gen.coroutine
def get(self):
never_finish = Event()
yield never_finish.wait()
class ContentLengthHandler(RequestHandler):
def get(self):
self.stream = self.detach()
IOLoop.current().spawn_callback(self.write_response)
@gen.coroutine
def write_response(self):
yield self.stream.write(utf8("HTTP/1.0 200 OK\r\nContent-Length: %s\r\n\r\nok" %
self.get_argument("value")))
self.stream.close()
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
self.set_status(204)
self.finish()
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
def get(self):
if self.request.version.startswith('HTTP/1'):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.detach()
stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
else:
self.finish('HTTP/1 required')
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
url("/redirect", RedirectHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient() is
SimpleAsyncHTTPClient())
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient() is not
SimpleAsyncHTTPClient(force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
client1 = self.io_loop.run_sync(gen.coroutine(SimpleAsyncHTTPClient))
client2 = io_loop2.run_sync(gen.coroutine(SimpleAsyncHTTPClient))
self.assertTrue(client1 is not client2)
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger")).add_done_callback(
lambda fut, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
@gen_test
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
response = yield client.fetch(self.get_url('/countdown/3'),
max_redirects=3)
response.rethrow()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
# Chunked encoding bypasses the MIN_LENGTH check.
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
@gen_test
def test_connect_timeout(self):
timeout = 0.1
class TimeoutResolver(Resolver):
def resolve(self, *args, **kwargs):
return Future() # never completes
with closing(self.create_client(resolver=TimeoutResolver())) as client:
with self.assertRaises(HTTPTimeoutError):
yield client.fetch(self.get_url('/hello'),
connect_timeout=timeout,
request_timeout=3600,
raise_error=True)
@skipOnTravis
def test_request_timeout(self):
timeout = 0.1
if os.name == 'nt':
timeout = 0.5
with self.assertRaises(HTTPTimeoutError):
self.fetch('/trigger?wake=false', request_timeout=timeout, raise_error=True)
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
with self.assertRaises(Exception):
self.fetch(url, allow_ipv6=False, raise_error=True)
response = self.fetch(url)
self.assertEqual(response.body, b"Hello world!")
def test_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
with ExpectLog(gen_log, ".*Multiple unequal Content-Lengths"):
with self.assertRaises(HTTPStreamClosedError):
self.fetch("/content_length?value=2,4", raise_error=True)
with self.assertRaises(HTTPStreamClosedError):
self.fetch("/content_length?value=2,%202,3", raise_error=True)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status shouldn't have a content-length
#
# Tests with a content-length header are included below
# in HTTP204NoContentTestCase.
self.assertNotIn("Content-Length", response.headers)
def test_host_header(self):
host_re = re.compile(b"^127.0.0.1:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
response = self.fetch(url)
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with ExpectLog(gen_log, ".*", required=False):
with self.assertRaises(socket.error) as cm:
self.fetch("http://127.0.0.1:%d/" % port, raise_error=True)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(cm.exception)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(cm.exception)
self.assertTrue(contains_errno, cm.exception)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(cm.exception),
cm.exception)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
# Wait for the trigger request to block, not complete.
fut1 = client.fetch(self.get_url('/trigger'), request_timeout=10)
self.wait()
with self.assertRaises(HTTPTimeoutError) as cm:
self.io_loop.run_sync(lambda: client.fetch(
self.get_url('/hello'), connect_timeout=0.1, raise_error=True))
self.assertEqual(str(cm.exception), "Timeout in request queue")
self.triggers.popleft()()
self.io_loop.run_sync(lambda: fut1)
def test_no_content_length(self):
response = self.fetch("/no_content_length")
if response.body == b"HTTP/1 required":
self.skipTest("requires HTTP/1.x")
else:
            self.assertEqual(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.moment
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
@skipBefore35
def test_native_body_producer_chunked(self):
namespace = exec_test(globals(), locals(), """
async def body_producer(write):
await write(b'1234')
import asyncio
await asyncio.sleep(0)
await write(b'5678')
""")
response = self.fetch("/echo_post", method="POST",
body_producer=namespace["body_producer"])
response.rethrow()
self.assertEqual(response.body, b"12345678")
@skipBefore35
def test_native_body_producer_content_length(self):
namespace = exec_test(globals(), locals(), """
async def body_producer(write):
await write(b'1234')
import asyncio
await asyncio.sleep(0)
await write(b'5678')
""")
response = self.fetch("/echo_post", method="POST",
body_producer=namespace["body_producer"],
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
def test_streaming_follow_redirects(self):
# When following redirects, header and streaming callbacks
# should only be called for the final result.
# TODO(bdarnell): this test belongs in httpclient_test instead of
# simple_httpclient_test, but it fails with the version of libcurl
# available on travis-ci. Move it when that has been upgraded
# or we have a better framework to skip tests based on curl version.
headers = []
chunks = []
self.fetch("/redirect?url=/hello",
header_callback=headers.append,
streaming_callback=chunks.append)
chunks = list(map(to_unicode, chunks))
self.assertEqual(chunks, ['Hello world!'])
# Make sure we only got one set of headers.
num_start_lines = len([h for h in headers if h.startswith("HTTP/")])
self.assertEqual(num_start_lines, 1)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True, **kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
def test_ssl_options(self):
resp = self.fetch("/hello", ssl_options={})
self.assertEqual(resp.body, b"Hello world!")
def test_ssl_context(self):
resp = self.fetch("/hello",
ssl_options=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
self.assertEqual(resp.body, b"Hello world!")
def test_ssl_options_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception",
required=False):
with self.assertRaises(ssl.SSLError):
self.fetch(
"/hello", ssl_options=dict(cert_reqs=ssl.CERT_REQUIRED),
raise_error=True)
def test_ssl_context_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
with self.assertRaises(ssl.SSLError):
self.fetch("/hello", ssl_options=ctx, raise_error=True)
def test_error_logging(self):
# No stack traces are logged for SSL errors (in this case,
# failure to validate the testing self-signed cert).
# The SSLError is exposed through ssl.SSLError.
with ExpectLog(gen_log, '.*') as expect_log:
with self.assertRaises(ssl.SSLError):
self.fetch("/", validate_cert=True, raise_error=True)
self.assertFalse(expect_log.logged_stack)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
self.request = request
fut = self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n")
fut.add_done_callback(self.respond_200)
def respond_200(self, fut):
fut.result()
fut = self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA")
fut.add_done_callback(lambda f: self.request.connection.stream.close())
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
# Close the request cleanly in HTTP/2; it will be skipped anyway.
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
        # A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). We simulate here a
# server that sends no content length and does not close the connection.
#
# Tests of a 204 response with no Content-Length header are included
# in SimpleHTTPClientTestMixin.
stream = request.connection.detach()
stream.write(b"HTTP/1.1 204 No content\r\n")
if request.arguments.get("error", [False])[-1]:
stream.write(b"Content-Length: 5\r\n")
else:
stream.write(b"Content-Length: 0\r\n")
stream.write(b"\r\n")
stream.close()
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
def test_204_invalid_content_length(self):
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, ".*Response with code 204 should not have body"):
with self.assertRaises(HTTPStreamClosedError):
self.fetch("/?error=1", raise_error=True)
if not self.http1:
self.skipTest("requires HTTP/1.x")
if self.http_client.configured_class != SimpleAsyncHTTPClient:
self.skipTest("curl client accepts invalid headers")
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
response = self.fetch(
'http://www.example.com:%d/hello' % self.get_http_port())
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
response = self.fetch('http://foo.example.com:8000/hello')
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never finishes.
class BadResolver(Resolver):
@gen.coroutine
def resolve(self, *args, **kwargs):
yield Event().wait()
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
with self.assertRaises(HTTPTimeoutError):
self.fetch('/hello', connect_timeout=0.1, raise_error=True)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
with self.assertRaises(UnsatisfiableReadError):
self.fetch('/large', raise_error=True)
class MaxBodySizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 64)
class LargeBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 100)
return Application([('/small', SmallBody),
('/large', LargeBody)])
def get_http_client(self):
return SimpleAsyncHTTPClient(max_body_size=1024 * 64)
def test_small_body(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'a' * 1024 * 64)
def test_large_body(self):
with ExpectLog(gen_log, "Malformed HTTP message from None: Content-Length too long"):
with self.assertRaises(HTTPStreamClosedError):
self.fetch('/large', raise_error=True)
class MaxBufferSizeTest(AsyncHTTPTestCase):
def get_app(self):
class LargeBody(RequestHandler):
def get(self):
self.write("a" * 1024 * 100)
return Application([('/large', LargeBody)])
def get_http_client(self):
# 100KB body with 64KB buffer
return SimpleAsyncHTTPClient(max_body_size=1024 * 100, max_buffer_size=1024 * 64)
def test_large_body(self):
response = self.fetch('/large')
response.rethrow()
self.assertEqual(response.body, b'a' * 1024 * 100)
class ChunkedWithContentLengthTest(AsyncHTTPTestCase):
def get_app(self):
class ChunkedWithContentLength(RequestHandler):
def get(self):
# Add an invalid Transfer-Encoding to the response
self.set_header('Transfer-Encoding', 'chunked')
self.write("Hello world")
return Application([('/chunkwithcl', ChunkedWithContentLength)])
def get_http_client(self):
return SimpleAsyncHTTPClient()
def test_chunked_with_content_length(self):
# Make sure the invalid headers are detected
with ExpectLog(gen_log, ("Malformed HTTP message from None: Response "
"with both Transfer-Encoding and Content-Length")):
with self.assertRaises(HTTPStreamClosedError):
self.fetch('/chunkwithcl', raise_error=True)
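# Standalone sketch (not part of the test suite) of the client these tests
# exercise; the URL below is a placeholder assumption, not a server the suite
# provides.
def _example_fetch(url="http://127.0.0.1:8888/hello"):
    client = SimpleAsyncHTTPClient(force_instance=True, max_clients=2)
    try:
        response = IOLoop.current().run_sync(
            lambda: client.fetch(url, request_timeout=5))
        return response.body
    finally:
        client.close()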
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomIPPrefixesOperations:
"""CustomIPPrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified custom IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the CustomIpPrefix.
:type custom_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
custom_ip_prefix_name=custom_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def get(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.CustomIpPrefix":
"""Gets the specified custom IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the custom IP prefix.
:type custom_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomIpPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.CustomIpPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
parameters: "_models.CustomIpPrefix",
**kwargs: Any
) -> "_models.CustomIpPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CustomIpPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
parameters: "_models.CustomIpPrefix",
**kwargs: Any
) -> AsyncLROPoller["_models.CustomIpPrefix"]:
"""Creates or updates a custom IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the custom IP prefix.
:type custom_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update custom IP prefix operation.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.CustomIpPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomIpPrefix or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.CustomIpPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
custom_ip_prefix_name=custom_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
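    # Hedged caller-side sketch of the long-running-operation pattern above;
    # the client variable, resource names, and CustomIpPrefix fields are
    # assumptions, not part of this module:
    #
    #   poller = await network_client.custom_ip_prefixes.begin_create_or_update(
    #       resource_group_name="example-rg",
    #       custom_ip_prefix_name="example-prefix",
    #       parameters=CustomIpPrefix(location="westus"))
    #   prefix = await poller.result()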
async def update_tags(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.CustomIpPrefix":
"""Updates custom IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the custom IP prefix.
:type custom_ip_prefix_name: str
:param parameters: Parameters supplied to update custom IP prefix tags.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomIpPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.CustomIpPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.CustomIpPrefixListResult"]:
"""Gets all the custom IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomIpPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.CustomIpPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomIpPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/customIpPrefixes'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CustomIpPrefixListResult"]:
"""Gets all custom IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomIpPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.CustomIpPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomIpPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes'} # type: ignore
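# --- Illustrative usage sketch (not part of the generated SDK above). ---
# A minimal example of how these async operations might be driven. The client
# and credential classes imported below (NetworkManagementClient from
# azure.mgmt.network.aio, DefaultAzureCredential from azure.identity.aio), the
# resource names, and the request payload shape are assumptions for
# illustration only; the helper is never called from this module.
async def _example_custom_ip_prefix_usage():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # begin_create_or_update returns an AsyncLROPoller; result() waits
            # for the long-running operation to complete.
            poller = await client.custom_ip_prefixes.begin_create_or_update(
                "example-rg",
                "example-prefix",
                {"location": "westus", "cidr": "<hypothetical-cidr>"},
            )
            prefix = await poller.result()
            print(prefix.name)
            # list() returns an AsyncItemPaged that fetches pages lazily.
            async for item in client.custom_ip_prefixes.list("example-rg"):
                print(item.name)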
|
|
"""Support to manage a shopping list."""
import asyncio
import logging
import uuid
import voluptuous as vol
from homeassistant.const import HTTP_NOT_FOUND, HTTP_BAD_REQUEST
from homeassistant.core import callback
from homeassistant.components import http
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.helpers import intent
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from homeassistant.components import websocket_api
ATTR_NAME = "name"
DOMAIN = "shopping_list"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({DOMAIN: {}}, extra=vol.ALLOW_EXTRA)
EVENT = "shopping_list_updated"
INTENT_ADD_ITEM = "HassShoppingListAddItem"
INTENT_LAST_ITEMS = "HassShoppingListLastItems"
ITEM_UPDATE_SCHEMA = vol.Schema({"complete": bool, ATTR_NAME: str})
PERSISTENCE = ".shopping_list.json"
SERVICE_ADD_ITEM = "add_item"
SERVICE_COMPLETE_ITEM = "complete_item"
SERVICE_ITEM_SCHEMA = vol.Schema({vol.Required(ATTR_NAME): vol.Any(None, cv.string)})
WS_TYPE_SHOPPING_LIST_ITEMS = "shopping_list/items"
WS_TYPE_SHOPPING_LIST_ADD_ITEM = "shopping_list/items/add"
WS_TYPE_SHOPPING_LIST_UPDATE_ITEM = "shopping_list/items/update"
WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS = "shopping_list/items/clear"
SCHEMA_WEBSOCKET_ITEMS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_SHOPPING_LIST_ITEMS}
)
SCHEMA_WEBSOCKET_ADD_ITEM = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_SHOPPING_LIST_ADD_ITEM, vol.Required("name"): str}
)
SCHEMA_WEBSOCKET_UPDATE_ITEM = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_SHOPPING_LIST_UPDATE_ITEM,
vol.Required("item_id"): str,
vol.Optional("name"): str,
vol.Optional("complete"): bool,
}
)
SCHEMA_WEBSOCKET_CLEAR_ITEMS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS}
)
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the shopping list."""
@asyncio.coroutine
def add_item_service(call):
"""Add an item with `name`."""
data = hass.data[DOMAIN]
name = call.data.get(ATTR_NAME)
if name is not None:
data.async_add(name)
@asyncio.coroutine
def complete_item_service(call):
"""Mark the item provided via `name` as completed."""
data = hass.data[DOMAIN]
name = call.data.get(ATTR_NAME)
if name is None:
return
try:
item = [item for item in data.items if item["name"] == name][0]
except IndexError:
_LOGGER.error("Removing of item failed: %s cannot be found", name)
else:
data.async_update(item["id"], {"name": name, "complete": True})
data = hass.data[DOMAIN] = ShoppingData(hass)
yield from data.async_load()
intent.async_register(hass, AddItemIntent())
intent.async_register(hass, ListTopItemsIntent())
hass.services.async_register(
DOMAIN, SERVICE_ADD_ITEM, add_item_service, schema=SERVICE_ITEM_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_COMPLETE_ITEM, complete_item_service, schema=SERVICE_ITEM_SCHEMA
)
hass.http.register_view(ShoppingListView)
hass.http.register_view(CreateShoppingListItemView)
hass.http.register_view(UpdateShoppingListItemView)
hass.http.register_view(ClearCompletedItemsView)
hass.components.conversation.async_register(
INTENT_ADD_ITEM, ["Add [the] [a] [an] {item} to my shopping list"]
)
hass.components.conversation.async_register(
INTENT_LAST_ITEMS, ["What is on my shopping list"]
)
hass.components.frontend.async_register_built_in_panel(
"shopping-list", "shopping_list", "mdi:cart"
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_ITEMS, websocket_handle_items, SCHEMA_WEBSOCKET_ITEMS
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_ADD_ITEM, websocket_handle_add, SCHEMA_WEBSOCKET_ADD_ITEM
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_UPDATE_ITEM,
websocket_handle_update,
SCHEMA_WEBSOCKET_UPDATE_ITEM,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SHOPPING_LIST_CLEAR_ITEMS,
websocket_handle_clear,
SCHEMA_WEBSOCKET_CLEAR_ITEMS,
)
return True
class ShoppingData:
"""Class to hold shopping list data."""
def __init__(self, hass):
"""Initialize the shopping list."""
self.hass = hass
self.items = []
@callback
def async_add(self, name):
"""Add a shopping list item."""
item = {"name": name, "id": uuid.uuid4().hex, "complete": False}
self.items.append(item)
self.hass.async_add_job(self.save)
return item
@callback
def async_update(self, item_id, info):
"""Update a shopping list item."""
item = next((itm for itm in self.items if itm["id"] == item_id), None)
if item is None:
raise KeyError
info = ITEM_UPDATE_SCHEMA(info)
item.update(info)
self.hass.async_add_job(self.save)
return item
@callback
def async_clear_completed(self):
"""Clear completed items."""
self.items = [itm for itm in self.items if not itm["complete"]]
self.hass.async_add_job(self.save)
@asyncio.coroutine
def async_load(self):
"""Load items."""
def load():
"""Load the items synchronously."""
return load_json(self.hass.config.path(PERSISTENCE), default=[])
self.items = yield from self.hass.async_add_job(load)
def save(self):
"""Save the items."""
save_json(self.hass.config.path(PERSISTENCE), self.items)
class AddItemIntent(intent.IntentHandler):
"""Handle AddItem intents."""
intent_type = INTENT_ADD_ITEM
slot_schema = {"item": cv.string}
@asyncio.coroutine
def async_handle(self, intent_obj):
"""Handle the intent."""
slots = self.async_validate_slots(intent_obj.slots)
item = slots["item"]["value"]
intent_obj.hass.data[DOMAIN].async_add(item)
response = intent_obj.create_response()
response.async_set_speech("I've added {} to your shopping list".format(item))
intent_obj.hass.bus.async_fire(EVENT)
return response
class ListTopItemsIntent(intent.IntentHandler):
"""Handle AddItem intents."""
intent_type = INTENT_LAST_ITEMS
slot_schema = {"item": cv.string}
@asyncio.coroutine
def async_handle(self, intent_obj):
"""Handle the intent."""
items = intent_obj.hass.data[DOMAIN].items[-5:]
response = intent_obj.create_response()
if not items:
response.async_set_speech("There are no items on your shopping list")
else:
response.async_set_speech(
"These are the top {} items on your shopping list: {}".format(
min(len(items), 5),
", ".join(itm["name"] for itm in reversed(items)),
)
)
return response
class ShoppingListView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = "/api/shopping_list"
name = "api:shopping_list"
@callback
def get(self, request):
"""Retrieve shopping list items."""
return self.json(request.app["hass"].data[DOMAIN].items)
class UpdateShoppingListItemView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = "/api/shopping_list/item/{item_id}"
name = "api:shopping_list:item:id"
async def post(self, request, item_id):
"""Update a shopping list item."""
data = await request.json()
try:
item = request.app["hass"].data[DOMAIN].async_update(item_id, data)
request.app["hass"].bus.async_fire(EVENT)
return self.json(item)
except KeyError:
return self.json_message("Item not found", HTTP_NOT_FOUND)
except vol.Invalid:
return self.json_message("Item not found", HTTP_BAD_REQUEST)
class CreateShoppingListItemView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = "/api/shopping_list/item"
name = "api:shopping_list:item"
@RequestDataValidator(vol.Schema({vol.Required("name"): str}))
@asyncio.coroutine
def post(self, request, data):
"""Create a new shopping list item."""
item = request.app["hass"].data[DOMAIN].async_add(data["name"])
request.app["hass"].bus.async_fire(EVENT)
return self.json(item)
class ClearCompletedItemsView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = "/api/shopping_list/clear_completed"
name = "api:shopping_list:clear_completed"
@callback
def post(self, request):
"""Retrieve if API is running."""
hass = request.app["hass"]
hass.data[DOMAIN].async_clear_completed()
hass.bus.async_fire(EVENT)
return self.json_message("Cleared completed items.")
@callback
def websocket_handle_items(hass, connection, msg):
"""Handle get shopping_list items."""
connection.send_message(
websocket_api.result_message(msg["id"], hass.data[DOMAIN].items)
)
@callback
def websocket_handle_add(hass, connection, msg):
"""Handle add item to shopping_list."""
item = hass.data[DOMAIN].async_add(msg["name"])
hass.bus.async_fire(EVENT)
connection.send_message(websocket_api.result_message(msg["id"], item))
@websocket_api.async_response
async def websocket_handle_update(hass, connection, msg):
"""Handle update shopping_list item."""
msg_id = msg.pop("id")
item_id = msg.pop("item_id")
msg.pop("type")
data = msg
try:
item = hass.data[DOMAIN].async_update(item_id, data)
hass.bus.async_fire(EVENT)
connection.send_message(websocket_api.result_message(msg_id, item))
except KeyError:
connection.send_message(
websocket_api.error_message(msg_id, "item_not_found", "Item not found")
)
@callback
def websocket_handle_clear(hass, connection, msg):
"""Handle clearing shopping_list items."""
hass.data[DOMAIN].async_clear_completed()
hass.bus.async_fire(EVENT)
connection.send_message(websocket_api.result_message(msg["id"]))
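# --- Illustrative usage sketch (not part of the integration above). ---
# A minimal example of how the service and websocket commands registered in
# async_setup() might be exercised from other Home Assistant code. The helper
# is hypothetical and never called here; it only shows the expected payload
# shapes.
async def _example_shopping_list_usage(hass):
    """Hypothetical demo: add an item via the service, then read items back."""
    # Invokes the add_item service registered above; the service data schema is
    # SERVICE_ITEM_SCHEMA, i.e. {"name": <string or None>}.
    await hass.services.async_call(
        DOMAIN, SERVICE_ADD_ITEM, {ATTR_NAME: "milk"}, blocking=True
    )
    # The in-memory ShoppingData object holds the authoritative item list.
    items = hass.data[DOMAIN].items
    # A websocket client would request the same data with a message such as
    # {"id": 5, "type": WS_TYPE_SHOPPING_LIST_ITEMS} and receive the items in
    # the corresponding result message.
    return items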
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_db.sqlalchemy import utils as sa_utils
from oslo_utils import timeutils as tu
import six
from senlin.common import consts
from senlin.common import exception
from senlin.db.sqlalchemy import api as db_api
from senlin.engine import parser
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit.db import shared
class DBAPIProfileTest(base.SenlinTestCase):
def setUp(self):
super(DBAPIProfileTest, self).setUp()
self.ctx = utils.dummy_context()
def test_profile_create(self):
data = parser.simple_parse(shared.sample_profile)
profile = shared.create_profile(self.ctx)
self.assertIsNotNone(profile.id)
self.assertEqual(data['name'], profile.name)
self.assertEqual(data['type'], profile.type)
self.assertEqual(data['spec'], profile.spec)
def test_profile_get(self):
profile = shared.create_profile(self.ctx)
retobj = db_api.profile_get(self.ctx, profile.id)
self.assertEqual(profile.id, retobj.id)
self.assertEqual(profile.spec, retobj.spec)
def test_profile_get_diff_project(self):
profile = shared.create_profile(self.ctx)
new_ctx = utils.dummy_context(project='a-different-project')
res = db_api.profile_get(new_ctx, profile.id)
self.assertIsNone(res)
res = db_api.profile_get(new_ctx, profile.id, project_safe=False)
self.assertIsNotNone(res)
self.assertEqual(profile.id, res.id)
def test_profile_get_admin_context(self):
profile = shared.create_profile(self.ctx)
admin_ctx = utils.dummy_context(project='a-different-project',
is_admin=True)
res = db_api.profile_get(admin_ctx, profile.id, project_safe=True)
self.assertIsNotNone(res)
def test_profile_get_not_found(self):
profile = db_api.profile_get(self.ctx, 'BogusProfileID')
self.assertIsNone(profile)
def test_profile_get_by_name(self):
profile_name = 'my_best_profile'
# before creation
profile = db_api.profile_get_by_name(self.ctx, profile_name)
self.assertIsNone(profile)
profile = shared.create_profile(self.ctx, name=profile_name)
# after creation
retobj = db_api.profile_get_by_name(self.ctx, profile_name)
self.assertIsNotNone(retobj)
self.assertEqual(profile_name, retobj.name)
# bad name
retobj = db_api.profile_get_by_name(self.ctx, 'non-exist')
self.assertIsNone(retobj)
def test_profile_get_by_name_diff_project(self):
profile_name = 'my_best_profile'
shared.create_profile(self.ctx, name=profile_name)
new_ctx = utils.dummy_context(project='a-different-project')
res = db_api.profile_get_by_name(new_ctx, profile_name)
self.assertIsNone(res)
res = db_api.profile_get_by_name(new_ctx, profile_name,
project_safe=False)
self.assertIsNotNone(res)
self.assertEqual(profile_name, res.name)
def test_profile_get_by_short_id(self):
profile_ids = ['same-part-unique-part',
'same-part-part-unique']
for pid in profile_ids:
shared.create_profile(self.ctx, id=pid)
# verify creation with set ID
profile = db_api.profile_get(self.ctx, pid)
self.assertIsNotNone(profile)
self.assertEqual(pid, profile.id)
# too short -> multiple choices
for x in range(len('same-part-')):
self.assertRaises(exception.MultipleChoices,
db_api.profile_get_by_short_id,
self.ctx, profile_ids[0][:x])
# ids are unique
profile = db_api.profile_get_by_short_id(self.ctx, profile_ids[0][:11])
self.assertEqual(profile_ids[0], profile.id)
profile = db_api.profile_get_by_short_id(self.ctx, profile_ids[1][:11])
self.assertEqual(profile_ids[1], profile.id)
# bad ids
res = db_api.profile_get_by_short_id(self.ctx, 'non-existent')
self.assertIsNone(res)
def test_profile_get_by_short_id_diff_project(self):
profile_id = 'same-part-unique-part'
shared.create_profile(self.ctx, id=profile_id)
new_ctx = utils.dummy_context(project='a-different-project')
res = db_api.profile_get_by_short_id(new_ctx, profile_id)
self.assertIsNone(res)
res = db_api.profile_get_by_short_id(new_ctx, profile_id,
project_safe=False)
self.assertIsNotNone(res)
self.assertEqual(profile_id, res.id)
def test_profile_get_all(self):
ids = ['profile1', 'profile2']
for pid in ids:
shared.create_profile(self.ctx, id=pid)
profiles = db_api.profile_get_all(self.ctx)
self.assertEqual(2, len(profiles))
profile_ids = [p.id for p in profiles]
for pid in ids:
self.assertIn(pid, profile_ids)
db_api.profile_delete(self.ctx, profiles[1].id)
# after delete one of them
profiles = db_api.profile_get_all(self.ctx)
self.assertEqual(1, len(profiles))
# after delete both profiles
db_api.profile_delete(self.ctx, profiles[0].id)
profiles = db_api.profile_get_all(self.ctx)
self.assertEqual(0, len(profiles))
def test_profile_get_all_diff_project(self):
ids = ['profile1', 'profile2']
for pid in ids:
shared.create_profile(self.ctx, id=pid)
new_ctx = utils.dummy_context(project='a-different-project')
profiles = db_api.profile_get_all(new_ctx)
self.assertEqual(0, len(profiles))
profiles = db_api.profile_get_all(new_ctx, project_safe=False)
self.assertEqual(2, len(profiles))
def test_profile_get_all_admin_context(self):
ids = ['profile1', 'profile2']
for pid in ids:
shared.create_profile(self.ctx, id=pid)
admin_ctx = utils.dummy_context(project='a-different-project',
is_admin=True)
profiles = db_api.profile_get_all(admin_ctx, project_safe=True)
self.assertEqual(2, len(profiles))
def test_profile_get_all_with_limit_marker(self):
ids = ['profile1', 'profile2', 'profile3']
for pid in ids:
timestamp = tu.utcnow()
shared.create_profile(self.ctx, id=pid, created_at=timestamp)
# different limit settings
profiles = db_api.profile_get_all(self.ctx, limit=1)
self.assertEqual(1, len(profiles))
profiles = db_api.profile_get_all(self.ctx, limit=2)
self.assertEqual(2, len(profiles))
# a large limit
profiles = db_api.profile_get_all(self.ctx, limit=5)
self.assertEqual(3, len(profiles))
# use marker here
profiles = db_api.profile_get_all(self.ctx, marker='profile1')
self.assertEqual(2, len(profiles))
profiles = db_api.profile_get_all(self.ctx, marker='profile2')
self.assertEqual(1, len(profiles))
profiles = db_api.profile_get_all(self.ctx, marker='profile3')
self.assertEqual(0, len(profiles))
profiles = db_api.profile_get_all(self.ctx, limit=1, marker='profile1')
self.assertEqual(1, len(profiles))
@mock.patch.object(sa_utils, 'paginate_query')
def test_profile_get_all_used_sort_keys(self, mock_paginate):
ids = ['profile1', 'profile2', 'profile3']
for pid in ids:
shared.create_profile(self.ctx, id=pid)
sort_keys = consts.PROFILE_SORT_KEYS
db_api.profile_get_all(self.ctx, sort=','.join(sort_keys))
args = mock_paginate.call_args[0]
sort_keys.append('id')
self.assertEqual(set(sort_keys), set(args[3]))
def test_profile_get_all_sorting(self):
values = [{'id': '001', 'name': 'profile1', 'type': 'C'},
{'id': '002', 'name': 'profile3', 'type': 'B'},
{'id': '003', 'name': 'profile2', 'type': 'A'}]
for v in values:
shared.create_profile(self.ctx, **v)
# Sorted by name,type
profiles = db_api.profile_get_all(self.ctx, sort='name,type')
self.assertEqual(3, len(profiles))
self.assertEqual('001', profiles[0].id)
self.assertEqual('003', profiles[1].id)
self.assertEqual('002', profiles[2].id)
# Sorted by type,name (ascending)
profiles = db_api.profile_get_all(self.ctx, sort='type,name')
self.assertEqual(3, len(profiles))
self.assertEqual('003', profiles[0].id)
self.assertEqual('002', profiles[1].id)
self.assertEqual('001', profiles[2].id)
# Sorted by type,name (descending)
profiles = db_api.profile_get_all(self.ctx, sort='type:desc,name:desc')
self.assertEqual(3, len(profiles))
self.assertEqual('001', profiles[0].id)
self.assertEqual('002', profiles[1].id)
self.assertEqual('003', profiles[2].id)
def test_profile_get_all_default_sorting(self):
profiles = []
for x in range(3):
profile = shared.create_profile(self.ctx, created_at=tu.utcnow())
profiles.append(profile)
results = db_api.profile_get_all(self.ctx)
self.assertEqual(3, len(results))
self.assertEqual(profiles[0].id, results[0].id)
self.assertEqual(profiles[1].id, results[1].id)
self.assertEqual(profiles[2].id, results[2].id)
def test_profile_get_all_with_filters(self):
for name in ['profile1', 'profile2']:
shared.create_profile(self.ctx, name=name)
filters = {'name': ['profile1', 'profilex']}
results = db_api.profile_get_all(self.ctx, filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('profile1', results[0]['name'])
filters = {'name': 'profile1'}
results = db_api.profile_get_all(self.ctx, filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('profile1', results[0]['name'])
def test_profile_get_all_with_empty_filters(self):
for name in ['profile1', 'profile2']:
shared.create_profile(self.ctx, name=name)
filters = None
results = db_api.profile_get_all(self.ctx, filters=filters)
self.assertEqual(2, len(results))
def test_profile_update(self):
new_fields = {
'name': 'test_profile_name_2',
'type': 'my_test_profile_type',
'spec': {
'template': {
'heat_template_version': '2013-05-23',
'resources': {
'myrandom': 'OS::Heat::RandomString',
},
},
'files': {
'myfile': 'new contents',
},
},
}
old_profile = shared.create_profile(self.ctx)
new_profile = db_api.profile_update(self.ctx, old_profile.id,
new_fields)
self.assertEqual(old_profile.id, new_profile.id)
self.assertEqual(new_fields['name'], new_profile.name)
self.assertEqual('test_profile_name_2', new_profile.name)
def test_profile_update_not_found(self):
self.assertRaises(exception.ProfileNotFound,
db_api.profile_update,
self.ctx, 'BogusID', {})
def test_profile_delete(self):
profile = shared.create_profile(self.ctx)
self.assertIsNotNone(profile)
profile_id = profile.id
db_api.profile_delete(self.ctx, profile_id)
profile = db_api.profile_get(self.ctx, profile_id)
self.assertIsNone(profile)
# not found in delete is okay
res = db_api.profile_delete(self.ctx, profile_id)
self.assertIsNone(res)
def test_profile_delete_profile_used_by_cluster(self):
profile = shared.create_profile(self.ctx)
cluster = shared.create_cluster(self.ctx, profile)
profile_id = profile.id
ex = self.assertRaises(exception.ResourceBusyError,
db_api.profile_delete, self.ctx, profile_id)
self.assertEqual('The profile (%s) is busy now.' % profile_id,
six.text_type(ex))
db_api.cluster_delete(self.ctx, cluster.id)
db_api.profile_delete(self.ctx, profile_id)
def test_profile_delete_profile_used_by_node(self):
profile = shared.create_profile(self.ctx)
node = shared.create_node(self.ctx, None, profile)
profile_id = profile.id
ex = self.assertRaises(exception.ResourceBusyError,
db_api.profile_delete, self.ctx, profile_id)
self.assertEqual('The profile (%s) is busy now.' % profile_id,
six.text_type(ex))
db_api.node_delete(self.ctx, node.id)
db_api.profile_delete(self.ctx, profile_id)
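# --- Illustrative sketch (not part of senlin). ---
# The short-id tests above rely on prefix matching: a prefix shared by several
# stored IDs raises exception.MultipleChoices, a prefix matching exactly one ID
# resolves to that profile, and an unknown prefix yields None. The standalone
# helper below mimics that contract on a plain list of ID strings, purely for
# illustration; it does not touch the real DB API.
def _match_short_id(ids, short_id):
    """Hypothetical prefix matcher mirroring profile_get_by_short_id semantics."""
    matches = [i for i in ids if i.startswith(short_id)]
    if len(matches) > 1:
        # The real DB API raises exception.MultipleChoices here.
        raise ValueError("short id %r matches more than one profile" % short_id)
    return matches[0] if matches else None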
|
|
import numpy as np
import pylab as plt
import sys
plt.ion()
#plt.close('all')
#constants
TRAIN=0
TESTLOC=1
TESTHIC=2
D=10
a2str=['B','W','G','RS','RM','RL','S','M','L','XL']
a2data=np.array([[0,1,2,2,1,0,2,1,0,np.nan],
[2,1,0,2,1,0,2,1,0,np.nan],[np.nan,np.nan,np.nan,2,1,0,np.nan,2,1,0]])
data2a=np.zeros((3,D,3))
for i in range(3):
data2a[i,:,:] = np.int32(a2data==i).T
feedback=np.array([[1,0,0,0,0,1,0,0,1,np.nan],
[0,0,1,0,0,1,0,0,1,np.nan],[np.nan,np.nan,np.nan,0,0,1,0,0,0,1]])
w=np.array([1,1,1,0.5,0.5,0.5,0.5,0.5,0.5,0.5])
# functions
def getProb(a,d):
p=np.power(a,d)
p/=np.nansum(p)
return p
def chooseAction(p):
action=np.random.multinomial(1,p)
return action.nonzero()[0][0]
class Model():
def __init__(self,q0=0.5,u0=0.5,d=1,g=0.7,h=0.5,m=1):
''' q0 - prior preference of color over length (0,1)
u0 - prior preference of rel. over abs. length (0,1)
d - decision consistency (0,inf), 0=random, 1=deterministic
h - learning from positive feedback (0,1);
            1=current evidence (fast shifting), 0=prior (slow shifting)
g - learning from negative feedback (0,1);
m - attentional focus (0, inf); 0= uniform distribution
'''
self.q0=q0; self.u0=u0; self.d=d
self.g=g; self.h=h; self.m=m
def exp1run(self):
T=20
#initialize
q=np.zeros(T+1); q[0]=self.q0
u=np.zeros(T+1); u[0]=self.u0
a=np.zeros((T+1,D));self.f=[]
p=np.zeros((T+1,D));dat=np.zeros(T)
a[0,:]=np.ones(10)/3.0
a[0,-1]=np.nan
a[0,:3]*=q[0]
a[0,3:6]*=(1-q[0])*u[0]
a[0,6:]*=(1-q[0])*(1-u[0])
b=np.zeros(T)# observed behavior
phase=0
#print a[0,:]
for t in range(T):
if t>10: phase=1
else: phase=0
p[t,:]=getProb(a[t,:],self.d)
b[t]=chooseAction(p[t,:])
dat[t]=a2data[phase,b[t]]
m=data2a[dat[t],:,phase]
f=feedback[phase,b[t]]
w=np.power(a[t,:],self.m)
self.f.append(f)
if f==1:
s=m*w
a[t+1,:]= self.h*s/np.nansum(s) + (1-self.h)*a[t,:]
else:
s=(1-m)*w
a[t+1,:]= self.g*s/np.nansum(s) + (1-self.g)*a[t,:]
u[t+1]= np.nansum(a[t+1,3:6])/np.nansum(a[t+1,3:])
q[t+1]= np.nansum(a[t+1,:3])/np.nansum(a[t+1,:])
#(np.nansum(a[t+1,:3])+(1-u[t+1])*np.nansum(a[t+1,6:])+u[t+1]*np.nansum(a[t+1,3:6])
self.a=a
self.b=b
self.dat=dat
self.f=np.array(self.f)
return self.dat,self.f
def exp1computeLL(self,dat,f):
T=20
#initialize
q=np.zeros(T+1); q[0]=self.q0
u=np.zeros(T+1); u[0]=self.u0
a=np.zeros((T+1,D));self.f=[]
p=np.zeros((T+1,D));
a[0,:]=np.ones(10)/3.0
a[0,-1]=np.nan
a[0,:3]*=q[0]
a[0,3:6]*=(1-q[0])*u[0]
a[0,6:]*=(1-q[0])*(1-u[0])
phase=0
LL=0
#print a[0,:]
for t in range(T):
if t>10: phase=1
else: phase=0
p[t,:]=getProb(a[t,:],self.d)
m=data2a[dat[t],:,phase]
w=np.power(a[t,:],self.m)
loglik= np.nansum(np.log(np.maximum(0.001,p[t,m==f[t]])))
if f[t]==1:
s=m*w
a[t+1,:]= self.h*s/np.nansum(s) + (1-self.h)*a[t,:]
else:
s=(1-m)*w
a[t+1,:]= self.g*s/np.nansum(s) + (1-self.g)*a[t,:]
#print t,dat[t],f[t],np.nansum(p[t,m==f[t]]),loglik
#print 'm= ',m
#print 'p= ',p
LL+=loglik
return LL
def plothistory(self):
a=self.a
b=self.b
plt.figure(figsize=(12,6))
I=np.concatenate([a.T,np.array(np.nansum(a[:,:3],1),ndmin=2),
np.array(np.nansum(a[:,3:6],1),ndmin=2),np.array(np.nansum(a[:,6:],1),ndmin=2)],axis=0)
plt.plot(range(b.size),b,'rx',ms=8,mew=2)
plt.plot([10.5,10.5],[-1,I.shape[1]],'r',lw=2)
plt.imshow(I,interpolation='nearest',cmap='winter')
plt.colorbar()
ax=plt.gca()
ax.set_yticks(range(I.shape[0]))
        ax.set_yticklabels(['']*a.shape[1]+['color','rel len','abs len'])
c1=plt.Circle((-1.5,0),radius=0.4,color='blue',clip_on=False)
c2=plt.Circle((-1.5,1),radius=0.4,color='white',clip_on=False)
c3=plt.Circle((-1.5,2),radius=0.4,color='yellow',clip_on=False)
ax.add_patch(c1);ax.add_patch(c2);ax.add_patch(c3);
c1=plt.Rectangle((-2,3),1,0.2,color='white',clip_on=False)
c2=plt.Rectangle((-2.5,4),1.5,0.2,color='white',clip_on=False)
c3=plt.Rectangle((-3,5),2,0.2,color='white',clip_on=False)
ax.add_patch(c1);ax.add_patch(c2);ax.add_patch(c3);
c1=plt.Rectangle((-2,6),1,0.2,color='gray',clip_on=False)
c2=plt.Rectangle((-2.5,7),1.5,0.2,color='gray',clip_on=False)
c3=plt.Rectangle((-3,8),2,0.2,color='gray',clip_on=False)
c4=plt.Rectangle((-3.5,9),2.5,0.2,color='gray',clip_on=False)
ax.add_patch(c1);ax.add_patch(c2);ax.add_patch(c3);ax.add_patch(c4);
        print(I[-3,-1])
def LLsample(M,Y):
LL=0
for y in Y:
LL+= M.exp1computeLL(y[0],y[1])
return LL
def checkLL(M,n=50):
np.random.seed(4)
fname='LLRq%.2fu%.2fh%.2fm%.2fd%.2f'%(M.q0,M.u0,M.h,M.m,M.d)
Y=[]
for i in range(n):
dat,f=M.exp1run()
Y.append([dat,f])
#return np.array(Y)
#M.plothistory()
h= np.linspace(0,1,21)#np.array([1])
#g= np.linspace(0,1,21)
g=np.linspace(0,1,21)
import time
t0=time.time()
out=np.ones((h.size,g.size))
for hh in range(h.size):
        print(np.round(hh/float(h.size),2))
#for gg in range(g.size):
for gg in range(g.size):
M.h=h[hh];#M.g=g[gg]
M.g=g[gg]
out[hh,gg]=LLsample(M,Y)
    print(time.time()-t0)
np.save(fname,out)
return out
def plotLL(fname='out4.npy'):
plt.figure()
h= np.linspace(0,1,21)
g= np.linspace(0,1,21)
m=np.linspace(0,2,21)
d=np.linspace(0,2,21)
out=np.load(fname)
    print(np.nanmax(out), np.nanmin(out))
rang=np.nanmax(out)-np.nanmin(out)
maxloc= np.squeeze(np.array((np.nanmax(out)==out).nonzero()))
H,G=np.meshgrid(h,g)
    print(maxloc)
    for mm in range(m.size//2):
        for dd in range(d.size//2):
plt.subplot(10,10,(9-mm)*10+dd+1)
plt.pcolormesh(h,g,out[:,:,mm*2,dd*2].T,
vmax=np.nanmax(out),vmin=np.nanmax(out)-rang/4.)
plt.gca().set_xticks([])
plt.gca().set_yticks([])
            if mm==maxloc[2]//2 and dd==maxloc[3]//2:
plt.plot(h[maxloc[0]],g[maxloc[1]],'ow',ms=8)
if dd==0:
                print(mm, dd)
plt.ylabel('%.1f'%m[mm*2])
if mm==0: plt.xlabel('%.1f'%d[dd*2])
plt.title(fname[:6])
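# --- Illustrative sketch (not part of the original script). ---
# getProb() turns the attention weights `a` into choice probabilities by
# exponentiating with the consistency parameter d and renormalizing
# (p_i proportional to a_i ** d), and chooseAction() samples one index from
# that distribution. The toy weights below are made up for illustration; the
# helper is never called.
def _demo_choice_rule():
    a_toy = np.array([0.5, 0.3, 0.2])
    p_sharp = getProb(a_toy, 3)    # larger d -> more deterministic choice
    p_flat = getProb(a_toy, 0.5)   # smaller d -> closer to uniform
    picked = chooseAction(p_sharp)
    return p_sharp, p_flat, picked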
if __name__ == '__main__':
ags=[]
#for i in range(1,len(sys.argv)): ags.append(float(sys.argv[i]))
np.random.seed(5)
M=Model(q0=0.9,u0=1,h=0.9,g=0.5,m=1,d=1)
out=checkLL(M)
|
|
import sublime
import sublime_plugin
from os import path
import tempfile
import sys
import re
PACKAGE_SETTINGS = "ExportHtml.sublime-settings"
if sublime.platform() == "linux":
# Try and load Linux Python2.6 lib. Default path is for Ubuntu.
linux_lib = sublime.load_settings(PACKAGE_SETTINGS).get("linux_python2.6_lib", "/usr/lib/python2.6/lib-dynload")
    if linux_lib not in sys.path and path.exists(linux_lib):
sys.path.append(linux_lib)
from plistlib import readPlist
from ExportHtmlLib.rgba.rgba import RGBA
NUMBERED_BBCODE_LINE = '[color=%(color)s]%(line)s [/color]%(code)s\n'
BBCODE_LINE = '%(code)s\n'
BBCODE_CODE = '[color=%(color)s]%(content)s[/color]'
BBCODE_ESCAPE = '[/color][color=%(color_open)s]%(content)s[/color][color=%(color_close)s]'
BBCODE_BOLD = '[b]%(content)s[/b]'
BBCODE_ITALIC = '[i]%(content)s[/i]'
POST_START = '[pre=%(bg_color)s]'
POST_END = '[/pre]\n'
BBCODE_MATCH = re.compile(r"""(\[/?)((?:code|pre|table|tr|td|th|b|i|u|sup|color|url|img|list|trac|center|quote|size|li|ul|ol|youtube|gvideo)(?:=[^\]]+)?)(\])""")
FILTER_MATCH = re.compile(r'^(?:(brightness|saturation|hue|colorize)\((-?[\d]+|[\d]*\.[\d]+)\)|(sepia|grayscale|invert))$')
class ExportBbcodePanelCommand(sublime_plugin.WindowCommand):
def execute(self, value):
if value >= 0:
view = self.window.active_view()
            if view is not None:
ExportBbcode(view).run(**self.args[value])
def run(self):
options = sublime.load_settings(PACKAGE_SETTINGS).get("bbcode_panel", {})
menu = []
self.args = []
for opt in options:
k, v = opt.items()[0]
menu.append(k)
self.args.append(v)
if len(menu):
self.window.show_quick_panel(
menu,
self.execute
)
class ExportBbcodeCommand(sublime_plugin.WindowCommand):
def run(self, **kwargs):
view = self.window.active_view()
        if view is not None:
ExportBbcode(view).run(**kwargs)
class ExportBbcode(object):
def __init__(self, view):
self.view = view
def process_inputs(self, **kwargs):
return {
"numbers": bool(kwargs.get("numbers", False)),
"color_scheme": kwargs.get("color_scheme", None),
"multi_select": bool(kwargs.get("multi_select", False)),
"clipboard_copy": bool(kwargs.get("clipboard_copy", True)),
"view_open": bool(kwargs.get("view_open", False)),
"filter": kwargs.get("filter", "")
}
def setup(self, **kwargs):
path_packages = sublime.packages_path()
        # Get general document preferences from sublime preferences
settings = sublime.load_settings('Preferences.sublime-settings')
eh_settings = sublime.load_settings(PACKAGE_SETTINGS)
self.tab_size = settings.get('tab_size', 4)
self.char_limit = int(eh_settings.get("valid_selection_size", 4))
self.bground = ''
self.fground = ''
self.gbground = ''
self.gfground = ''
self.sbground = ''
self.sfground = ''
self.numbers = kwargs["numbers"]
self.hl_continue = None
self.curr_hl = None
self.sels = []
self.multi_select = self.check_sel() if kwargs["multi_select"] else False
self.size = self.view.size()
self.pt = 0
self.end = 0
self.curr_row = 0
self.empty_space = None
self.filter = []
for f in kwargs["filter"].split(";"):
m = FILTER_MATCH.match(f)
if m:
if m.group(1):
self.filter.append((m.group(1), float(m.group(2))))
else:
self.filter.append((m.group(3), 0.0))
# Get color scheme
if kwargs["color_scheme"] != None:
alt_scheme = kwargs["color_scheme"]
else:
alt_scheme = eh_settings.get("alternate_scheme", False)
        scheme_file = settings.get('color_scheme') if alt_scheme is False else alt_scheme
colour_scheme = path.normpath(scheme_file)
self.plist_file = self.apply_filters(readPlist(path_packages + colour_scheme.replace('Packages', '')))
colour_settings = self.plist_file["settings"][0]["settings"]
# Get general theme colors from color scheme file
self.bground = self.strip_transparency(colour_settings.get("background", '#FFFFFF'), simple_strip=True)
self.fground = self.strip_transparency(colour_settings.get("foreground", '#000000'))
self.gbground = self.bground
self.gfground = self.fground
# Create scope colors mapping from color scheme file
self.colours = {self.view.scope_name(self.end).split(' ')[0]: {"color": self.fground, "style": []}}
for item in self.plist_file["settings"]:
scope = item.get('scope', None)
colour = None
style = []
if 'scope' in item:
scope = item['scope']
if 'settings' in item:
colour = item['settings'].get('foreground', None)
if 'fontStyle' in item['settings']:
for s in item['settings']['fontStyle'].split(' '):
if s == "bold" or s == "italic": # or s == "underline":
style.append(s)
            if scope is not None and colour is not None:
self.colours[scope] = {"color": self.strip_transparency(colour), "style": style}
def apply_filters(self, tmtheme):
def filter_color(color):
rgba = RGBA(color)
for f in self.filter:
name = f[0]
value = f[1]
if name == "grayscale":
rgba.grayscale()
elif name == "sepia":
rgba.sepia()
elif name == "saturation":
rgba.saturation(value)
elif name == "invert":
rgba.invert()
elif name == "brightness":
rgba.brightness(value)
elif name == "hue":
rgba.hue(value)
elif name == "colorize":
rgba.colorize(value)
return rgba.get_rgba()
if len(self.filter):
general_settings_read = False
for settings in tmtheme["settings"]:
if not general_settings_read:
for k, v in settings["settings"].items():
try:
settings["settings"][k] = filter_color(v)
                        except Exception:
pass
general_settings_read = True
continue
try:
settings["settings"]["foreground"] = filter_color(settings["settings"]["foreground"])
                except Exception:
pass
try:
settings["settings"]["background"] = filter_color(settings["settings"]["background"])
                except Exception:
pass
return tmtheme
def strip_transparency(self, color, track_darkness=False, simple_strip=False):
if color is None:
return color
rgba = RGBA(color.replace(" ", ""))
if not simple_strip:
rgba.apply_alpha(self.bground if self.bground != "" else "#FFFFFF")
return rgba.get_rgb()
def setup_print_block(self, curr_sel, multi=False):
# Determine start and end points and whether to parse whole file or selection
if not multi and (curr_sel.empty() or curr_sel.size() <= self.char_limit):
self.size = self.view.size()
self.pt = 0
self.end = 1
self.curr_row = 1
else:
self.size = curr_sel.end()
self.pt = curr_sel.begin()
self.end = self.pt + 1
self.curr_row = self.view.rowcol(self.pt)[0] + 1
self.start_line = self.curr_row
self.gutter_pad = len(str(self.view.rowcol(self.size)[0])) + 1
def check_sel(self):
multi = False
for sel in self.view.sel():
if not sel.empty() and sel.size() >= self.char_limit:
multi = True
self.sels.append(sel)
return multi
def guess_colour(self, the_key):
the_colour = None
the_style = None
if the_key in self.colours:
the_colour = self.colours[the_key]["color"]
the_style = self.colours[the_key]["style"]
else:
best_match = 0
for key in self.colours:
if self.view.score_selector(self.pt, key) > best_match:
best_match = self.view.score_selector(self.pt, key)
the_colour = self.colours[key]["color"]
the_style = self.colours[key]["style"]
self.colours[the_key] = {"color": the_colour, "style": the_style}
return the_colour, the_style
def print_line(self, line, num):
if self.numbers:
bbcode_line = NUMBERED_BBCODE_LINE % {
"color": self.gfground,
"line": str(num).rjust(self.gutter_pad),
"code": line
}
else:
bbcode_line = BBCODE_LINE % {"code": line}
return bbcode_line
def convert_view_to_bbcode(self, the_bbcode):
for line in self.view.split_by_newlines(sublime.Region(self.end, self.size)):
self.empty_space = None
self.size = line.end()
line = self.convert_line_to_bbcode()
the_bbcode.write(self.print_line(line, self.curr_row))
self.curr_row += 1
def repl(self, m, the_colour):
return m.group(1) + (
BBCODE_ESCAPE % {
"color_open": the_colour,
"color_close": the_colour,
"content": m.group(2)
}
) + m.group(3)
def format_text(self, line, text, the_colour, the_style):
text = text.replace('\t', ' ' * self.tab_size).replace('\n', '')
        if self.empty_space is not None:
text = self.empty_space + text
self.empty_space = None
if text.strip(' ') == '':
self.empty_space = text
else:
code = ""
text = BBCODE_MATCH.sub(lambda m: self.repl(m, the_colour), text)
bold = False
italic = False
for s in the_style:
if s == "bold":
bold = True
if s == "italic":
italic = True
code += (BBCODE_CODE % {"color": the_colour, "content": text})
if italic:
code = (BBCODE_ITALIC % {"color": the_colour, "content": code})
if bold:
code = (BBCODE_BOLD % {"color": the_colour, "content": code})
line.append(code)
def convert_line_to_bbcode(self):
line = []
while self.end <= self.size:
# Get text of like scope up to a highlight
scope_name = self.view.scope_name(self.pt)
while self.view.scope_name(self.end) == scope_name and self.end < self.size:
self.end += 1
the_colour, the_style = self.guess_colour(scope_name)
region = sublime.Region(self.pt, self.end)
# Normal text formatting
text = self.view.substr(region)
self.format_text(line, text, the_colour, the_style)
# Continue walking through line
self.pt = self.end
self.end = self.pt + 1
# Join line segments
return ''.join(line)
def write_body(self, the_bbcode):
the_bbcode.write(POST_START % {"bg_color": self.bground})
# Convert view to HTML
if self.multi_select:
count = 0
total = len(self.sels)
for sel in self.sels:
self.setup_print_block(sel, multi=True)
self.convert_view_to_bbcode(the_bbcode)
count += 1
if count < total:
the_bbcode.write("\n" + (BBCODE_CODE % {"color": self.fground, "content": "..."}) + "\n\n")
else:
self.setup_print_block(self.view.sel()[0])
self.convert_view_to_bbcode(the_bbcode)
the_bbcode.write(POST_END)
def run(self, **kwargs):
inputs = self.process_inputs(**kwargs)
self.setup(**inputs)
delete = False if inputs["view_open"] else True
with tempfile.NamedTemporaryFile(delete=delete, suffix='.txt') as the_bbcode:
self.write_body(the_bbcode)
if inputs["clipboard_copy"]:
the_bbcode.seek(0)
sublime.set_clipboard(the_bbcode.read())
sublime.status_message("Export to BBCode: copied to clipboard")
if inputs["view_open"]:
self.view.window().open_file(the_bbcode.name)
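# --- Illustrative sketch (not part of the plugin). ---
# The "filter" option accepted by ExportBbcode.run()/setup() is a
# semicolon-separated string such as "brightness(1.1);sepia": each entry is
# either a parameterized name(value) filter or a bare filter name, as encoded
# by FILTER_MATCH above. The standalone helper below shows how such a string is
# broken into (name, value) pairs, mirroring the parsing loop in setup().
def _parse_filter_string(filter_string):
    """Hypothetical helper: parse a filter string into (name, value) tuples."""
    parsed = []
    for part in filter_string.split(";"):
        m = FILTER_MATCH.match(part)
        if m is None:
            continue  # entries that do not match are skipped, as in setup()
        if m.group(1):
            parsed.append((m.group(1), float(m.group(2))))
        else:
            parsed.append((m.group(3), 0.0))
    return parsed
# e.g. _parse_filter_string("brightness(1.1);sepia") == [("brightness", 1.1), ("sepia", 0.0)]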
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMwareAPI.
"""
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova import exception
from nova import test
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
class VMwareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
super(VMwareAPIVMTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
vmwareapi_host_password='test_pass',
vnc_enabled=False,
use_linked_clone=False)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
self.conn = driver.VMwareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
'id': 0,
'vlan': None,
'bridge_interface': None,
'injected': True},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
self.image = {
'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c',
'disk_format': 'vhd',
'size': 512,
}
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(VMwareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
def _create_instance_in_the_db(self):
values = {'name': 1,
'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type': 'm1.large',
}
self.instance = db.instance_create(None, values)
def _create_vm(self):
"""Create and spawn the VM."""
self._create_instance_in_the_db()
self.type_data = db.instance_type_get_by_name(None, 'm1.large')
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=None)
self._check_vm_record()
def _check_vm_record(self):
"""
Check if the spawned VM's properties correspond to the instance in
the db.
"""
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
# Get Nova record for VM
vm_info = self.conn.get_info({'name': 1})
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
vm = vms[0]
# Check that m1.large above turned into the right thing.
mem_kib = long(self.type_data['memory_mb']) << 10
vcpus = self.type_data['vcpus']
self.assertEquals(vm_info['max_mem'], mem_kib)
self.assertEquals(vm_info['mem'], mem_kib)
self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
self.assertEquals(vm.get("summary.config.memorySizeMB"),
self.type_data['memory_mb'])
# Check that the VM is running according to Nova
self.assertEquals(vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to vSphere API.
self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
"""
Check if the get_info returned values correspond to the instance
object in the db.
"""
mem_kib = long(self.type_data['memory_mb']) << 10
self.assertEquals(info["state"], pwr_state)
self.assertEquals(info["max_mem"], mem_kib)
self.assertEquals(info["mem"], mem_kib)
self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
def test_list_instances_1(self):
self._create_vm()
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
def test_list_interfaces(self):
self._create_vm()
interfaces = self.conn.list_interfaces(1)
self.assertEquals(len(interfaces), 1)
self.assertEquals(interfaces[0], 4000)
def test_spawn(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
func_call_matcher.call)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.assertIsNone(func_call_matcher.match())
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
def test_reboot(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.instance, self.network_info, reboot_type)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
self.instance, self.network_info, 'SOFT')
def test_reboot_not_poweredon(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.instance, self.network_info, 'SOFT')
def test_suspend(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
self.instance)
def test_resume(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.instance, self.network_info)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.resume,
self.instance, self.network_info)
def test_resume_not_suspended(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_power_on_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
self.instance)
def test_power_off(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SHUTDOWN)
def test_power_off_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
self.instance)
def test_power_off_suspended(self):
self._create_vm()
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstancePowerOffFailure,
self.conn.power_off, self.instance)
def test_get_info(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
self.conn.destroy(self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
def test_destroy_non_existent(self):
self._create_instance_in_the_db()
self.assertEquals(self.conn.destroy(self.instance, self.network_info),
None)
def test_pause(self):
pass
def test_unpause(self):
pass
def test_diagnostics(self):
pass
def test_get_console_output(self):
pass
class VMwareAPIHostTestCase(test.TestCase):
"""Unit tests for Vmware API host calls."""
def setUp(self):
super(VMwareAPIHostTestCase, self).setUp()
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
vmwareapi_host_password='test_pass')
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self.conn = driver.VMwareESXDriver(False)
def tearDown(self):
super(VMwareAPIHostTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEquals(stats['vcpus'], 16)
self.assertEquals(stats['disk_total'], 1024)
self.assertEquals(stats['disk_available'], 500)
self.assertEquals(stats['disk_used'], 1024 - 500)
self.assertEquals(stats['host_memory_total'], 1024)
self.assertEquals(stats['host_memory_free'], 1024 - 500)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self._test_host_action(self.conn.host_power_action, 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode, True)
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode, False)
|
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from .. import config_context
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
Maximum Likelihood Estimator of covariance.
precision : ndarray of shape (n_features, n_features)
The precision matrix of the covariance model to be tested.
Returns
-------
log_likelihood_ : float
Sample mean of the log-likelihood.
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
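# Worked example (illustrative, not part of scikit-learn): with p = 2 features,
# emp_cov = np.eye(2) and precision = np.eye(2), the expression above evaluates
# to
#     (-np.sum(emp_cov * precision) + fast_logdet(precision) - 2 * np.log(2 * np.pi)) / 2
#   = (-2 + 0 - 2 * 1.8379...) / 2
# which is approximately -2.8379, i.e. what log_likelihood(np.eye(2), np.eye(2))
# returns.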
def empirical_covariance(X, *, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
Examples
--------
>>> from sklearn.covariance import empirical_covariance
>>> X = [[1,1,1],[1,1,1],[1,1,1],
... [0,0,0],[0,0,0],[0,0,0]]
>>> empirical_covariance(X)
array([[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25]])
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool, default=True
Specifies if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
def __init__(self, *, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
        Storage is done according to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : array-like of shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance, check_finite=False)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like of shape (n_features, n_features)
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_, check_finite=False)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
"""
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like of shape (n_samples, n_features)
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
X_test = self._validate_data(X_test, reset=False)
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, X):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
            The observations whose squared Mahalanobis distances are
            computed. Observations are assumed to be drawn from the same
            distribution as the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
X = self._validate_data(X, reset=False)
precision = self.get_precision()
with config_context(assume_finite=True):
# compute mahalanobis distances
dist = pairwise_distances(X, self.location_[np.newaxis, :],
metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
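# Hypothetical end-to-end usage of the estimator defined above: fit on a
# training split, score a held-out split, compare against the generating
# covariance, and compute squared Mahalanobis distances.  Illustrative only;
# nothing in the library calls this helper.
def _demo_empirical_covariance():
    rng = np.random.RandomState(42)
    real_cov = np.array([[0.8, 0.3], [0.3, 0.4]])
    X = rng.multivariate_normal(mean=[0.0, 0.0], cov=real_cov, size=600)
    X_train, X_test = X[:500], X[500:]
    cov = EmpiricalCovariance().fit(X_train)
    print("estimated covariance:\n", cov.covariance_)
    print("held-out log-likelihood:", cov.score(X_test))
    print("error norm vs. real_cov:", cov.error_norm(real_cov))
    print("squared Mahalanobis distances:", cov.mahalanobis(X_test[:3]))
    return cov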
|
|
# -*- coding: utf-8 -*-
__author__ = 'tivvit'
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from protorpc import messages
from google.appengine.api import memcache
from backend.cdh_m import User_m, UsersCollection_m, SolvedQuestSum_m, SolvedQuest_m, SolvedQuestsCollection_m
from solved_quest import SolvedQuest
from quests import Quests
from faction_names import faction_names
# from game import Game
import logging
class Users(ndb.Model):
# id = messages.IntegerField(1)
name = ndb.StringProperty()
email = ndb.StringProperty()
faction = ndb.IntegerProperty()
# user = msgprop.MessageProperty(User_m, indexed_fields=['name', 'faction'])
# def __init__(self):
# self.solved_quest = SolvedQuest()
def list(self):
data = memcache.get('users')
if data is not None:
return data
else:
users = []
for user in Users.query().order(Users.name).fetch():
users.append(self._map_message(user))
# logging.info(users)
users_m = UsersCollection_m(user=users)
memcache.add(key="users", value=users_m, time=600)
return users_m
def search(self, query):
users = []
# for user in Users.query(Users.name==query).fetch():
# todo use search API
for user in Users.query().fetch():
if user.name and query in user.name:
users.append(self._map_message(user))
logging.info(users)
return UsersCollection_m(user=users)
def get(self, id):
return self._map_message(ndb.Key(Users, id).get())
def delete(self, id):
return ndb.Key(Users, id).delete()
def allowed_to_faction(self, game, user_id):
user_points = self.get_points_sum(user_id)
# print "points" + str(user_points)
# print game.get_min_faction_points()
# print user_points >= game.get_min_faction_points()
return user_points >= game.get_min_faction_points()
def set_faction(self, game, user_id, faction_id):
user = ndb.Key(Users, user_id).get()
print self.allowed_to_faction(game, user_id)
print game.faction_hiring(faction_id).hiring
if not user.faction and self.allowed_to_faction(game, user_id) and game.faction_hiring(faction_id).hiring:
user.faction = faction_id
user.put()
return self._map_message(user)
def create(self, name, email, faction=0):
user = Users(
name=name,
email=email,
faction=faction
)
user.put()
return self._map_message(user)
def get_points(self, user_id):
solved_quest = SolvedQuest()
solved = solved_quest.get_user_points_list(user_id)
# logging.debug(solved)
solved_quests = []
for solve in solved:
if solve.id_quest:
quest = Quests().get(solve.id_quest)
else:
quest = None
solved_quests.append(
SolvedQuest_m(
userId=solve.id_user,
questId=solve.id_quest,
points=solve.points,
quest=quest
)
)
return SolvedQuestsCollection_m(solved_quests=solved_quests)
def get_points_sum(self, user_id):
solved_quest = SolvedQuest()
return solved_quest.get_user_points_sum(user_id)
def get_points_sum_m(self, user_id):
return SolvedQuestSum_m(
sum=self.get_points_sum(user_id)
)
def add_points(self, user_id, points):
solved_quest = SolvedQuest()
points = solved_quest.add_points(user_id, points)
return SolvedQuest_m(
userId=points.id_user,
points=points.points
)
def solve_quest(self, user_id, quest_id):
# solved_quest = SolvedQuest()
# quests = Quests()
quest = Quests.query(Quests.num == quest_id).get()
points = quest.points
user = ndb.Key(Users, user_id).get()
logging.info(user.faction)
logging.info(quest.faction)
if quest.faction == 0 or quest.faction == user.faction:
solved_c = self.get_points(user_id)
solved_c = solved_c.solved_quests
alreadySolved = False
for s in solved_c:
if s.quest and quest.num == s.quest.num:
alreadySolved = True
break
if not alreadySolved:
solved_quest = SolvedQuest()
solved = solved_quest.solve_quest(user_id, quest_id, points)
logging.warning(solved)
return SolvedQuest_m(
userId=solved.id_user,
questId=solved.id_quest,
points=solved.points
)
# else:
# raise Exception
else:
raise Exception
def get_stats(self, game, user_id, faction_id):
from backend.cdh_m import Quest_m, User_stats_m
user = ndb.Key(Users, user_id).get()
user_m = self._map_message(user)
# q = []
# q.append(Quest_m(name="Zabij vsechny kolem", faction="Nefrakcni", points=2))
# q.append(Quest_m(name="Zabij vsechny Vitalisty", faction="Metalide", points=10))
#logging.debug()
solved = self.get_points(user_id)
solved = solved.solved_quests
# solved = []
# for s in solved_c:
# if s.quest:
# solved.append(s.quest)
q = Quests()
list_faction = 0
if user.faction == faction_id:
list_faction = faction_id
todo = q.list_by_fraction(list_faction)
todo = todo.quest
# logging.info("cnt: " + todo)
# logging.info(">>>>>>>>>>>>>>")
filtered_todo = []
for t in todo:
add = True
# logging.info(solved)
# logging.info("========")
logging.info("========")
for solv in solved:
# logging.info("solv" + solv.num)
# logging.info("todo" + t.num)
if t.num and solv.quest and solv.quest.num == t.num:
add = False
break
if add:
filtered_todo.append(SolvedQuest_m(quest=t, points=t.points))
return User_stats_m(
user=user_m,
todo=filtered_todo,
quests=solved,
pointsSum=self.get_points_sum(user_id),
allowedToFaction=int(user.faction == 0 and self.allowed_to_faction(game, user_id) and game.faction_hiring(faction_id).hiring)
)
def _map_message(self, user):
return User_m(
name=user.name,
email=user.email,
factionId=user.faction,
faction=faction_names[user.faction] if user.faction >= 0 else "",
id=long(user.key.id())
)
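# The faction and duplicate checks in solve_quest() reduce to two simple rules.
# This framework-free helper restates them with plain values so the logic can
# be exercised without App Engine; the helper and its argument names are
# hypothetical and unused by the model above.
def _can_solve(user_faction, quest_faction, quest_num, solved_nums):
    if quest_faction not in (0, user_faction):
        return False  # quest is reserved for another faction
    return quest_num not in solved_nums  # reject quests already solved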
|
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import colorsys
from string import Template
from vistrails.core.modules.config import IPort, OPort, ModuleSettings
from vistrails.core.modules.vistrails_module import ModuleError, Module
from vistrails.core.utils import InstanceObject
# FIXME make package imports better
from vistrails.packages.tabledata.common import choose_column
from .utils import *
###############################################################################
# Modules and Data
class GMapVisData(object):
def __init__(self, gmaps_libs, init_template, data, center=None):
self.gmaps_libs = gmaps_libs
self.init_template = init_template
self.data = data
self.center = center
def convert_color(c):
c = c.tuple
return GMapColor(c[0] * 255.0, c[1] * 255.0, c[2] * 255.0)
class TitlesMixin(object):
def get_titles(self, table=None, default_col=None):
if table is None:
table = self.get_input("table")
title_col_idx = self.force_get_input('titleColIdx')
title_col_name = self.force_get_input('titleColName')
print "title_col_idx:", title_col_idx
print "title_col_name:", title_col_name
if (title_col_idx is None and
title_col_name is None and
default_col is not None and
default_col < table.columns):
# only allow default if in range
title_col_idx = default_col
if title_col_idx is not None or title_col_name is not None:
title_idx = choose_column(table.columns, table.names,
title_col_name, title_col_idx)
title_col = table.get_column(title_idx)
return title_col
return None
class GMapVis(Module, OptionsMixin):
_input_ports = [IPort('table', 'tabledata:Table'),
IPort('latitudeColIdx', 'basic:Integer', optional=True,
default=0),
IPort('latitudeColName', 'basic:String', optional=True),
IPort('longitudeColIdx', 'basic:Integer', optional=True,
default=1),
IPort('longitudeColName', 'basic:String', optional=True)]
_output_ports = [OPort('self', 'GMapVis')]
_settings = ModuleSettings(abstract=True)
def get_positions(self, table=None):
if table is None:
table = self.get_input("table")
lat_col_idx = self.force_get_input('latitudeColIdx')
lat_col_name = self.force_get_input('latitudeColName')
lng_col_idx = self.force_get_input('longitudeColIdx')
lng_col_name = self.force_get_input('longitudeColName')
if lat_col_idx is None and lat_col_name is None:
lat_idx = self.get_input('latitudeColIdx') # default 0
else:
lat_idx = choose_column(table.columns, table.names,
lat_col_name, lat_col_idx)
if lng_col_idx is None and lng_col_name is None:
lng_idx = self.get_input('longitudeColIdx') # default 1
else:
lng_idx = choose_column(table.columns, table.names,
lng_col_name, lng_col_idx)
lat_col = table.get_column(lat_idx, True)
lng_col = table.get_column(lng_idx, True)
center = (sum(float(x) for x in lat_col)/len(lat_col),
sum(float(x) for x in lng_col)/len(lng_col))
positions = []
for i in xrange(table.rows):
positions.append(GMapLatLng(lat_col[i], lng_col[i]))
return (positions, center)
class GMapMarkers(GMapVis, TitlesMixin):
"""Turns tabular data into markers to be shown on a map.
"""
TEMPLATE = Template("""
var positions = $marker_data;
var options = $marker_options;
var titles = $marker_titles;
for (var i=0; i < positions.length; i++) {
marker = new google.maps.Marker({"position": positions[i],
"map": map});
marker.setOptions(options);
if (titles) {
marker.setTitle(titles[i]);
}
}
""")
SPECS = ['flat']
_input_ports = [IPort("flat", "basic:Boolean", optional=True),
IPort('titleColIdx', 'basic:Integer', optional=True),
IPort('titleColName', 'basic:String', optional=True)]
def compute(self):
(positions, center) = self.get_positions()
marker_options = self.get_options(self.SPECS)
titles = self.get_titles()
print "got titles:", titles
data = {"marker_options": marker_options,
"marker_data": positions,
"marker_titles": titles}
vis_data = GMapVisData([], self.TEMPLATE, data, center)
self.set_output("self", vis_data)
class GMapValueVis(GMapVis):
_input_ports = [IPort('valueColIdx', 'basic:Integer', optional=True,
default=2),
IPort('valueColName', 'basic:String', optional=True),]
_settings = ModuleSettings(abstract=True)
def get_values(self, table=None):
if table is None:
table = self.get_input("table")
value_col_idx = self.force_get_input("valueColIdx")
value_col_name = self.force_get_input("valueColName")
if value_col_idx is None and value_col_name is None:
value_idx = self.get_input("valueColIdx") # default 2
else:
value_idx = choose_column(table.columns, table.names,
value_col_name, value_col_idx)
value_col = table.get_column(value_idx, True)
return value_col
class GMapCircles(GMapValueVis):
"""Turns tabular data into circles of different sizes to be shown on a map.
"""
TEMPLATE = Template("""
var data = $circle_data;
for (var i=0; i < data.length; i++) {
var options = $circle_options;
options["center"] = data[i][0];
options["radius"] = data[i][1];
options["map"] = map;
circle = new google.maps.Circle(options);
}
""")
SPECS = [('strokeColor', convert_color, True),
('fillColor', convert_color),
'strokeWeight',
'strokeOpacity',
'fillOpacity']
_input_ports = [IPort("strokeColor", "basic:Color", optional=True,
default=InstanceObject(tuple=(0,0,0))),
IPort("strokeWeight", "basic:Integer", optional=True),
IPort("strokeOpacity", "basic:Float", optional=True),
IPort("fillColor", "basic:Color", optional=True),
IPort("fillOpacity", "basic:Float", optional=True),
IPort("scale", "basic:Float", optional=True)]
def compute(self):
(positions, center) = self.get_positions()
values = self.get_values()
circle_data = [[positions[i], float(values[i])/200.0]
for i in xrange(len(positions))]
circle_options = self.get_options(self.SPECS)
data = {"circle_options": circle_options,
"circle_data": circle_data}
vis_data = GMapVisData([], self.TEMPLATE, data, center)
self.set_output("self", vis_data)
class GMapSymbols(GMapValueVis, TitlesMixin):
"""Turns tabular data into different symbols to be shown on a map.
"""
TEMPLATE = Template("""
var data = $symbol_data;
var titles = $symbol_titles;
for (var i=0; i < data.length; i++) {
var marker_options = {"position": data[i][0],
"map": map};
if (titles) {
marker_options["title"] = titles[i];
}
if ($use_values) {
var icon_options = $symbol_options;
icon_options["fillColor"] = data[i][1];
marker_options["icon"] = icon_options;
}
marker = new google.maps.Marker(marker_options);
}
""")
SPECS = [('strokeColor', convert_color, True),
('fillStartColor', None, True),
('fillEndColor', None, True),
('strokeWeight', None, True),
'strokeOpacity',
('fillOpacity', None, True),
('scale', None, True)]
_input_ports = [IPort("strokeColor", "basic:Color", optional=True,
default=InstanceObject(tuple=(0,0,0))),
IPort("strokeWeight", "basic:Integer", optional=True,
default=1),
IPort("strokeOpacity", "basic:Float", optional=True),
IPort("fillStartColor", "basic:Color", optional=True,
default=InstanceObject(tuple=(1,1,1))),
IPort("fillEndColor", "basic:Color", optional=True,
default=InstanceObject(tuple=(1,0,0))),
IPort("fillOpacity", "basic:Float", optional=True,
default=1.0),
IPort("scale", "basic:Float", optional=True,
default=5.0),
IPort('titleColIdx', 'basic:Integer', optional=True),
IPort('titleColName', 'basic:String', optional=True),
IPort("allowLegacy", "basic:Boolean", optional=True,
default=False)]
def compute(self):
(positions, center) = self.get_positions()
legacy = self.get_input("allowLegacy")
use_values = True
try:
values = [float(x) for x in self.get_values()]
except ValueError, e:
# LEGACY SUPPORT
if legacy:
use_values = False
else:
raise ModuleError(self, "Must provide values column")
if not use_values:
symbol_data = positions
symbol_options = {}
else:
symbol_options = self.get_options(self.SPECS)
symbol_options["path"] = \
RawJavaScriptText("google.maps.SymbolPath.CIRCLE")
min_value = min(values)
max_value = max(values)
# if we have black or white, we want hue to match the other side
def white_or_black(c):
return ((c[0] < 1e-8 and c[1] < 1e-8 and c[2] < 1e-8) or
(c[0] > 1-1e-8 and c[1] > 1-1e-8 and c[2] > 1-1e-8))
start_c = symbol_options.pop("fillStartColor").tuple
end_c = symbol_options.pop("fillEndColor").tuple
start_wb = white_or_black(start_c)
end_wb = white_or_black(end_c)
start_c = list(colorsys.rgb_to_hsv(*start_c))
end_c = list(colorsys.rgb_to_hsv(*end_c))
if start_wb:
start_c[0] = end_c[0]
elif end_wb:
end_c[0] = start_c[0]
symbol_data = []
for i in xrange(len(positions)):
val = values[i]
if max_value - min_value < 1e-8:
norm_val = 1.0
else:
norm_val = (val - min_value) / (max_value - min_value)
color = []
for j in xrange(len(start_c)):
color.append((1.0 - norm_val) * start_c[j] +
norm_val * end_c[j])
color = colorsys.hsv_to_rgb(*color)
symbol_data.append([positions[i],
GMapColor(255 * color[0],
255 * color[1],
255 * color[2])])
symbol_titles = self.get_titles(default_col=(3 if legacy else None))
data = {"symbol_data": symbol_data,
"symbol_options": symbol_options,
"symbol_titles": symbol_titles,
"use_values": use_values}
vis_data = GMapVisData([], self.TEMPLATE, data, center)
self.set_output("self", vis_data)
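# GMapSymbols maps each normalized value in [0, 1] to a color by interpolating
# in HSV space between fillStartColor and fillEndColor, borrowing the hue from
# the opposite endpoint when one end is pure black or white.  The helper below
# isolates that mapping as an illustrative sketch; it is not used by the
# classes in this package.
def _demo_value_to_rgb(norm_val, start_rgb=(1.0, 1.0, 1.0), end_rgb=(1.0, 0.0, 0.0)):
    def white_or_black(c):
        return all(v < 1e-8 for v in c) or all(v > 1 - 1e-8 for v in c)
    start_c = list(colorsys.rgb_to_hsv(*start_rgb))
    end_c = list(colorsys.rgb_to_hsv(*end_rgb))
    if white_or_black(start_rgb):
        start_c[0] = end_c[0]
    elif white_or_black(end_rgb):
        end_c[0] = start_c[0]
    hsv = [(1.0 - norm_val) * s + norm_val * e for s, e in zip(start_c, end_c)]
    return colorsys.hsv_to_rgb(*hsv)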
class GMapHeatmap(GMapValueVis):
"""Turns tabular data into a heatmap layer to be shown on a map.
"""
TEMPLATE = Template("""
var data = $heatmap_data;
var options = $heatmap_options;
options["data"] = data;
options["map"] = map;
heatmap = new google.maps.visualization.HeatmapLayer(options);
""")
SPECS = ['dissipating',
'maxIntensity',
'opacity',
'radius']
_input_ports = [IPort("dissipating", "basic:Boolean", optional=True,
default=True),
IPort("maxIntensity", "basic:Float", optional=True),
IPort("opacity", "basic:Float", optional=True,
default=0.6),
IPort("radius", "basic:Float", optional=True)]
def compute(self):
(positions, center) = self.get_positions()
values = self.get_values()
heatmap_data = [{"location": positions[i],
"weight": float(values[i])}
for i in xrange(len(positions))]
heatmap_options = self.get_options(self.SPECS)
data = {"heatmap_data": heatmap_data,
"heatmap_options": heatmap_options}
vis_data = GMapVisData([], self.TEMPLATE, data, center)
self.set_output("self", vis_data)
_modules = [GMapVis, GMapMarkers, GMapValueVis, GMapCircles, GMapSymbols,
GMapHeatmap]
|
|
#
# DrawingMixin.py -- enable drawing capabilities.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import time
import math
from ginga import trcalc
from ginga.misc.Bunch import Bunch
from ginga.Bindings import KeyEvent
from .CanvasMixin import CanvasMixin
__all__ = ['DrawingMixin']
class DrawingMixin(object):
"""The DrawingMixin is a mixin class that adds drawing capability for
some of the basic CanvasObject-derived types. The set_surface method is
    used to associate an ImageViewCanvas object for layering on.
"""
def __init__(self):
assert isinstance(self, CanvasMixin), "Missing CanvasMixin class"
from .CanvasObject import drawCatalog
# For interactive drawing
self.candraw = False
self.draw_dict = drawCatalog
# canvas objects which we know how to draw have an "idraw"
# class method
self.drawtypes = [ key for key in self.draw_dict.keys()
if hasattr(self.draw_dict[key], 'idraw') ]
self.drawtypes.sort()
self.t_drawtype = 'point'
self.t_drawparams = {}
# holds the drawing context
self._draw_cxt = None
# For interactive editing
self.canedit = False
# Set to False to disable drag moves except from move control pt
self.easymove = True
self._start_x = 0
self._start_y = 0
self._cp_index = None
self._edit_obj = None
self._edit_status = False
self._edit_detail = {}
self._pick_cur_obj = None
# For modes
self._mode = 'draw'
self._mode_tbl = Bunch()
self.add_draw_mode(None)
self.add_draw_mode('draw', down=self.draw_start,
move=self.draw_motion, up=self.draw_stop,
poly_add=self.draw_poly_add,
poly_delete=self.draw_poly_delete)
self.add_draw_mode('edit', down=self.edit_start,
move=self.edit_motion, up=self.edit_stop,
poly_add=self.edit_poly_add,
poly_delete=self.edit_poly_delete)
self.add_draw_mode('pick', down=self.pick_start,
move=self.pick_motion, up=self.pick_stop,
hover=self.pick_hover,
poly_add=self.edit_poly_add,
poly_delete=self.edit_poly_delete)
# For selection
self._selected = []
self.multi_select_ok = False
# this controls whether an object is automatically selected for
# editing immediately after being drawn
self.edit_follows_draw = False
self._process_time = 0.0
# time delta threshold for deciding whether to update the image
self._delta_time = 0.020
self._draw_obj = None
# NOTE: must be mixed in with a Callback.Callbacks
for name in ('draw-event', 'draw-down', 'draw-move', 'draw-up',
'cursor-down', 'cursor-up', 'cursor-move',
'draw-scroll', 'keydown-poly_add', 'keydown-poly_del',
'keydown-edit_del', 'edit-event',
'edit-select', 'drag-drop'):
self.enable_callback(name)
def set_surface(self, viewer):
self.viewer = viewer
# Register this canvas for events of interest.
# Assumes we are mixed in with a canvas
canvas = self
# for legacy drawing via draw mode in Bindmap
canvas.add_callback('draw-down', self.draw_start, viewer)
canvas.add_callback('draw-move', self.draw_motion, viewer)
canvas.add_callback('draw-up', self.draw_stop, viewer)
canvas.add_callback('key-press', self._draw_key, 'key', viewer)
canvas.add_callback('keydown-poly_add', self._draw_op, 'poly_add',
viewer)
canvas.add_callback('keydown-poly_del', self._draw_op, 'poly_delete',
viewer)
canvas.add_callback('keydown-edit_del', self.edit_delete_cb, viewer)
#canvas.add_callback('draw-scroll', self._edit_rotate_cb, viewer)
#canvas.add_callback('draw-scroll', self._edit_scale_cb, viewer)
def register_for_cursor_drawing(self, viewer):
canvas = self
canvas.add_callback('cursor-down', self._draw_op, 'down', viewer)
canvas.add_callback('cursor-move', self._draw_op, 'move', viewer)
canvas.add_callback('cursor-up', self._draw_op, 'up', viewer)
canvas.set_callback('none-move', self._draw_op, 'hover', viewer)
##### MODE LOGIC #####
def add_draw_mode(self, name, **kwargs):
try:
bnch = self._mode_tbl[name]
except KeyError:
bnch = Bunch(name=name, **kwargs)
self._mode_tbl[name] = bnch
return bnch
def set_draw_mode(self, mode):
        if mode not in self._mode_tbl:
modes = list(self._mode_tbl.keys())
raise ValueError("mode must be one of: %s" % (str(modes)))
self._mode = mode
if mode != 'edit':
self.clear_selected()
self.update_canvas()
def get_draw_mode(self):
return self._mode
def _draw_op(self, canvas, event, data_x, data_y, opn, viewer):
if viewer != event.viewer:
return False
mode = self._mode
# Hack to handle legacy drawing using draw mode in Bindmap
if self.is_drawing():
mode = 'draw'
try:
method = self._mode_tbl[mode][opn]
except KeyError:
return False
if method is not None:
return method(canvas, event, data_x, data_y, viewer)
return False
def _draw_key(self, canvas, keyname, opn, viewer):
# synthesize a KeyEvent
# TODO: this is hacky--see if we can rethink how this is handled
# so that we get passed an event similar to _draw_op()
last_x, last_y = viewer.get_last_data_xy()
event = KeyEvent(key=keyname, state='down', mode=self._mode,
modifiers=[], viewer=viewer,
data_x=last_x, data_y=last_y)
return self._draw_op(canvas, event, last_x, last_y, opn, viewer)
##### DRAWING LOGIC #####
def _draw_update(self, data_x, data_y, cxt, force_update=False):
obj = None
# update the context with current position
x, y = cxt.crdmap.data_to(data_x, data_y)
cxt.setvals(x=x, y=y, data_x=data_x, data_y=data_y)
draw_class = cxt.draw_class
if draw_class is None:
return False
obj = draw_class.idraw(self, cxt)
# update display every delta_time secs
if obj is not None:
obj.initialize(self, cxt.viewer, self.logger)
self._draw_obj = obj
if force_update or (time.time() - self._process_time > self._delta_time):
self.process_drawing()
return True
def draw_start(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_obj = None
# get the drawing coordinate type (default 'data')
crdtype = self.t_drawparams.get('coord', 'data')
crdmap = viewer.get_coordmap(crdtype)
x, y = crdmap.data_to(data_x, data_y)
klass = self.draw_dict.get(self.t_drawtype, None)
# create the drawing context
self._draw_cxt = Bunch(start_x=x, start_y=y, points=[(x, y)],
x=x, y=y, data_x=data_x, data_y=data_y,
drawparams=self.t_drawparams,
crdmap=crdmap, viewer=viewer,
draw_class=klass, logger=self.logger)
self._draw_update(data_x, data_y, self._draw_cxt, force_update=True)
return True
def draw_stop(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_update(data_x, data_y, self._draw_cxt)
obj, self._draw_obj = self._draw_obj, None
if obj is not None:
objtag = self.add(obj)
self.make_callback('draw-event', objtag)
if self.edit_follows_draw:
#self.set_draw_mode('edit')
self.clear_selected()
self.edit_select(obj)
self.make_callback('edit-select', self._edit_obj)
return True
else:
self.process_drawing()
def draw_motion(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_update(data_x, data_y, self._draw_cxt)
return True
def draw_poly_add(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
cxt = self._draw_cxt
if self.t_drawtype in ('polygon', 'freepolygon', 'path', 'freepath'):
x, y = cxt.crdmap.data_to(data_x, data_y)
cxt.points.append((x, y))
elif self.t_drawtype == 'beziercurve' and len(cxt.points) < 3:
x, y = cxt.crdmap.data_to(data_x, data_y)
cxt.points.append((x, y))
self._draw_update(data_x, data_y, cxt, force_update=True)
return True
def draw_poly_delete(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
cxt = self._draw_cxt
if self.t_drawtype in ('polygon', 'freepolygon', 'path',
'freepath', 'beziercurve'):
if len(cxt.points) > 0:
cxt.points.pop()
self._draw_update(data_x, data_y, cxt, force_update=True)
return True
def is_drawing(self):
return self._draw_obj is not None
def enable_draw(self, tf):
self.candraw = tf
def set_drawcolor(self, colorname):
self.t_drawparams['color'] = colorname
def set_drawtype(self, drawtype, **drawparams):
if drawtype is not None:
drawtype = drawtype.lower()
assert drawtype in self.drawtypes, \
ValueError("Bad drawing type '%s': must be one of %s" % (
drawtype, self.drawtypes))
self.t_drawtype = drawtype
self.t_drawparams = drawparams.copy()
def get_drawtypes(self):
return self.drawtypes
def get_drawtype(self):
return self.t_drawtype
def get_draw_class(self, drawtype):
drawtype = drawtype.lower()
klass = self.draw_dict[drawtype]
return klass
def get_drawparams(self):
return self.t_drawparams.copy()
def process_drawing(self):
self._process_time = time.time()
#self.redraw(whence=3)
self.update_canvas()
def register_canvas_type(self, name, klass):
drawtype = name.lower()
self.draw_dict[drawtype] = klass
        if drawtype not in self.drawtypes:
self.drawtypes.append(drawtype)
self.drawtypes.sort()
##### EDITING LOGIC #####
def get_edit_object(self):
return self._edit_obj
def is_editing(self):
        return self.get_edit_object() is not None
def enable_edit(self, tf):
self.canedit = tf
def _rot_xlate(self, obj, data_x, data_y):
# translate point back into non-rotated form
rot_deg = - obj.rot_deg
xoff, yoff = obj.get_center_pt()
data_x, data_y = trcalc.rotate_pt(data_x, data_y, rot_deg,
xoff=xoff, yoff=yoff)
return data_x, data_y
def _edit_update(self, data_x, data_y, viewer):
if (not self.canedit) or (self._cp_index is None):
return False
x, y = data_x, data_y
if self._cp_index < 0:
if self.easymove:
self._edit_obj.set_edit_point(0, (x - self._start_x,
y - self._start_y),
self._edit_detail)
else:
# special hack for objects that have rot_deg attribute
if hasattr(self._edit_obj, 'rot_deg') and (self._cp_index > 0):
x, y = self._rot_xlate(self._edit_obj, x, y)
self._edit_obj.set_edit_point(self._cp_index, (x, y),
self._edit_detail)
#self._edit_obj.sync_state()
if time.time() - self._process_time > self._delta_time:
self.process_drawing()
return True
def _is_editable(self, obj, x, y, is_inside):
return is_inside and obj.editable
def _prepare_to_move(self, obj, data_x, data_y):
#print(("moving an object", obj.editable))
self.edit_select(obj)
self._cp_index = -1
ref_x, ref_y = self._edit_obj.get_reference_pt()
self._start_x, self._start_y = data_x - ref_x, data_y - ref_y
#print(("end moving an object", obj.editable))
def edit_start(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
self._edit_tmp = self._edit_obj
self._edit_status = False
self._edit_detail = Bunch()
self._cp_index = None
#shift_held = 'shift' in event.modifiers
shift_held = False
selects = self.get_selected()
if len(selects) == 0:
#print("no objects already selected")
# <-- no objects already selected
# check for objects at this location
#print("getting items")
objs = canvas.select_items_at(viewer, data_x, data_y,
test=self._is_editable)
#print("items: %s" % (str(objs)))
if len(objs) == 0:
# <-- no objects under cursor
return False
# pick top object
obj = objs[-1]
self._prepare_to_move(obj, data_x, data_y)
else:
self._edit_status = True
# Ugh. Check each selected object's control points
# for a match
contains = []
for obj in selects:
#print("editing: checking for cp")
edit_pts = obj.get_edit_points(viewer)
#print((self._edit_obj, edit_pts))
i = obj.get_pt(viewer, edit_pts, data_x, data_y,
obj.cap_radius)
#print(('got point', i))
if i is not None:
#print("editing cp #%d" % (i))
# editing a control point from an existing object
self._edit_obj = obj
self._cp_index = i
if hasattr(obj, 'rot_deg'):
x, y = self._rot_xlate(self._edit_obj, data_x, data_y)
else:
x, y = data_x, data_y
self._edit_detail.start_pos = (x, y)
obj.setup_edit(self._edit_detail)
self._edit_update(data_x, data_y, viewer)
return True
## if obj.contains(data_x, data_y):
## contains.append(obj)
# update: check if objects bbox contains this point
x1, y1, x2, y2 = obj.get_llur()
if (x1 <= data_x <= x2) and (y1 <= data_y <= y2):
contains.append(obj)
# <-- no control points match, is there an object that contains
# this point?
if len(contains) > 0:
# TODO?: make a compound object of contains and move it?
obj = contains[-1]
if self.is_selected(obj) and shift_held:
# deselecting object
self.select_remove(obj)
else:
self._prepare_to_move(obj, data_x, data_y)
## Compound = self.get_draw_class('compoundobject')
## c_obj = Compound(*self.get_selected())
## c_obj.inherit_from(obj)
## self._prepare_to_move(c_obj, data_x, data_y)
else:
# <-- user clicked outside any selected item's control pt
# and outside any selected item
if not shift_held:
self.clear_selected()
# see now if there is an unselected item at this location
objs = canvas.select_items_at(viewer, data_x, data_y,
test=self._is_editable)
#print("new items: %s" % (str(objs)))
if len(objs) > 0:
# pick top object
obj = objs[-1]
#print(("top object", obj))
if self.num_selected() > 0:
#print("there are previously selected items")
# if there are already some selected items, then
# add this object to the selection, make a compound
# object
self.edit_select(obj)
Compound = self.get_draw_class('compoundobject')
c_obj = Compound(*self.get_selected())
c_obj.inherit_from(obj)
self._prepare_to_move(c_obj, data_x, data_y)
else:
# otherwise just start over with this new object
#print(("starting over"))
self._prepare_to_move(obj, data_x, data_y)
self.process_drawing()
return True
def edit_stop(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
if (self._edit_tmp != self._edit_obj) or (
(self._edit_obj is not None) and
(self._edit_status != self.is_selected(self._edit_obj))):
# <-- editing status has changed
#print("making edit-select callback")
self.make_callback('edit-select', self._edit_obj)
if (self._edit_obj is not None) and (self._cp_index is not None):
# <-- an object has been edited
self._edit_update(data_x, data_y, viewer)
self._cp_index = None
self.make_callback('edit-event', self._edit_obj)
self._edit_obj.make_callback('edited')
return True
def edit_motion(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
if (self._edit_obj is not None) and (self._cp_index is not None):
self._edit_update(data_x, data_y, viewer)
return True
return False
def edit_poly_add(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
obj = self._edit_obj
if (obj is not None) and self.is_selected(obj) and \
(obj.kind in ('polygon', 'path')):
self.logger.debug("checking points")
# determine which line we are adding a point to
points = list(obj.get_data_points())
if obj.kind == 'polygon':
points = points + [points[0]]
x0, y0 = points[0]
insert = None
for i in range(1, len(points[1:])+1):
x1, y1 = points[i]
self.logger.debug("checking line %d" % (i))
if obj.within_line(viewer, data_x, data_y, x0, y0, x1, y1,
8):
insert = i
break
x0, y0 = x1, y1
if insert is not None:
self.logger.debug("inserting point")
# Point near a line
pt = obj.crdmap.data_to(data_x, data_y)
obj.insert_pt(insert, pt)
self.process_drawing()
else:
self.logger.debug("cursor not near a line")
return True
def edit_poly_delete(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
obj = self._edit_obj
if (obj is not None) and self.is_selected(obj) and \
(obj.kind in ('polygon', 'path')):
self.logger.debug("checking points")
# determine which point we are deleting
points = list(obj.get_data_points())
delete = None
for i in range(len(points)):
x1, y1 = points[i]
self.logger.debug("checking vertex %d" % (i))
if obj.within_radius(viewer, data_x, data_y, x1, y1,
8):
delete = i
break
if delete is not None:
self.logger.debug("deleting point")
obj.delete_pt(delete)
self.process_drawing()
else:
self.logger.debug("cursor not near a point")
return True
def edit_rotate(self, delta_deg, viewer):
if self._edit_obj is None:
return False
self._edit_obj.rotate_by(delta_deg)
self.process_drawing()
self.make_callback('edit-event', self._edit_obj)
return True
def _edit_rotate_cb(self, canvas, event, viewer, msg=True):
if not self.canedit or (viewer != event.viewer):
return False
bd = viewer.get_bindings()
amount = event.amount
if bd.get_direction(event.direction) == 'down':
amount = - amount
        return self.edit_rotate(amount, viewer)
def edit_scale(self, delta_x, delta_y, viewer):
if self._edit_obj is None:
return False
self._edit_obj.scale_by(delta_x, delta_y)
self.process_drawing()
self.make_callback('edit-event', self._edit_obj)
return True
def _edit_scale_cb(self, canvas, event, viewer, msg=True):
if not self.canedit or (viewer != event.viewer):
return False
bd = viewer.get_bindings()
if bd.get_direction(event.direction) == 'down':
amount = 0.9
else:
amount = 1.1
        return self.edit_scale(amount, amount, viewer)
def edit_delete(self):
if (self._edit_obj is not None) and self.is_selected(self._edit_obj):
self.select_remove(self._edit_obj)
obj, self._edit_obj = self._edit_obj, None
self.delete_object(obj)
self.make_callback('edit-event', self._edit_obj)
return True
def edit_delete_cb(self, canvas, event, data_x, data_y, viewer):
if not self.canedit or (viewer != event.viewer):
return False
return self.edit_delete()
def edit_select(self, newobj):
if not self.canedit:
return False
if not self.multi_select_ok:
self.clear_selected()
# add new object to selection
self.select_add(newobj)
self._edit_obj = newobj
return True
##### SELECTION LOGIC #####
def _is_selectable(self, obj, x, y, is_inside):
return is_inside and obj.editable
#return is_inside
def is_selected(self, obj):
return obj in self._selected
def get_selected(self):
return self._selected
def num_selected(self):
return len(self._selected)
def clear_selected(self):
self._selected = []
def select_remove(self, obj):
try:
self._selected.remove(obj)
except:
pass
def select_add(self, obj):
if obj not in self._selected:
self._selected.append(obj)
##### PICK LOGIC #####
def _do_pick(self, canvas, event, data_x, data_y, cb_name, viewer):
# check for objects at this location
objs = canvas.select_items_at(viewer, data_x, data_y)
if len(objs) == 0:
# <-- no objects under cursor
if self._pick_cur_obj is not None:
# leaving an object that we were in--make pick-leave cb
obj, self._pick_cur_obj = self._pick_cur_obj, None
pt = obj.crdmap.data_to(data_x, data_y)
obj.make_callback('pick-leave', canvas, event, pt)
return False
# pick top object
obj = objs[-1]
self.logger.debug("%s event in %s obj at x, y = %d, %d" % (
cb_name, obj.kind, data_x, data_y))
# get coordinates in native form for this object
pt = obj.crdmap.data_to(data_x, data_y)
if self._pick_cur_obj is None:
# entering a new object--make pick-enter cb
self._pick_cur_obj = obj
obj.make_callback('pick-enter', canvas, event, pt)
# make pick callback
obj.make_callback(cb_name, canvas, event, pt)
return True
def pick_start(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'pick-down', viewer)
def pick_motion(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'pick-move', viewer)
def pick_hover(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'pick-hover', viewer)
def pick_stop(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'pick-up', viewer)
# The canvas drawing
def draw(self, viewer):
# Draw everything else as usual
super(DrawingMixin, self).draw(viewer)
# Draw our current drawing object, if any
if self._draw_obj:
self._draw_obj.draw(viewer)
# Draw control points on edited objects
selected = list(self.get_selected())
if len(selected) > 0:
for obj in selected:
## if not self.has_object(obj):
## # <-- the object has been removed from the canvas
## # but not removed from the selection
## self.select_remove(obj)
## continue
cr = viewer.renderer.setup_cr(obj)
obj.draw_edit(cr, viewer)
### NON-PEP8 EQUIVALENTS -- TO BE DEPRECATED ###
setSurface = set_surface
getDrawClass = get_draw_class
#END
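# Hypothetical wiring of a custom mode on any object that mixes in
# DrawingMixin: each handler receives (canvas, event, data_x, data_y, viewer)
# when dispatched by _draw_op().  The mode name and handler arguments below are
# placeholders, shown only to illustrate the add/set/get mode API above.
def _demo_register_mode(canvas, on_down, on_move, on_up):
    canvas.add_draw_mode('annotate', down=on_down, move=on_move, up=on_up)
    canvas.set_draw_mode('annotate')
    return canvas.get_draw_mode()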
|
|
import pathlib
from collections import defaultdict
from operator import itemgetter
from itertools import groupby
from flask import Flask, send_from_directory, request, jsonify
from logbook import Logger
from aesop import isocodes, events
from aesop.models import Movie, TVShow, TVShowEpisode, Source, Config, database_proxy, Genre, MovieGenre, TVShowGenre
app = Flask('aesop.ui')
log = Logger('aesop.ui')
@app.route('/')
def root():
templates = str(pathlib.Path(__file__).with_name('templates'))
return send_from_directory(templates, 'index.html')
@app.route('/series')
def series():
series = list(TVShow.select().order_by(TVShow.title).dicts())
tvshow_genre_map = Genre.select(TVShowGenre.media, Genre.text).join(TVShowGenre).tuples()
d = defaultdict(list)
for show_id, text in tvshow_genre_map:
d[show_id].append(text)
for tv in series:
tv['genres'] = d[tv['id']]
return jsonify({'data': series})
@app.route('/series/<id>')
def singleseries(id):
tvshow = TVShow.select(TVShow.media_id, TVShow.title).where(TVShow.media_id == id).dicts().get()
return jsonify({'data': tvshow})
# this and set_watched_movie are not websocket commands because they need data
# back.
@app.route('/series/setwatched/<int:video_id>', methods=['POST'])
def set_watched_series(video_id):
m = TVShowEpisode.select().where(TVShowEpisode.id == video_id).get()
with database_proxy.transaction():
m.watched = not m.watched
m.save()
show = m.show
if all([episode.watched for episode in show.episodes]):
show.watched = True
if show.is_dirty():
show.save()
return jsonify({'watched': m.watched})
@app.route('/series/<id>/seasons')
def seasons(id):
tvshow = TVShow.select().where(TVShow.media_id == id).get()
seasons = tvshow.episodes.select(TVShowEpisode.season, TVShowEpisode.watched).group_by(TVShowEpisode.season, TVShowEpisode.watched).dicts()
collapsed_seasons = defaultdict(bool)
for season in seasons:
watched = season['watched']
season = season['season']
if season in collapsed_seasons:
watched = collapsed_seasons[season] and watched
collapsed_seasons[season] = watched
seasons = [dict(season=season, watched=watched) for (season, watched) in collapsed_seasons.items()]
return jsonify({'data': seasons})
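# The loop above collapses per-(season, watched) rows into a single flag per
# season: a season counts as watched only if every grouping of it is watched.
# This framework-free restatement is illustrative only and is not used by the
# routes in this module.
def _demo_collapse_seasons(rows):
    collapsed = {}
    for row in rows:
        collapsed[row['season']] = collapsed.get(row['season'], True) and row['watched']
    return [dict(season=s, watched=w) for s, w in collapsed.items()]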
@app.route('/series/<id>/episodes/<int:season>')
def episodes(id, season):
tvshow = TVShow.select().where(TVShow.media_id == id).get()
return jsonify({'data': list(tvshow.episodes.select().where(TVShowEpisode.season == season).order_by(TVShowEpisode.episode).dicts())})
@app.route('/movies')
def movies():
movies = list(Movie.select(Movie.id, Movie.title, Movie.watched, Movie.year).order_by(Movie.title).dicts())
movie_genre_map = Genre.select(MovieGenre.media, Genre.text).join(MovieGenre).tuples()
d = defaultdict(list)
for movie_id, text in movie_genre_map:
d[movie_id].append(text)
for m in movies:
m['genres'] = d[m['id']]
return jsonify({'data': movies})
@app.route('/movies/<int:id>', methods=['GET', 'POST'])
def movie(id):
if request.method == 'POST':
genres = request.json['movie'].pop('genres')
Movie.update(**request.json['movie']).where(Movie.id == id).execute()
m = Movie.get(Movie.id == id)
m.replace_genres([Genre.get_or_create(g) for g in genres])
return jsonify({'status': 'ok'})
else:
movie = Movie.select().where(Movie.id == id).dicts().get()
q = Genre.select(Genre.text).join(MovieGenre).where(MovieGenre.media == movie['id'])
movie['genres'] = [g[0] for g in q.tuples()]
return jsonify({'movie': movie})
@app.route('/movies/setwatched/<int:video_id>', methods=['POST'])
def set_watched_movie(video_id):
m = Movie.select(Movie.id, Movie.watched).where(Movie.id == video_id).get()
m.watched = not m.watched
m.save()
return jsonify({'watched': m.watched})
@app.route('/genres')
def genres():
return jsonify({'genres': [g[0] for g in Genre.select(Genre.text).order_by(Genre.text).tuples()]})
@app.route('/update/', methods=['POST'])
def update():
raise NotImplementedError()
@app.route('/settings/', methods=['GET', 'POST'])
def settings():
if request.method == 'POST':
from aesop.models import database
try:
with database.transaction():
Config.delete().execute()
for setting in request.json['configuration']:
Config.create(**setting)
Source.delete().execute()
for setting in request.json['sources']:
Source.create(**setting)
except Exception as e:
events.error.blocking("Settings could not be saved: {!r}".format(str(e)))
raise
else:
events.success.blocking("Settings saved")
else:
configuration = []
for section, values in groupby(list(Config.select(Config.section, Config.key, Config.value).dicts()), key=itemgetter('section')):
configuration.append({
'name': section,
'values': [config_with_help(v) for v in values],
})
return jsonify({
'configuration': configuration,
'sources': list(Source.select().dicts()),
})
return jsonify({'response': 'ok'})
@app.route('/stats/')
def stats():
series = TVShow.select().count()
episodes = TVShowEpisode.select().count()
episodes_watched = TVShowEpisode.select().where(TVShowEpisode.watched == True).count()
movies = Movie.select().count()
movies_watched = Movie.select().where(Movie.watched == True).count()
stats = {
'series': series,
'episodes': episodes,
'episodes watched': episodes_watched,
'movies': movies,
'movies watched': movies_watched,
}
return jsonify({'stats': stats})
@app.route('/manifest.json')
def manifest():
return jsonify({
'name': 'Aesop',
"start_url": "/",
"display": "standalone",
})
@app.route('/search/genres/')
def get_upstream_genres():
imdb_id = request.values['i']
video_type = request.values['type']
upstream = request.values.get('m', 'omdb')
if upstream == 'omdb':
import requests
params = {
'i': imdb_id,
'p': 'full',
'type': video_type,
}
        resp = requests.get('http://www.omdbapi.com/', params=params)
json = resp.json()
if json['Response'] != 'False':
genres = json['Genre'].split(', ')
else:
genres = []
else:
assert False, "Unknown upstream type {!r}".format(upstream)
return jsonify({'genres': genres})
@app.route('/search/')
def search_upstream():
query = request.values['q']
video_type = request.values['type']
upstream = request.values.get('m', 'omdb')
if len(query) < 3:
results = []
elif upstream == 'omdb':
import requests
params = {
's': query,
'type': video_type,
}
resp = requests.get('http://www.omdbapi.com/', params=params)
results = resp.json().get('Search', [])
results = [
dict(title=t['Title'], year=int(t['Year']), id=t['imdbID'],
description='{} {}'.format(t['Year'], t['Title']))
for t in results
]
else:
assert False, "Unknown upstream type {!r}".format(upstream)
return jsonify({'results': list(results)})
help_map = {
'concurrency': 'Amount of concurrent requests to perform when retrieving video metadata.',
'frequency': 'How frequently to scan for new videos',
'theme': 'Website theme to use',
'seek size': 'Amount of time in seconds to jump forward/backward',
'subtitles for matching audio': 'Should subtitles be automatically enabled if the audio and subtitles language are the same?',
'video output': 'Video Output Driver. Messing with this can break things so be careful',
}
isochoices = [dict(display='-- None --', value='0')] + sorted([
dict(display=nicename, value=iso)
for (iso, nicename) in isocodes.isocodes.items()
], key=itemgetter('display'))
extras_map = {
'theme': {
'choices': {
'cyborg': 'Cyborg',
'darkly': 'Darkly',
'flatly': 'Flatly',
'journal': 'Journal',
'cosmo': 'Cosmo',
'cerulean': 'Cerulean',
},
},
'preferred subtitle': {
'choices': isochoices,
'typeahead': 'Preferred Subtitle Language',
'default': '',
},
'preferred audio': {
'choices': isochoices,
'typeahead': 'Preferred Audio Language',
'default': '',
},
'subtitles for matching audio': {
'choices': {
'1': 'Yes',
'0': 'No',
},
},
'concurrency': {
'type': 'number',
},
}
def config_with_help(config):
config['help'] = help_map.get(config['key'], '')
config.update(extras_map.get(config['key'], {}))
if 'typeahead' in config:
value = config['value']
choices = config.get('choices', [])
for choice in choices:
if choice['value'] == value:
config['value'] = dict(value=value, display=choice['display'])
break
if config['key'] == 'concurrency':
config['value'] = int(config['value'])
return config
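# Illustrative sketch of what config_with_help() produces for a single Config
# row: the row gains 'help' text from help_map and any UI extras (such as the
# theme 'choices') from extras_map.  The sample row is hypothetical and this
# helper is not called anywhere in the application.
def _demo_config_with_help():
    row = {'section': 'ui', 'key': 'theme', 'value': 'darkly'}
    decorated = config_with_help(dict(row))
    # decorated now contains 'help' text and the available theme choices
    return decorated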
def main():
from aesop.models import init
from aesop.utils import setup_logging
setup_logging('aesop.ui', 'INFO')
init()
app.run(debug=True, host='0.0.0.0')
if __name__ == '__main__':
main()
|
|
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions for tool scripts.
"""
import logging
import os
import time
from io import StringIO
import OpenSSL
from ganeti import constants
from ganeti import errors
from ganeti import pathutils
from ganeti import utils
from ganeti import serializer
from ganeti import ssconf
from ganeti import ssh
def VerifyOptions(parser, opts, args):
"""Verifies options and arguments for correctness.
"""
if args:
parser.error("No arguments are expected")
return opts
def _VerifyCertificateStrong(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
"""Verifies a certificate against the local node daemon certificate.
Includes elaborate tests of encodings etc., and returns formatted
certificate.
@type cert_pem: string
@param cert_pem: Certificate and key in PEM format
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise error_fn("(stdin) Unable to load certificate: %s" % err)
try:
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
raise error_fn("(stdin) Unable to load private key: %s" % err)
# Check certificate with given key; this detects cases where the key given on
# stdin doesn't match the certificate also given on stdin
try:
utils.X509CertKeyCheck(cert, key)
except OpenSSL.SSL.Error:
raise error_fn("(stdin) Certificate is not signed with given key")
# Standard checks, including check against an existing local certificate
# (no-op if that doesn't exist)
_check_fn(cert)
key_encoded = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
cert_encoded = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert)
complete_cert_encoded = key_encoded + cert_encoded
if not cert_pem == complete_cert_encoded.decode('ascii'):
logging.error("The certificate differs after being reencoded. Please"
" renew the certificates cluster-wide to prevent future"
" inconsistencies.")
# Format for storing on disk
buf = StringIO()
buf.write(cert_pem)
return buf.getvalue()
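# Illustrative exercise of _VerifyCertificateStrong with a throwaway
# self-signed certificate.  The node-certificate check hook is stubbed out so
# the sketch does not depend on a local daemon certificate, and RuntimeError
# merely stands in for a real error factory; nothing in the tools calls this
# helper.
def _DemoVerifyCertificateStrong():
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
  cert = OpenSSL.crypto.X509()
  cert.get_subject().CN = "demo.example.com"
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(3600)
  cert.set_serial_number(1)
  cert.sign(key, "sha256")
  pem = (OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key) +
         OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
  return _VerifyCertificateStrong(pem.decode("ascii"), RuntimeError,
                                  _check_fn=lambda _cert: None)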
def _VerifyCertificateSoft(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
"""Verifies a certificate against the local node daemon certificate.
@type cert_pem: string
@param cert_pem: Certificate in PEM format (no key)
"""
try:
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  except OpenSSL.crypto.Error:
pass
else:
raise error_fn("No private key may be given")
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise errors.X509CertError("(stdin)",
"Unable to load certificate: %s" % err)
_check_fn(cert)
def VerifyCertificateSoft(data, error_fn, _verify_fn=_VerifyCertificateSoft):
"""Verifies cluster certificate if existing.
@type data: dict
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
cert = data.get(constants.SSHS_NODE_DAEMON_CERTIFICATE)
if cert:
_verify_fn(cert, error_fn)
def VerifyCertificateStrong(data, error_fn,
_verify_fn=_VerifyCertificateStrong):
"""Verifies cluster certificate. Throws error when not existing.
@type data: dict
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
cert = data.get(constants.NDS_NODE_DAEMON_CERTIFICATE)
if not cert:
raise error_fn("Node daemon certificate must be specified")
return _verify_fn(cert, error_fn)
def VerifyClusterName(data, error_fn, cluster_name_constant,
_verify_fn=ssconf.VerifyClusterName):
"""Verifies cluster name.
@type data: dict
"""
name = data.get(cluster_name_constant)
if name:
_verify_fn(name)
else:
raise error_fn("Cluster name must be specified")
return name
def VerifyHmac(data, error_fn):
"""Verifies the presence of the hmac secret.
@type data: dict
"""
hmac = data.get(constants.NDS_HMAC)
if not hmac:
raise error_fn("Hmac key must be provided")
return hmac
def LoadData(raw, data_check):
"""Parses and verifies input data.
@rtype: dict
"""
result = None
try:
result = serializer.LoadAndVerifyJson(raw, data_check)
logging.debug("Received data: %s", serializer.DumpJson(result))
except Exception as e:
logging.warn("Received data is not valid json: %s.", str(raw))
raise e
return result
def GenerateRootSshKeys(key_type, key_bits, error_fn, _suffix="",
_homedir_fn=None):
"""Generates root's SSH keys for this node.
"""
ssh.InitSSHSetup(key_type, key_bits, error_fn=error_fn,
_homedir_fn=_homedir_fn, _suffix=_suffix)
def GenerateClientCertificate(
data, error_fn, client_cert=pathutils.NODED_CLIENT_CERT_FILE,
signing_cert=pathutils.NODED_CERT_FILE):
"""Regenerates the client certificate of the node.
@type data: string
  @param data: the JSON-formatted input data
"""
if not os.path.exists(signing_cert):
raise error_fn("The signing certificate '%s' cannot be found."
% signing_cert)
# TODO: This sets the serial number to the number of seconds
# since epoch. This is technically not a correct serial number
# (in the way SSL is supposed to be used), but it serves us well
# enough for now, as we don't have any infrastructure for keeping
# track of the number of signed certificates yet.
serial_no = int(time.time())
# The hostname of the node is provided with the input data.
hostname = data.get(constants.NDS_NODE_NAME)
if not hostname:
raise error_fn("No hostname found.")
utils.GenerateSignedSslCert(client_cert, serial_no, signing_cert,
common_name=hostname)
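# Illustrative sketch (added, not part of the original tool): `data` is the dict
# produced by LoadData() and `error_fn` a callable that builds the exception to raise.
# Hypothetical values:
#   GenerateClientCertificate({constants.NDS_NODE_NAME: "node1.example.com"},
#                             SetupError)  # SetupError is a stand-in error class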
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
As of zookeeper 3.4.0, the `mntr` admin command is provided for easy parsing of zookeeper stats.
This check first parses the `stat` admin command for a version number.
If the zookeeper version supports `mntr`, it is also parsed.
Duplicate information is reported by both `mntr` and `stat` to preserve backward compatibility.
Example:
`stat` reports: zookeeper.latency.avg
`mntr` reports: zookeeper.avg.latency
If available, prefer the data reported by `mntr` over `stat`.
The duplicate `stat` reports are only kept for backward compatibility.
Besides the usual zookeeper state of `leader`, `follower`, `observer` and `standalone`,
this check will report three other states:
`down`: the check cannot connect to zookeeper
`inactive`: the zookeeper instance has lost connection to the cluster
`unknown`: an unexpected error has occurred in this check
States can be accessed through the gauge `zookeeper.instances.<state>`,
through the set `zookeeper.instances`, or through the `mode:<state>` tag.
Parses the response from zookeeper's `stat` admin command, which looks like:
```
Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT
Clients:
/10.42.114.160:32634[1](queued=0,recved=12,sent=0)
/10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
/10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
/10.115.77.32:32990[1](queued=0,recved=16,sent=0)
/10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
/10.37.137.74:21797[1](queued=0,recved=19431,sent=0)
Latency min/avg/max: -10/0/20007
Received: 101032173
Sent: 0
Outstanding: 0
Zxid: 0x1034799c7
Mode: leader
Node count: 487
```
`stat` tested with Zookeeper versions 3.0.0 to 3.4.5
The following is an example of the `mntr` commands output:
```
zk_version 3.4.5-cdh4.4.0--1, built on 09/04/2013 01:46 GMT
zk_avg_latency 0
zk_max_latency 0
zk_min_latency 0
zk_packets_received 4
zk_packets_sent 3
zk_num_alive_connections 1
zk_outstanding_requests 0
zk_server_state standalone
zk_znode_count 4
zk_watch_count 0
zk_ephemerals_count 0
zk_approximate_data_size 27
zk_open_file_descriptor_count 29
zk_max_file_descriptor_count 4096
```
`mntr` tested with ZooKeeper 3.4.5
'''
# stdlib
from collections import defaultdict
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
from StringIO import StringIO
import re
import socket
import struct
# project
from checks import AgentCheck
class ZKConnectionFailure(Exception):
""" Raised when we are unable to connect or get the output of a command. """
pass
class ZKMetric(tuple):
"""
A Zookeeper metric.
Tuple with an optional metric type (default is 'gauge').
"""
def __new__(cls, name, value, m_type="gauge"):
return super(ZKMetric, cls).__new__(cls, [name, value, m_type])
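# Illustrative example (added): ZKMetric instances behave like plain 3-tuples, e.g.
#   ZKMetric('zookeeper.latency.avg', 0) == ('zookeeper.latency.avg', 0, 'gauge')
#   ZKMetric('zookeeper.packets_sent', 3, 'rate') == ('zookeeper.packets_sent', 3, 'rate')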
class ZookeeperCheck(AgentCheck):
"""
ZooKeeper AgentCheck.
    Parse content from the `stat` and `mntr` (if available) commands to retrieve cluster health metrics.
"""
# example match:
# "Zookeeper version: 3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT"
version_pattern = re.compile(r'(\d+\.\d+\.\d+)')
SOURCE_TYPE_NAME = 'zookeeper'
STATUS_TYPES = [
'leader',
'follower',
'observer',
'standalone',
'down',
'inactive',
]
# `mntr` information to report as `rate`
_MNTR_RATES = set(
[
'zk_packets_received',
'zk_packets_sent',
]
)
def check(self, instance):
host = instance.get('host', 'localhost')
port = int(instance.get('port', 2181))
timeout = float(instance.get('timeout', 3.0))
expected_mode = (instance.get('expected_mode') or '').strip()
tags = instance.get('tags', [])
cx_args = (host, port, timeout)
sc_tags = ["host:{0}".format(host), "port:{0}".format(port)] + list(set(tags))
hostname = self.hostname
report_instance_mode = instance.get("report_instance_mode", True)
zk_version = None # parse_stat will parse and set version string
# Send a service check based on the `ruok` response.
# Set instance status to down if not ok.
try:
ruok_out = self._send_command('ruok', *cx_args)
except ZKConnectionFailure:
# The server should not respond at all if it's not OK.
status = AgentCheck.CRITICAL
message = 'No response from `ruok` command'
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
else:
ruok_out.seek(0)
ruok = ruok_out.readline()
            if ruok == 'imok':
                status = AgentCheck.OK
            else:
                status = AgentCheck.WARNING
            message = u'Response from the server: %s' % ruok
finally:
self.service_check(
'zookeeper.ruok', status, message=message, tags=sc_tags
)
# Read metrics from the `stat` output.
try:
stat_out = self._send_command('stat', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
# Parse the response
metrics, new_tags, mode, zk_version = self.parse_stat(stat_out)
# Write the data
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + new_tags)
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
if expected_mode:
if mode == expected_mode:
status = AgentCheck.OK
message = u"Server is in %s mode" % mode
else:
status = AgentCheck.CRITICAL
message = u"Server is in %s mode but check expects %s mode"\
% (mode, expected_mode)
self.service_check('zookeeper.mode', status, message=message,
tags=sc_tags)
# Read metrics from the `mntr` output
if zk_version and LooseVersion(zk_version) > LooseVersion("3.4.0"):
try:
mntr_out = self._send_command('mntr', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
metrics, mode = self.parse_mntr(mntr_out)
mode_tag = "mode:%s" % mode
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + [mode_tag])
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
def report_instance_mode(self, hostname, mode, tags):
gauges = defaultdict(int)
if mode not in self.STATUS_TYPES:
mode = "unknown"
tags = tags + ['mode:%s' % mode]
self.gauge('zookeeper.instances', 1, tags=tags)
gauges[mode] = 1
for k, v in gauges.iteritems():
gauge_name = 'zookeeper.instances.%s' % k
self.gauge(gauge_name, v)
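        # Illustrative note (added): for a healthy leader this method emits, e.g.,
        #   zookeeper.instances        1  (tags: ..., 'mode:leader')
        #   zookeeper.instances.leader 1
        # Modes outside STATUS_TYPES are folded into 'unknown' before reporting.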
def _send_command(self, command, host, port, timeout):
sock = socket.socket()
sock.settimeout(timeout)
buf = StringIO()
chunk_size = 1024
# try-finally and try-except to stay compatible with python 2.4
try:
try:
# Connect to the zk client port and send the stat command
sock.connect((host, port))
sock.sendall(command)
# Read the response into a StringIO buffer
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads = 1
max_reads = 10000
while chunk:
if num_reads > max_reads:
# Safeguard against an infinite loop
raise Exception("Read %s bytes before exceeding max reads of %s. "
% (buf.tell(), max_reads))
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads += 1
except (socket.timeout, socket.error):
raise ZKConnectionFailure()
finally:
sock.close()
return buf
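    # Illustrative usage (added, hypothetical values): the four-letter-word commands
    # are sent over a raw TCP connection, e.g.
    #   buf = self._send_command('mntr', 'localhost', 2181, 3.0)
    #   metrics, mode = self.parse_mntr(buf)
    # The returned StringIO is rewound with seek(0) by the parsers before reading.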
def parse_stat(self, buf):
''' `buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
'''
metrics = []
buf.seek(0)
# Check the version line to make sure we parse the rest of the
# body correctly. Particularly, the Connections val was added in
# >= 3.4.4.
start_line = buf.readline()
match = self.version_pattern.search(start_line)
        if match is None:
            # Could not parse a version from the `stat` output; treat the instance as inactive.
            return (None, None, "inactive", None)
else:
version = match.group()
has_connections_val = LooseVersion(version) > LooseVersion("3.4.4")
# Clients:
buf.readline() # skip the Clients: header
connections = 0
client_line = buf.readline().strip()
if client_line:
connections += 1
while client_line:
client_line = buf.readline().strip()
if client_line:
connections += 1
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(ZKMetric('zookeeper.latency.min', l_min))
metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
metrics.append(ZKMetric('zookeeper.latency.max', l_max))
# Received: 101032173
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))
# Sent: 1324
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))
if has_connections_val:
# Connections: 1
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
else:
                # If the zk version doesn't explicitly give the Connections val,
# use the value we computed from the client list.
metrics.append(ZKMetric('zookeeper.connections', connections))
# Outstanding: 0
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/dd-agent/issues/1383
metrics.append(ZKMetric('zookeeper.bytes_outstanding', long(value.strip())))
metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
# Parse as a 64 bit hex int
zxid = long(value.strip(), 16)
# convert to bytes
zxid_bytes = struct.pack('>q', zxid)
# the higher order 4 bytes is the epoch
(zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
# the lower order 4 bytes is the count
(zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))
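            # Worked example (added): Zxid 0x1034799c7 packs big-endian into the 8 bytes
            # 00 00 00 01 03 47 99 c7, so the epoch (high 32 bits) is 1 and the
            # count (low 32 bits) is 0x034799c7 = 55024071.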
# Mode: leader
_, value = buf.readline().split(':')
mode = value.strip().lower()
tags = [u'mode:' + mode]
# Node count: 487
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))
return metrics, tags, mode, version
def parse_mntr(self, buf):
'''
Parse `mntr` command's content.
`buf` is a readable file-like object
Returns: a tuple (metrics, mode)
if mode == 'inactive', metrics will be None
'''
buf.seek(0)
        first = buf.readline()  # First line is the version string or an error message
        if first.strip() == 'This ZooKeeper instance is not currently serving requests':
            return (None, 'inactive')
metrics = []
mode = 'inactive'
for line in buf:
try:
key, value = line.split()
if key == "zk_server_state":
mode = value.lower()
continue
metric_name = self._normalize_metric_label(key)
metric_type = "rate" if key in self._MNTR_RATES else "gauge"
metric_value = int(value)
metrics.append(ZKMetric(metric_name, metric_value, metric_type))
except ValueError:
self.log.warning(
u"Cannot format `mntr` value. key={key}, value{value}".format(
key=key, value=value
)
)
continue
except Exception:
self.log.exception(
u"Unexpected exception occurred while parsing `mntr` command content:\n"
u"{buf}".format(
buf=buf
)
)
return (metrics, mode)
def _normalize_metric_label(self, key):
if re.match('zk', key):
key = key.replace('zk', 'zookeeper', 1)
return key.replace('_', '.', 1)
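    # Illustrative example (added): only the `zk` prefix and the first underscore are
    # rewritten, so 'zk_avg_latency' becomes 'zookeeper.avg_latency' and
    # 'zk_open_file_descriptor_count' becomes 'zookeeper.open_file_descriptor_count'.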
u'''
Created on Jan 9, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
from arelle import (XPathContext, XbrlConst, XmlUtil, XbrlUtil, XmlValidate)
from arelle.FunctionXs import xsString
from arelle.ModelObject import ModelObject
from arelle.ModelFormulaObject import (aspectModels, Aspect, aspectModelAspect,
ModelFormula, ModelTuple, ModelExistenceAssertion,
ModelValueAssertion,
ModelFactVariable, ModelGeneralVariable, ModelVariable,
ModelParameter, ModelFilter, ModelAspectCover, ModelBooleanFilter)
from arelle.PrototypeInstanceObject import DimValuePrototype
from arelle.ModelValue import (QName)
import datetime, time, logging, re
from decimal import Decimal
from math import log10, isnan, isinf, fabs
from arelle.Locale import format_string
from collections import defaultdict
ModelDimensionValue = None
expressionVariablesPattern = re.compile(ur"([^$]*)([$]\w[\w:.-]*)([^$]*)")
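# Illustrative example (added): findall() splits a message or test expression around
# its $variable references, e.g.
#   expressionVariablesPattern.findall(u"Value $v1 exceeds $v2")
#   -> [(u"Value ", u"$v1", u" exceeds "), (u"", u"$v2", u"")]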
def evaluate(xpCtx, varSet, variablesInScope=False, uncoveredAspectFacts=None):
# for each dependent variable, find bindings
if variablesInScope:
stackedEvaluations = (xpCtx.evaluations, xpCtx.evaluationHashDicts)
else:
xpCtx.varBindings = {}
uncoveredAspectFacts = {}
xpCtx.evaluations = [] # list of evaluations
        xpCtx.evaluationHashDicts = [] # hash indexes of evaluations
try:
xpCtx.variableSet = varSet
if isinstance(varSet, ModelExistenceAssertion):
varSet.evaluationsCount = 0
if xpCtx.formulaOptions.timeVariableSetEvaluation:
varSet.timeEvaluationStarted = timeEvaluationsStarted = time.time()
varSet.evaluationNumber = 0
initialTraceCount = xpCtx.modelXbrl.logCount.get(logging.getLevelName(u'INFO'), 0)
evaluateVar(xpCtx, varSet, 0, {}, uncoveredAspectFacts)
if isinstance(varSet, ModelExistenceAssertion):
prog = varSet.testProg
if prog:
                assertionParamQnames = [] # set and then remove assertion variable qnames
for varRel in varSet.orderedVariableRelationships:
varQname = varRel.variableQname
var = varRel.toModelObject
if isinstance(var, ModelParameter) and varQname not in xpCtx.inScopeVars:
assertionParamQnames.append(varQname)
xpCtx.inScopeVars[varQname] = xpCtx.inScopeVars.get(var.parameterQname)
result = xpCtx.evaluateBooleanValue(prog, contextItem=varSet.evaluationsCount)
for varQname in assertionParamQnames:
xpCtx.inScopeVars.pop(varQname)
else:
result = varSet.evaluationsCount > 0
if result: varSet.countSatisfied += 1
else: varSet.countNotSatisfied += 1
if ((xpCtx.formulaOptions.traceSatisfiedAssertions and result) or
((xpCtx.formulaOptions.traceUnsatisfiedAssertions or
xpCtx.formulaOptions.errorUnsatisfiedAssertions ) and not result)):
xpCtx.modelXbrl.log(
u"ERROR" if (xpCtx.formulaOptions.errorUnsatisfiedAssertions and not result) else u"INFO",
u"formula:assertionSatisfied" if result else u"formula:assertionUnsatisfied",
_(u"%(label)s"),
modelObject=varSet, label=varSet.logLabel(),
messageCodes=(u"formula:assertionSatisfied", u"formula:assertionUnsatisfied"))
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Existence Assertion %(xlinkLabel)s \nResult: %(result)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, result=result)
msg = varSet.message(result)
if msg is not None:
xpCtx.inScopeVars[XbrlConst.qnEaTestExpression] = varSet.test
xpCtx.modelXbrl.info(u"message:" + (varSet.id or varSet.xlinkLabel or _(u"unlabeled variableSet")),
msg.evaluate(xpCtx),
modelObject=varSet,
messageCodes=(u"message:{variableSetID|xlinkLabel}",))
xpCtx.inScopeVars.pop(XbrlConst.qnEaTestExpression)
        if xpCtx.formulaOptions.traceVariableSetExpressionResult and initialTraceCount == xpCtx.modelXbrl.logCount.get(logging.getLevelName(u'INFO'), 0):
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Variable set %(xlinkLabel)s had no xpCtx.evaluations"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel)
if xpCtx.formulaOptions.timeVariableSetEvaluation:
xpCtx.modelXbrl.info(u"formula:time",
_(u"Variable set %(xlinkLabel)s time for %(count)s evaluations: %(time)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, u"%.3f", time.time() - timeEvaluationsStarted))
xpCtx.variableSet = None
    except XPathContext.XPathException as err:
xpCtx.modelXbrl.error(err.code,
_(u"Variable set %(label)s \nException: %(error)s"),
modelObject=varSet, label=varSet.logLabel(), error=err.message)
xpCtx.variableSet = None
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Variable set %(xlinkLabel)s evaluations: %(evaluations)s x %(variables)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel,
evaluations=len(xpCtx.evaluations),
variables=max(len(e) for e in xpCtx.evaluations) if xpCtx.evaluations else 0)
del xpCtx.evaluations[:] # dereference
del xpCtx.evaluationHashDicts[:]
if variablesInScope:
xpCtx.evaluations, xpCtx.evaluationHashDicts = stackedEvaluations
else:
for vb in xpCtx.varBindings.values():
vb.close() # dereference
xpCtx.varBindings.clear() # dereference
uncoveredAspectFacts.clear() # dereference
pass
def evaluateVar(xpCtx, varSet, varIndex, cachedFilteredFacts, uncoveredAspectFacts):
if varIndex == len(varSet.orderedVariableRelationships):
# check if all fact vars are fallen back
anyFactVar = False; anyBoundFactVar = False
for vb in xpCtx.varBindings.values():
if vb.isFactVar:
anyFactVar = True
if not vb.isFallback: anyBoundFactVar = True
if xpCtx.varBindings and anyFactVar and not anyBoundFactVar:
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Variable set %(xlinkLabel)s skipped evaluation, all fact variables have fallen back"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel)
return
# record completed evaluation, for fallback blocking purposes
fbVars = set(vb.qname for vb in xpCtx.varBindings.values() if vb.isFallback)
thisEvaluation = tuple(vb.matchableBoundFact(fbVars) for vb in xpCtx.varBindings.values())
if evaluationIsUnnecessary(thisEvaluation, xpCtx.evaluationHashDicts, xpCtx.evaluations):
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Variable set %(xlinkLabel)s skipped non-different or fallback evaluation, duplicates another evaluation"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel)
varSet.evaluationNumber += 1
if xpCtx.formulaOptions.timeVariableSetEvaluation:
now = time.time()
xpCtx.modelXbrl.info(u"formula:time",
_(u"Variable set %(xlinkLabel)s skipped evaluation %(count)s: %(time)s sec"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, u"%.3f", now - varSet.timeEvaluationStarted))
varSet.timeEvaluationStarted = now
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
xpCtx.modelXbrl.profileActivity(u"... evaluation {0} (skipped)".format(varSet.evaluationNumber), minTimeToShow=10.0)
return
xpCtx.modelXbrl.profileActivity(u"... evaluation {0}".format(varSet.evaluationNumber), minTimeToShow=10.0)
for i, fb in enumerate(thisEvaluation):
while i >= len(xpCtx.evaluationHashDicts): xpCtx.evaluationHashDicts.append(defaultdict(set))
xpCtx.evaluationHashDicts[i][hash(fb)].add(len(xpCtx.evaluations)) # hash and eval index
xpCtx.evaluations.append(thisEvaluation) # complete evaluations tuple
# evaluate preconditions
for precondition in varSet.preconditions:
result = precondition.evalTest(xpCtx)
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Variable set %(xlinkLabel)s \nPrecondition %(precondition)s \nResult: %(result)s"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, precondition=precondition.xlinkLabel, result=result)
if not result: # precondition blocks evaluation
if xpCtx.formulaOptions.timeVariableSetEvaluation:
varSet.evaluationNumber += 1
now = time.time()
xpCtx.modelXbrl.info(u"formula:time",
_(u"Variable set %(xlinkLabel)s precondition blocked evaluation %(count)s: %(time)s sec"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, u"%.3f", now - varSet.timeEvaluationStarted))
varSet.timeEvaluationStarted = now
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
return
# evaluate variable set
if isinstance(varSet, ModelExistenceAssertion):
varSet.evaluationsCount += 1
else:
if isinstance(varSet, ModelTuple):
result = u"(tuple)"
traceOf = u"Tuple"
elif isinstance(varSet, ModelFormula):
result = xpCtx.evaluate(varSet.valueProg)
traceOf = u"Formula"
elif isinstance(varSet, ModelValueAssertion):
result = xpCtx.evaluateBooleanValue(varSet.testProg)
if result: varSet.countSatisfied += 1
else: varSet.countNotSatisfied += 1
msg = varSet.message(result)
if msg is not None:
xpCtx.inScopeVars[XbrlConst.qnVaTestExpression] = varSet.test
xpCtx.modelXbrl.info(u"message:" + (varSet.id or varSet.xlinkLabel or _(u"unlabeled variableSet")),
msg.evaluate(xpCtx),
modelObject=varSet,
label=varSet.logLabel(),
messageCodes=(u"message:{variableSetID|xlinkLabel}",))
xpCtx.inScopeVars.pop(XbrlConst.qnVaTestExpression)
if ((xpCtx.formulaOptions.traceSatisfiedAssertions and result) or
((xpCtx.formulaOptions.traceUnsatisfiedAssertions or
xpCtx.formulaOptions.errorUnsatisfiedAssertions ) and not result)):
_modelObjects = [varSet]
factVarBindings = []
for vb in sorted(xpCtx.varBindings.values(), key=lambda _vb: _vb.qname):
if vb.isFallback:
factVarBindings.append(u", \n${}: fallback {}".format(vb.qname, xpCtx.flattenSequence(vb.values)))
else:
if vb.isBindAsSequence:
_modelObjects.extend(vb.yieldedEvaluation)
else:
_modelObjects.append(vb.yieldedFact)
factVarBindings.append(u", \n${}: {} context {}".format(vb.qname, vb.yieldedFact.qname, vb.yieldedFactContext.id))
xpCtx.modelXbrl.log(
u"ERROR" if (xpCtx.formulaOptions.errorUnsatisfiedAssertions and not result) else u"INFO",
u"formula:assertionSatisfied" if result else u"formula:assertionUnsatisfied",
_(u"%(label)s%(factVarBindings)s"),
modelObject=_modelObjects, label=varSet.logLabel(),
factVarBindings=u"".join(factVarBindings) + (u"\n" if factVarBindings else u""),
messageCodes=(u"formula:assertionSatisfied", u"formula:assertionUnsatisfied"))
del _modelObjects[:]
traceOf = u"Value Assertion"
if xpCtx.formulaOptions.traceVariableSetExpressionResult:
label = varSet.logLabel()
expression = varSet.expression
xpCtx.modelXbrl.info(u"formula:trace",
_(u"%(variableSetType)s %(xlinkLabel)s{0} \nExpression: %(expression)s \nEvaluated: %(evaluatedExpression)s \nResult: %(result)s")
.format(u" \n%(label)s" if label else u""),
modelObject=varSet, variableSetType=traceOf, xlinkLabel=varSet.xlinkLabel,
label=label, result=result, expression=expression,
evaluatedExpression=u''.join(xpCtx.traceEffectiveVariableValue(varSet,expr)
for grp in expressionVariablesPattern.findall(expression)
for expr in grp))
if isinstance(varSet, ModelFormula) and varSet.outputInstanceQname in xpCtx.inScopeVars:
newFact = produceOutputFact(xpCtx, varSet, result)
else:
newFact = None
if varSet.hasConsistencyAssertion:
from arelle import FormulaConsisAsser
FormulaConsisAsser.evaluate(xpCtx, varSet, newFact)
if xpCtx.formulaOptions.timeVariableSetEvaluation:
varSet.evaluationNumber += 1
now = time.time()
xpCtx.modelXbrl.info(u"formula:time",
_(u"Variable set %(xlinkLabel)s completed evaluation %(count)s: %(time)s sec"),
modelObject=varSet, xlinkLabel=varSet.xlinkLabel, count=varSet.evaluationNumber,
time=format_string(xpCtx.modelXbrl.modelManager.locale, u"%.3f", now - varSet.timeEvaluationStarted))
varSet.timeEvaluationStarted = now
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
# do dependent variable scope relationships
for varScopeRel in xpCtx.modelXbrl.relationshipSet(XbrlConst.variablesScope).fromModelObject(varSet):
try:
resultQname = varScopeRel.variableQname
if resultQname:
overriddenInScopeVar = xpCtx.inScopeVars.get(resultQname)
xpCtx.inScopeVars[resultQname] = result
vb = VariableBinding(xpCtx, varScopeRel)
vb.yieldedEvaluation = result
vb.yieldedFact = newFact
overriddenVarBinding = xpCtx.varBindings.get(resultQname)
xpCtx.varBindings[resultQname] = vb
evaluate(xpCtx, varScopeRel.toModelObject, True, uncoveredAspectFacts)
if resultQname:
xpCtx.inScopeVars.pop(resultQname)
if overriddenInScopeVar is not None: # restore overridden value if there was one
xpCtx.inScopeVars[resultQname] = overriddenInScopeVar
xpCtx.varBindings.pop(resultQname)
if overriddenVarBinding is not None:
xpCtx.varBindings[resultQname] = overriddenVarBinding
vb.close() # dereference
            except XPathContext.XPathException as err:
xpCtx.modelXbrl.error(err.code,
_(u"Variable set chained in scope of variable set %(variableset)s \nException: \n%(error)s"),
modelObject=(varSet, varScopeRel.toModelObject), variableSet=varSet.logLabel(), error=err.message)
else:
# produce variable bindings
varRel = varSet.orderedVariableRelationships[varIndex]
varQname = varRel.variableQname
vb = VariableBinding(xpCtx, varRel)
var = vb.var
if vb.isFactVar:
vb.aspectsDefined = set(aspectModels[varSet.aspectModel]) # has to be a mutable set
vb.values = None
varHasNoVariableDependencies = var.hasNoVariableDependencies
varHasNilFacts = var.nils == u"true"
if varHasNoVariableDependencies and varQname in cachedFilteredFacts:
facts, vb.aspectsDefined, vb.aspectsCovered = cachedFilteredFacts[varQname]
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s: start with %(factCount)s facts previously cached after explicit filters"),
modelObject=var, variable=varQname, factCount=len(facts))
else:
if var.fromInstanceQnames:
groupFilteredFactsKey = u"grp:" + unicode(varQname) # multi instance vars or non-var-dependent variables
elif varHasNilFacts:
groupFilteredFactsKey = u"grp:stdInstWithNils"
else:
groupFilteredFactsKey = u"grp:stdInstNonNil"
if groupFilteredFactsKey in cachedFilteredFacts:
facts = cachedFilteredFacts[groupFilteredFactsKey]
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s: start with %(factCount)s facts previously cached before variable filters"),
modelObject=var, variable=varQname, factCount=len(facts))
else:
facts = set.union(*[(inst.factsInInstance if varHasNilFacts else inst.nonNilFactsInInstance)
for inst in vb.instances])
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s filtering: start with %(factCount)s facts"),
modelObject=var, variable=varQname, factCount=len(facts))
facts = filterFacts(xpCtx, vb, facts, varSet.groupFilterRelationships, u"group")
vb.aspectsCovered.clear() # group boolean sub-filters may have covered aspects
cachedFilteredFacts[groupFilteredFactsKey] = facts
facts = filterFacts(xpCtx, vb, facts, var.filterRelationships, None) # also finds covered aspects (except aspect cover filter dims, not known until after this complete pass)
                # adding dim aspects must be done after explicit filtering
for fact in facts:
if fact.isItem and fact.context is not None:
vb.aspectsDefined |= fact.context.dimAspects(xpCtx.defaultDimensionAspects)
coverAspectCoverFilterDims(xpCtx, vb, var.filterRelationships) # filters need to know what dims are covered
if varHasNoVariableDependencies:
cachedFilteredFacts[varQname] = (facts, vb.aspectsDefined, vb.aspectsCovered)
considerFallback = bool(var.fallbackValueProg)
if varSet.implicitFiltering == u"true":
if any((_vb.isFactVar and not _vb.isFallback) for _vb in xpCtx.varBindings.values()):
factCount = len(facts)
# uncovered aspects of the prior variable bindings may include aspects not in current variable binding
uncoveredAspects = (vb.aspectsDefined | _DICT_SET(uncoveredAspectFacts.keys())) - vb.aspectsCovered - set([Aspect.DIMENSIONS])
facts = implicitFilter(xpCtx, vb, facts, uncoveredAspects, uncoveredAspectFacts)
if (considerFallback and varHasNoVariableDependencies and
factCount and
factCount - len(facts) == 0 and
len(xpCtx.varBindings) > 1 and
all((len(_vb.aspectsDefined) == len(vb.aspectsDefined) for _vb in xpCtx.varBindings.values()))):
considerFallback = False
vb.facts = facts
if xpCtx.formulaOptions.traceVariableFiltersResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s: filters result %(result)s"),
modelObject=var, variable=varQname, result=unicode(vb.facts))
if considerFallback:
vb.values = xpCtx.evaluate(var.fallbackValueProg)
if xpCtx.formulaOptions.traceVariableExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s: fallbackValue result %(result)s"),
modelObject=var, variable=varQname, result=unicode(vb.values))
elif vb.isGeneralVar: # general variable
if var.fromInstanceQnames:
contextItem = [inst.modelDocument.xmlRootElement
for qn in var.fromInstanceQnames
for instSeq in (xpCtx.inScopeVars[qn],)
for inst in (instSeq if isinstance(instSeq,(list,tuple)) else (instSeq,))
]
else:
contextItem = xpCtx.modelXbrl.modelDocument.xmlRootElement # default is standard input instance
vb.values = xpCtx.flattenSequence( xpCtx.evaluate(var.selectProg, contextItem=contextItem) )
if xpCtx.formulaOptions.traceVariableExpressionResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"General Variable %(variable)s: select result %(result)s"),
modelObject=var, variable=varQname, result=unicode(vb.values))
elif vb.isParameter:
vb.parameterValue = xpCtx.inScopeVars.get(var.parameterQname)
# recurse partitions, preserve overlaid var bindings and inScopeVars
overriddenVarBinding = xpCtx.varBindings.get(varQname)
xpCtx.varBindings[varQname] = vb
for evaluationResult in vb.evaluationResults:
overriddenInScopeVar = xpCtx.inScopeVars.get(varQname)
xpCtx.inScopeVars[varQname] = evaluationResult
evaluationContributedUncoveredAspects = {}
if vb.isFactVar and not vb.isFallback:
# cache uncoveredAspect facts for nested evaluations
for aspect in vb.aspectsDefined | vb.aspectsCovered: # covered aspects may not be defined e.g., test 12062 v11, undefined aspect is a complemented aspect
if uncoveredAspectFacts.get(aspect) is None:
evaluationContributedUncoveredAspects[aspect] = uncoveredAspectFacts.get(aspect,u"none")
uncoveredAspectFacts[aspect] = None if vb.hasAspectValueCovered(aspect) else vb.yieldedFact
if xpCtx.formulaOptions.traceVariableFiltersResult:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"%(variableType)s %(variable)s: bound value %(result)s"),
modelObject=var, variableType=vb.resourceElementName, variable=varQname, result=unicode(evaluationResult))
if xpCtx.isRunTimeExceeded: raise XPathContext.RunTimeExceededException()
evaluateVar(xpCtx, varSet, varIndex + 1, cachedFilteredFacts, uncoveredAspectFacts)
xpCtx.inScopeVars.pop(varQname)
if overriddenInScopeVar is not None: # restore overridden value if there was one
xpCtx.inScopeVars[varQname] = overriddenInScopeVar
for aspect, priorFact in evaluationContributedUncoveredAspects.items():
if priorFact == u"none":
del uncoveredAspectFacts[aspect]
else:
uncoveredAspectFacts[aspect] = priorFact
xpCtx.varBindings.pop(varQname)
vb.close() # dereference
if overriddenVarBinding is not None:
xpCtx.varBindings[varQname] = overriddenVarBinding
def filterFacts(xpCtx, vb, facts, filterRelationships, filterType):
typeLbl = filterType + u" " if filterType else u""
orFilter = filterType == u"or"
groupFilter = filterType == u"group"
if orFilter:
factSet = set()
for varFilterRel in filterRelationships:
_filter = varFilterRel.toModelObject
if isinstance(_filter,ModelFilter): # relationship not constrained to real filters
result = _filter.filter(xpCtx, vb, facts, varFilterRel.isComplemented)
if xpCtx.formulaOptions.traceVariableFilterWinnowing:
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s %(filterType)s %(filter)s filter %(xlinkLabel)s passes %(factCount)s facts"),
modelObject=vb.var, variable=vb.qname,
                    filterType=typeLbl, filter=_filter.localName, xlinkLabel=_filter.xlinkLabel, factCount=len(result))
if orFilter:
factSet |= result
else:
facts = result
if not groupFilter and varFilterRel.isCovered: # block boolean group filters that have cover in subnetworks
vb.aspectsCovered |= _filter.aspectsCovered(vb)
if orFilter:
return factSet
else:
return facts
def coverAspectCoverFilterDims(xpCtx, vb, filterRelationships):
for varFilterRel in filterRelationships:
_filter = varFilterRel.toModelObject
if isinstance(_filter,ModelAspectCover): # relationship not constrained to real filters
if varFilterRel.isCovered:
vb.aspectsCovered |= _filter.dimAspectsCovered(vb)
elif isinstance(_filter,ModelBooleanFilter) and varFilterRel.isCovered:
coverAspectCoverFilterDims(xpCtx, vb, _filter.filterRelationships)
def implicitFilter(xpCtx, vb, facts, aspects, uncoveredAspectFacts):
if xpCtx.formulaOptions.traceVariableFilterWinnowing: # trace shows by aspect by bound variable match
for aspect in aspects:
if uncoveredAspectFacts.get(aspect, u"none") is not None:
facts = [fact
for fact in facts
if aspectMatches(xpCtx, uncoveredAspectFacts.get(aspect), fact, aspect)]
a = unicode(aspect) if isinstance(aspect,QName) else Aspect.label[aspect]
xpCtx.modelXbrl.info(u"formula:trace",
_(u"Fact Variable %(variable)s implicit filter %(aspect)s passes %(factCount)s facts"),
modelObject=vb.var, variable=vb.qname, aspect=a, factCount=len(facts))
if len(facts) == 0: break
else:
testableAspectFacts = [(aspect, uncoveredAspectFacts.get(aspect))
for aspect in aspects
if uncoveredAspectFacts.get(aspect, u"none") is not None]
#testableAspectFacts = [(aspect, fact)
# for aspect, fact in uncoveredAspectFacts.items()
# if not vb.hasAspectValueCovered(aspect)]
if testableAspectFacts:
# not tracing, do bulk aspect filtering
facts = [fact
for fact in facts
if all(aspectMatches(xpCtx, uncoveredAspectFact, fact, aspect)
for (aspect, uncoveredAspectFact) in testableAspectFacts)]
return facts
def aspectsMatch(xpCtx, fact1, fact2, aspects):
return all(aspectMatches(xpCtx, fact1, fact2, aspect) for aspect in aspects)
def aspectMatches(xpCtx, fact1, fact2, aspect):
if fact1 is None: # fallback (atomic) never matches any aspect
return False
if aspect == 1: # Aspect.LOCATION:
return (fact2 is not None and
fact1.modelXbrl != fact2.modelXbrl or # test deemed true for multi-instance comparisons
fact1.getparent() == fact2.getparent())
elif aspect == 2: # Aspect.CONCEPT:
return fact2 is not None and fact1.qname == fact2.qname
elif fact1.isTuple or fact2.isTuple:
return fact1.isTuple and fact2.isTuple # only match the aspects both facts have
elif aspect == 5: # Aspect.UNIT:
u1 = fact1.unit
u2 = fact2.unit if fact2 is not None else None
if u1 is not None:
return u1.isEqualTo(u2)
return u2 is None
else:
# rest of comparisons are for context
c1 = fact1.context
c2 = fact2.context if fact2 is not None else None
if c1 is None or (c2 is None and aspect != 10):
return False # something wrong, must be a context
if c1 is c2:
return True # same context
if aspect == 4: # Aspect.PERIOD:
return c1.isPeriodEqualTo(c2)
if aspect == 3: # Aspect.ENTITY_IDENTIFIER:
return c1.isEntityIdentifierEqualTo(c2)
if aspect == 6: # Aspect.COMPLETE_SEGMENT:
return XbrlUtil.nodesCorrespond(fact1.modelXbrl, c1.segment, c2.segment, dts2=fact2.modelXbrl)
elif aspect == 7: # Aspect.COMPLETE_SCENARIO:
return XbrlUtil.nodesCorrespond(fact1.modelXbrl, c1.scenario, c2.scenario, dts2=fact2.modelXbrl)
elif aspect == 8 or aspect == 9: # aspect in (Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
nXs1 = c1.nonDimValues(aspect)
nXs2 = c2.nonDimValues(aspect)
lXs1 = len(nXs1)
lXs2 = len(nXs2)
if lXs1 != lXs2:
return False
elif lXs1 > 0:
for i in xrange(lXs1):
if not XbrlUtil.nodesCorrespond(fact1.modelXbrl, nXs1[i], nXs2[i], dts2=fact2.modelXbrl):
return False
return True
elif aspect == 10: # Aspect.DIMENSIONS:
u''' (no implicit filtering on ALL dimensions for now)
dimQnames1 = fact1.context.dimAspects
dimQnames2 = fact2.context.dimAspects
if len(dimQnames1 ^ dimQnames2): # dims not in both
matches = False
else:
for dimQname1 in dimQnames1:
if dimQname1 not in dimQnames2 or \
not aspectMatches(fact1, fact2, dimQname1):
matches = False
break
'''
elif isinstance(aspect, QName):
global ModelDimensionValue
if ModelDimensionValue is None:
from arelle.ModelInstanceObject import ModelDimensionValue
dimValue1 = c1.dimValue(aspect)
if c2 is None:
if dimValue1 is None: # neither fact nor matching facts have this dimension aspect
return True
return False
dimValue2 = c2.dimValue(aspect)
if isinstance(dimValue1, ModelDimensionValue):
if dimValue1.isExplicit:
if isinstance(dimValue2, QName):
if dimValue1.memberQname != dimValue2:
return False
elif isinstance(dimValue2, (ModelDimensionValue,DimValuePrototype)):
if dimValue2.isTyped:
return False
elif dimValue1.memberQname != dimValue2.memberQname:
return False
elif dimValue2 is None:
return False
elif dimValue1.isTyped:
if isinstance(dimValue2, QName):
return False
elif isinstance(dimValue2, (ModelDimensionValue,DimValuePrototype)):
if dimValue2.isExplicit:
return False
elif dimValue1.dimension.typedDomainElement in xpCtx.modelXbrl.modelFormulaEqualityDefinitions:
equalityDefinition = xpCtx.modelXbrl.modelFormulaEqualityDefinitions[dimValue1.dimension.typedDomainElement]
return equalityDefinition.evalTest(xpCtx, fact1, fact2)
elif not XbrlUtil.nodesCorrespond(fact1.modelXbrl, dimValue1.typedMember, dimValue2.typedMember, dts2=fact2.modelXbrl):
return False
elif dimValue2 is None:
return False
elif isinstance(dimValue1,QName): # first dim is default value of an explicit dim
if isinstance(dimValue2, QName): # second dim is default value of an explicit dim
# multi-instance does not consider member's qname here where it is a default
# only check if qnames match if the facts are from same instance
if fact1.modelXbrl == fact2.modelXbrl and dimValue1 != dimValue2:
return False
elif isinstance(dimValue2, (ModelDimensionValue,DimValuePrototype)):
if dimValue2.isTyped:
return False
elif dimValue1 != dimValue2.memberQname:
return False
elif dimValue2 is None: # no dim aspect for fact 2
if fact1.modelXbrl == fact2.modelXbrl: # only allowed for multi-instance
return False
elif dimValue1 is None:
# absent dim member from fact1 allowed if fact2 is default in different instance
if isinstance(dimValue2,QName):
if fact1.modelXbrl == fact2.modelXbrl:
return False
elif dimValue2 is not None:
return False
# else if both are None, matches True for single and multiple instance
return True
def factsPartitions(xpCtx, facts, aspects):
factsPartitions = []
for fact in facts:
matched = False
for partition in factsPartitions:
if aspectsMatch(xpCtx, fact, partition[0], aspects):
partition.append(fact)
matched = True
break
if not matched:
factsPartitions.append([fact,])
return factsPartitions
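# Illustrative note (added): factsPartitions() groups facts whose uncovered aspects all
# match; e.g. two facts with the same concept, entity and period but different units
# land in different partitions while Aspect.UNIT is uncovered, and in the same
# partition once a filter has covered the unit aspect.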
def evaluationIsUnnecessary(thisEval, otherEvalHashDicts, otherEvals):
if otherEvals:
if all(e is None for e in thisEval):
return True # evaluation not necessary, all fallen back
# hash check if any hashes merit further look for equality
otherEvalSets = [otherEvalHashDicts[i].get(hash(e), set())
for i, e in enumerate(thisEval)
if e is not None]
if otherEvalSets:
matchingEvals = [otherEvals[i] for i in set.intersection(*otherEvalSets)]
# detects evaluations which are not different (duplicate) and extra fallback evaluations
return any(all([e == matchingEval[i] for i, e in enumerate(thisEval) if e is not None])
for matchingEval in matchingEvals)
return False
u'''
r = range(len(thisEval))
for otherEval in otherEvals:
if all([thisEval[i] is None or thisEval[i] == otherEval[i] for i in r]):
return True
return False
'''
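# Illustrative note (added): evaluationHashDicts is indexed by variable position;
# entry i maps hash(bound value i) -> set of evaluation indexes. An evaluation is
# unnecessary when some earlier evaluation agrees on every non-fallback (non-None)
# binding, which the set.intersection() above narrows down before the element-wise
# comparison.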
def produceOutputFact(xpCtx, formula, result):
priorErrorCount = len(xpCtx.modelXbrl.errors)
isTuple = isinstance(formula,ModelTuple)
# assemble context
conceptQname = formulaAspectValue(xpCtx, formula, Aspect.CONCEPT, u"xbrlfe:missingConceptRule")
if isinstance(conceptQname, VariableBindingError):
xpCtx.modelXbrl.error(conceptQname.err,
_(u"Formula %(label)s concept: %(concept)s"),
modelObject=formula, label=formula.logLabel(), concept=conceptQname.msg)
modelConcept = None
else:
modelConcept = xpCtx.modelXbrl.qnameConcepts[conceptQname]
if modelConcept is None or (not modelConcept.isTuple if isTuple else not modelConcept.isItem):
xpCtx.modelXbrl.error(u"xbrlfe:missingConceptRule",
_(u"Formula %(label)s concept %(concept)s is not a %(element)s"),
modelObject=formula, label=formula.logLabel(), concept=conceptQname, element=formula.localName)
outputLocation = formulaAspectValue(xpCtx, formula, Aspect.LOCATION_RULE, None)
if not isTuple:
# entity
entityIdentScheme = formulaAspectValue(xpCtx, formula, Aspect.SCHEME, u"xbrlfe:missingEntityIdentifierRule")
if isinstance(entityIdentScheme, VariableBindingError):
xpCtx.modelXbrl.error(unicode(entityIdentScheme),
_(u"Formula %(label)s entity identifier scheme: %(scheme)s"),
modelObject=formula, label=formula.logLabel(), scheme=entityIdentScheme.msg)
entityIdentValue = None
else:
entityIdentValue = formulaAspectValue(xpCtx, formula, Aspect.VALUE, u"xbrlfe:missingEntityIdentifierRule")
if isinstance(entityIdentValue, VariableBindingError):
                xpCtx.modelXbrl.error(unicode(entityIdentValue),
_(u"Formula %(label)s entity identifier value: %(entityIdentifier)s"),
modelObject=formula, label=formula.logLabel(), entityIdentifier=entityIdentValue.msg)
# period
periodType = formulaAspectValue(xpCtx, formula, Aspect.PERIOD_TYPE, u"xbrlfe:missingPeriodRule")
periodStart = None
periodEndInstant = None
if isinstance(periodType, VariableBindingError):
xpCtx.modelXbrl.error(unicode(periodType),
_(u"Formula %(label)s period type: %(periodType)s"),
modelObject=formula, label=formula.logLabel(), periodType=periodType.msg)
elif periodType == u"instant":
periodEndInstant = formulaAspectValue(xpCtx, formula, Aspect.INSTANT, u"xbrlfe:missingPeriodRule")
if isinstance(periodEndInstant, VariableBindingError):
xpCtx.modelXbrl.error(unicode(periodEndInstant),
_(u"Formula %(label)s period end: %(period)s"),
modelObject=formula, label=formula.logLabel(), period=periodEndInstant.msg)
elif periodType == u"duration":
periodStart = formulaAspectValue(xpCtx, formula, Aspect.START, u"xbrlfe:missingPeriodRule")
if isinstance(periodStart, VariableBindingError):
xpCtx.modelXbrl.error(unicode(periodStart),
_(u"Formula %(label)s period start: %(period)s"),
modelObject=formula, label=formula.logLabel(), period=periodStart.msg)
periodEndInstant = formulaAspectValue(xpCtx, formula, Aspect.END, u"xbrlfe:missingPeriodRule")
if isinstance(periodEndInstant, VariableBindingError):
xpCtx.modelXbrl.error(unicode(periodEndInstant),
_(u"Formula %(label)s period end: %(period)s"),
modelObject=formula, label=formula.logLabel(), period=periodEndInstant.msg)
# unit
if modelConcept is not None and modelConcept.isNumeric:
unitSource = formulaAspectValue(xpCtx, formula, Aspect.UNIT_MEASURES, None)
multDivBy = formulaAspectValue(xpCtx, formula, Aspect.MULTIPLY_BY, u"xbrlfe:missingUnitRule")
if isinstance(multDivBy, VariableBindingError):
xpCtx.modelXbrl.error(unicode(multDivBy) if isinstance(multDivBy, VariableBindingError) else u"xbrlfe:missingUnitRule",
_(u"Formula %(label)s unit: %(unit)s"),
modelObject=formula, label=formula.logLabel(), unit=multDivBy.msg)
                multiplyBy = []; divideBy = [] # prevent errors later if bad
else:
divMultBy = formulaAspectValue(xpCtx, formula, Aspect.DIVIDE_BY, u"xbrlfe:missingUnitRule")
if isinstance(divMultBy, VariableBindingError):
                    xpCtx.modelXbrl.error(unicode(divMultBy) if isinstance(divMultBy, VariableBindingError) else u"xbrlfe:missingUnitRule",
_(u"Formula %(label)s unit: %(unit)s"),
modelObject=formula, label=formula.logLabel(), unit=divMultBy.msg)
                    multiplyBy = []; divideBy = [] # prevent errors later if bad
else:
multiplyBy = unitSource[0] + multDivBy[0] + divMultBy[1]
divideBy = unitSource[1] + multDivBy[1] + divMultBy[0]
# remove cancelling mult/div units
lookForCommonUnits = True
while lookForCommonUnits:
lookForCommonUnits = False
for commonUnit in multiplyBy:
if commonUnit in divideBy:
multiplyBy.remove(commonUnit)
divideBy.remove(commonUnit)
lookForCommonUnits = True
break
if len(multiplyBy) == 0: # if no units add pure
if (Aspect.MULTIPLY_BY not in formula.aspectValues and Aspect.MULTIPLY_BY not in formula.aspectProgs and
Aspect.DIVIDE_BY not in formula.aspectValues and Aspect.DIVIDE_BY not in formula.aspectProgs):
xpCtx.modelXbrl.error(u"xbrlfe:missingUnitRule",
_(u"Formula %(label)s"),
modelObject=formula, label=formula.logLabel())
multiplyBy.append(XbrlConst.qnXbrliPure)
# dimensions
segOCCs = []
scenOCCs = []
if formula.aspectModel == u"dimensional":
dimAspects = {}
dimQnames = formulaAspectValue(xpCtx, formula, Aspect.DIMENSIONS, None)
if dimQnames:
for dimQname in dimQnames:
dimConcept = xpCtx.modelXbrl.qnameConcepts[dimQname]
dimErr = u"xbrlfe:missing{0}DimensionRule".format(u"typed" if dimConcept is not None and dimConcept.isTypedDimension else u"explicit")
dimValue = formulaAspectValue(xpCtx, formula, dimQname, dimErr)
if isinstance(dimValue, VariableBindingError):
xpCtx.modelXbrl.error(dimErr,
_(u"Formula %(label)s dimension %(dimension)s: %(value)s"),
modelObject=formula, label=formula.logLabel(),
dimension=dimQname, value=dimValue.msg)
elif dimConcept.isTypedDimension:
if isinstance(dimValue, list): # result of flatten, always a list
if len(dimValue) != 1 or not isinstance(dimValue[0], ModelObject):
xpCtx.modelXbrl.error(u"xbrlfe:wrongXpathResultForTypedDimensionRule",
_(u"Formula %(label)s dimension %(dimension)s value is not a node: %(value)s"),
modelObject=formula, label=formula.logLabel(),
dimension=dimQname, value=dimValue)
continue
dimValue = dimValue[0]
dimAspects[dimQname] = dimValue
elif dimValue is not None and xpCtx.modelXbrl.qnameDimensionDefaults.get(dimQname) != dimValue:
dimAspects[dimQname] = dimValue
segOCCs = formulaAspectValue(xpCtx, formula, Aspect.NON_XDT_SEGMENT, None)
scenOCCs = formulaAspectValue(xpCtx, formula, Aspect.NON_XDT_SCENARIO, None)
for occElt in xpCtx.flattenSequence((segOCCs, scenOCCs)):
if isinstance(occElt, ModelObject) and occElt.namespaceURI == XbrlConst.xbrldi:
xpCtx.modelXbrl.error(u"xbrlfe:badSubsequentOCCValue",
_(u"Formula %(label)s OCC element %(occ)s covers a dimensional aspect"),
modelObject=(formula,occElt), label=formula.logLabel(),
occ=occElt.elementQname)
else:
dimAspects = None # non-dimensional
segOCCs = formulaAspectValue(xpCtx, formula, Aspect.COMPLETE_SEGMENT, None)
scenOCCs = formulaAspectValue(xpCtx, formula, Aspect.COMPLETE_SCENARIO, None)
if priorErrorCount < len(xpCtx.modelXbrl.errors):
return None # had errors, don't produce output fact
    # does the context exist in the output instance document
outputInstanceQname = formula.outputInstanceQname
outputXbrlInstance = xpCtx.inScopeVars[outputInstanceQname]
xbrlElt = outputXbrlInstance.modelDocument.xmlRootElement
# in source instance document
newFact = None
if isTuple:
newFact = outputXbrlInstance.createFact(conceptQname, parent=outputLocation,
afterSibling=xpCtx.outputLastFact.get(outputInstanceQname))
else:
# add context
prevCntx = outputXbrlInstance.matchContext(
entityIdentScheme, entityIdentValue, periodType, periodStart, periodEndInstant,
dimAspects, segOCCs, scenOCCs)
if prevCntx is not None:
cntxId = prevCntx.id
newCntxElt = prevCntx
else:
newCntxElt = outputXbrlInstance.createContext(entityIdentScheme, entityIdentValue,
periodType, periodStart, periodEndInstant, conceptQname, dimAspects, segOCCs, scenOCCs,
afterSibling=xpCtx.outputLastContext.get(outputInstanceQname),
beforeSibling=xpCtx.outputFirstFact.get(outputInstanceQname))
cntxId = newCntxElt.id
xpCtx.outputLastContext[outputInstanceQname] = newCntxElt
# does unit exist
# add unit
if modelConcept.isNumeric:
prevUnit = outputXbrlInstance.matchUnit(multiplyBy, divideBy)
if prevUnit is not None:
unitId = prevUnit.id
newUnitElt = prevUnit
else:
newUnitElt = outputXbrlInstance.createUnit(multiplyBy, divideBy,
afterSibling=xpCtx.outputLastUnit.get(outputInstanceQname),
beforeSibling=xpCtx.outputFirstFact.get(outputInstanceQname))
unitId = newUnitElt.id
xpCtx.outputLastUnit[outputInstanceQname] = newUnitElt
# add fact
attrs = [(u"contextRef", cntxId)]
precision = None
decimals = None
if modelConcept.isNumeric:
attrs.append((u"unitRef", unitId))
value = formula.evaluate(xpCtx)
valueSeqLen = len(value)
if valueSeqLen > 1:
xpCtx.modelXbrl.error(u"xbrlfe:nonSingletonOutputValue",
_(u"Formula %(label)s value is a sequence of length %(valueSequenceLength)s"),
modelObject=formula, label=formula.logLabel(), valueSequenceLength=valueSeqLen)
else:
if valueSeqLen == 0: #xsi:nil if no value
attrs.append((XbrlConst.qnXsiNil, u"true"))
v = None
else:
# add precision/decimals for non-fraction numerics
if modelConcept.isNumeric and not modelConcept.isFraction:
if formula.hasDecimals:
decimals = formula.evaluateRule(xpCtx, Aspect.DECIMALS)
attrs.append((u"decimals", decimals))
else:
if formula.hasPrecision:
precision = formula.evaluateRule(xpCtx, Aspect.PRECISION)
else:
precision = 0
attrs.append((u"precision", precision))
x = value[0]
if isinstance(x,float):
if (isnan(x) or
(precision and (isinf(precision) or precision == 0)) or
(decimals and isinf(decimals))):
v = xsString(xpCtx, None, x)
elif decimals is not None:
v = u"%.*f" % ( int(decimals), x)
elif precision is not None and precision != 0:
a = fabs(x)
log = log10(a) if a != 0 else 0
v = u"%.*f" % ( int(precision) - int(log) - (1 if a >= 1 else 0), x)
else: # no implicit precision yet
v = xsString(xpCtx, None, x)
elif isinstance(x,Decimal):
if (x.is_nan() or
(precision and (isinf(precision) or precision == 0)) or
(decimals and isinf(decimals))):
v = xsString(xpCtx, None, x)
elif decimals is not None:
v = u"%.*f" % ( int(decimals), x)
elif precision is not None and precision != 0:
a = x.copy_abs()
log = a.log10() if a != 0 else 0
v = u"%.*f" % ( int(precision) - int(log) - (1 if a >= 1 else 0), x)
else: # no implicit precision yet
v = xsString(xpCtx, None, x)
elif isinstance(x,QName):
v = XmlUtil.addQnameValue(xbrlElt, x)
elif isinstance(x,datetime.datetime):
v = XmlUtil.dateunionValue(x)
else:
v = xsString(xpCtx, None, x)
newFact = outputXbrlInstance.createFact(conceptQname, attributes=attrs, text=v,
parent=outputLocation,
afterSibling=xpCtx.outputLastFact.get(outputInstanceQname))
if newFact is not None:
xpCtx.outputLastFact[outputInstanceQname] = newFact
if outputInstanceQname not in xpCtx.outputFirstFact:
xpCtx.outputFirstFact[outputInstanceQname] = newFact
return newFact
def formulaAspectValue(xpCtx, formula, aspect, srcMissingErr):
ruleValue = formula.evaluateRule(xpCtx, aspect)
if ruleValue is not None:
if aspect in (Aspect.CONCEPT,
Aspect.VALUE, Aspect.SCHEME,
Aspect.PERIOD_TYPE, Aspect.START, Aspect.END, Aspect.INSTANT,
):
return ruleValue
if isinstance(aspect,QName) and ruleValue != XbrlConst.qnFormulaDimensionSAV:
return ruleValue
sourceQname = formula.source(aspect)
formulaUncovered = sourceQname == XbrlConst.qnFormulaUncovered
if aspect == Aspect.LOCATION_RULE and sourceQname is None:
return xpCtx.inScopeVars[formula.outputInstanceQname].modelDocument.xmlRootElement
elif aspect == Aspect.DIMENSIONS and formulaUncovered:
aspectSourceValue = set() # union of uncovered dimensions, all variables
elif srcMissingErr is None:
aspectSourceValue = None # important for dimensions, missing is not an error
elif formulaUncovered:
if isinstance(aspect,QName): # absent uncovered dimension is ok, just not copied to output OCC
aspectSourceValue = None
else:
aspectSourceValue = xbrlfe_undefinedSAV # other then dimensions, absent is an error
else:
aspectSourceValue = VariableBindingError(srcMissingErr,
_(u"neither source {0}, nor an aspect rule, were found.")
.format(sourceQname if sourceQname else u''))
for vb in xpCtx.varBindings.values():
if vb.isFactVar and not vb.isFallback:
if aspect == Aspect.DIMENSIONS and formulaUncovered:
aspectSourceValue |= vb.aspectValue(aspect)
elif formulaUncovered and vb.hasAspectValueUncovered(aspect):
aspectSourceValue = vb.aspectValue(aspect)
break
elif sourceQname == vb.qname:
if not vb.isBindAsSequence or vb.hasAspectValueUncovered(aspect):
aspectSourceValue = vb.aspectValue(aspect)
else:
aspectSourceValue = VariableBindingError(u"xbrlfe:sequenceSAVConflicts",
_(u"source, {0}, contains the QName of a fact variable that binds as a sequence where that fact's aspect rule covers this filtered aspect")
.format(sourceQname))
break
elif aspect == Aspect.LOCATION_RULE and sourceQname == vb.qname:
aspectSourceValue = vb.aspectValue(aspect)
break
# modify by any specific rules
if aspect in (Aspect.CONCEPT, Aspect.LOCATION_RULE,
Aspect.VALUE, Aspect.SCHEME,
Aspect.PERIOD_TYPE, Aspect.START, Aspect.END, Aspect.INSTANT,
) or isinstance(aspect,QName):
return aspectSourceValue
elif aspect == Aspect.UNIT_MEASURES:
augment = formula.evaluateRule(xpCtx, Aspect.AUGMENT)
if aspectSourceValue and (not augment or augment == u"true"): # true is the default behavior
return aspectSourceValue
else:
return ([],[])
elif aspect in (Aspect.MULTIPLY_BY, Aspect.DIVIDE_BY):
if sourceQname and aspectSourceValue:
return aspectSourceValue
else:
return (ruleValue,[])
elif aspect == Aspect.DIMENSIONS:
if aspectSourceValue is None: aspectSourceValue = set()
if ruleValue is None: ruleValueSet = set()
else: ruleValueSet = set(ruleValue)
omitDims = formula.evaluateRule(xpCtx, Aspect.OMIT_DIMENSIONS)
if omitDims is None: omitDimsSet = set()
else: omitDimsSet = set(omitDims)
return (aspectSourceValue | ruleValueSet) - omitDimsSet
elif isinstance(aspect, QName):
return aspectSourceValue
elif aspect in (Aspect.COMPLETE_SEGMENT, Aspect.COMPLETE_SCENARIO,
Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
occFragments = []
occEmpty = ruleValue and ruleValue[0] == XbrlConst.qnFormulaOccEmpty
if not occEmpty and aspectSourceValue:
occFragments.extend(aspectSourceValue)
if ruleValue:
occFragments.extend(ruleValue[1 if occEmpty else 0:])
return occFragments
return None
def uncoveredAspectValue(xpCtx, aspect):
for vb in xpCtx.varBindings.values():
if vb.isFactVar and not vb.isFallback and vb.hasAspectValueUncovered(aspect):
return vb.aspectValue(aspect)
return None
def variableBindingIsFallback(xpCtx, variableQname):
for vb in xpCtx.varBindings.values():
if vb.qname == variableQname:
return vb.isFactVar and vb.isFallback
return False
def uncoveredVariableSetAspects(xpCtx):
aspectsDefined = set()
aspectsCovered = set()
for vb in xpCtx.varBindings.values():
if vb.isFactVar and not vb.isFallback:
aspectsCovered |= vb.aspectsCovered
aspectsDefined |= vb.aspectsDefined
return (aspectsDefined - aspectsCovered)
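# Illustrative note (added): the result is the union of defined aspects minus the
# union of covered aspects over the non-fallback fact variables; e.g. if the bound
# variables define {CONCEPT, PERIOD, UNIT} and cover only {CONCEPT}, this returns
# {PERIOD, UNIT}.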
class VariableBindingError(object):
def __init__(self, err, msg=None):
self.err = err
self.msg = msg
def __repr__(self):
return self.err
xbrlfe_undefinedSAV = VariableBindingError(u"xbrlfe:undefinedSAV")
class VariableBinding(object):
def __init__(self, xpCtx, varRel=None, boundFact=None):
self.xpCtx = xpCtx
if varRel is not None:
self.qname = varRel.variableQname
self.var = varRel.toModelObject
else:
self.qname = self.var = None
self.aspectsDefined = set()
self.aspectsCovered = set()
self.isFactVar = isinstance(self.var, ModelFactVariable)
self.isGeneralVar = isinstance(self.var, ModelGeneralVariable)
self.isParameter = isinstance(self.var, ModelParameter)
self.isFormulaResult = isinstance(self.var, ModelFormula)
self.isBindAsSequence = self.var.bindAsSequence == u"true" if isinstance(self.var,ModelVariable) else False
self.yieldedFact = boundFact
self.yieldedFactResult = None
self.isFallback = False
self.instances = ([inst
for qn in self.var.fromInstanceQnames
for inst in xpCtx.flattenSequence(xpCtx.inScopeVars[qn])]
if self.var is not None and self.var.fromInstanceQnames
else [xpCtx.modelXbrl])
def close(self):
self.__dict__.clear() # dereference
pass
@property
def resourceElementName(self):
if self.isFactVar: return _(u"Fact Variable")
elif self.isGeneralVar: return _(u"General Variable")
elif self.isParameter: return _(u"Parameter")
elif isinstance(self.var, ModelTuple): return _(u"Tuple")
elif isinstance(self.var, ModelFormula): return _(u"Formula")
elif isinstance(self.var, ModelValueAssertion): return _(u"ValueAssertion")
elif isinstance(self.var, ModelExistenceAssertion): return _(u"ExistenceAssertion")
def matchesSubPartitions(self, partition, aspects):
if self.var.matches == u"true":
return [partition]
subPartitions = []
for fact in partition:
foundSubPartition = False
for subPartition in subPartitions:
matchedInSubPartition = False
for fact2 in subPartition:
if aspectsMatch(self.xpCtx, fact, fact2, aspects):
matchedInSubPartition = True
break
if not matchedInSubPartition:
subPartition.append(fact)
foundSubPartition = True
break
if not foundSubPartition:
subPartitions.append([fact,])
return subPartitions
@property
def evaluationResults(self):
if self.isFactVar:
if self.isBindAsSequence and self.facts:
for factsPartition in factsPartitions(self.xpCtx, self.facts, self.aspectsDefined - self.aspectsCovered):
for matchesSubPartition in self.matchesSubPartitions(factsPartition, self.aspectsDefined):
self.yieldedFact = matchesSubPartition[0]
self.yieldedFactContext = self.yieldedFact.context
self.yieldedEvaluation = matchesSubPartition
self.isFallback = False
yield matchesSubPartition
else:
for fact in self.facts:
self.yieldedFact = fact
self.yieldedFactContext = self.yieldedFact.context
self.yieldedEvaluation = fact
self.isFallback = False
yield fact
if self.values:
self.yieldedFact = None
self.yieldedFactContext = None
self.yieldedEvaluation = u"fallback"
self.isFallback = True
yield self.values
elif self.isGeneralVar:
self.yieldedFact = None
self.yieldedFactContext = None
self.isFallback = False
if self.isBindAsSequence:
self.yieldedEvaluation = self.values
yield self.values
else:
for value in self.values:
self.yieldedEvaluation = value
yield value
elif self.isParameter:
self.yieldedFact = None
self.yieldedEvaluation = None
self.isFallback = False
yield self.parameterValue
def matchableBoundFact(self, fbVars): # return from this function has to be hashable
if (self.isFallback or self.isParameter
# remove to allow different gen var evaluations: or self.isGeneralVar
or (self.isGeneralVar and not fbVars.isdisjoint(self.var.variableRefs()))):
return None
if self.isBindAsSequence:
return tuple(self.yieldedEvaluation)
if self.isFormulaResult:
return self.yieldedFact
return self.yieldedEvaluation
    def hasDimension(self, dimension):
        return dimension in self.definedDimensions
    def hasDimensionValueDefined(self, dimension):
        return dimension in self.definedDimensions
    @property
    def definedDimensions(self):
        # dimension aspects of the bound fact's context (empty set for tuples or
        # facts without a context); exposed as a property so the membership tests
        # above operate on a set rather than on the method object itself
        return (self.yieldedFact.context.dimAspects(self.xpCtx.defaultDimensionAspects)
                if self.yieldedFact.isItem and self.yieldedFact.context is not None
                else set())
def isDimensionalValid(self, dimension):
return False
def hasAspectValueUncovered(self, aspect):
if aspect in aspectModelAspect: aspect = aspectModelAspect[aspect]
return aspect in self.aspectsDefined and aspect not in self.aspectsCovered
def hasAspectValueCovered(self, aspect):
if aspect in aspectModelAspect: aspect = aspectModelAspect[aspect]
return aspect in self.aspectsCovered
def aspectsNotCovered(self, aspects):
return set(a for a in aspects if not self.hasAspectValueCovered(a))
def hasAspectValueDefined(self, aspect):
if aspect in aspectModelAspect: aspect = aspectModelAspect[aspect]
return aspect in self.aspectsDefined
def aspectValue(self, aspect):
fact = self.yieldedFact
if fact is None:
if aspect == Aspect.DIMENSIONS:
return set()
else:
return None
if aspect == Aspect.LOCATION:
return fact.getparent()
elif aspect == Aspect.LOCATION_RULE:
return fact
elif aspect == Aspect.CONCEPT:
return fact.qname
elif fact.isTuple or fact.context is None:
return None #subsequent aspects don't exist for tuples
# context is known to be not None after here
elif aspect == Aspect.PERIOD:
return fact.context.period
elif aspect == Aspect.PERIOD_TYPE:
if fact.context.isInstantPeriod: return u"instant"
elif fact.context.isStartEndPeriod: return u"duration"
elif fact.context.isForeverPeriod: return u"forever"
return None
elif aspect == Aspect.INSTANT:
return fact.context.instantDatetime
elif aspect == Aspect.START:
return fact.context.startDatetime
elif aspect == Aspect.END:
return fact.context.endDatetime
elif aspect == Aspect.ENTITY_IDENTIFIER:
return fact.context.entityIdentifierElement
elif aspect == Aspect.SCHEME:
return fact.context.entityIdentifier[0]
elif aspect == Aspect.VALUE:
return fact.context.entityIdentifier[1]
elif aspect in (Aspect.COMPLETE_SEGMENT, Aspect.COMPLETE_SCENARIO,
Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
return fact.context.nonDimValues(aspect)
elif aspect == Aspect.DIMENSIONS:
return fact.context.dimAspects(self.xpCtx.defaultDimensionAspects)
elif isinstance(aspect, QName):
return fact.context.dimValue(aspect)
elif fact.unit is not None:
if aspect == Aspect.UNIT:
return fact.unit
elif aspect in (Aspect.UNIT_MEASURES, Aspect.MULTIPLY_BY, Aspect.DIVIDE_BY):
return fact.unit.measures
return None
import copy
import os
import socket
import sys
import pickle
from parameterized import parameterized
from gssapi import creds as gsscreds
from gssapi import mechs as gssmechs
from gssapi import names as gssnames
from gssapi import sec_contexts as gssctx
from gssapi import raw as gb
from gssapi import _utils as gssutils
from gssapi import exceptions as excs
import k5test.unit as ktu
import k5test as kt
TARGET_SERVICE_NAME = b'host'
FQDN = socket.getfqdn().encode('utf-8')
SERVICE_PRINCIPAL = TARGET_SERVICE_NAME + b'/' + FQDN
# disable error deferring to catch errors immediately
gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = False
class _GSSAPIKerberosTestCase(kt.KerberosTestCase):
@classmethod
def setUpClass(cls):
super(_GSSAPIKerberosTestCase, cls).setUpClass()
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
cls.realm.kinit(svc_princ, flags=['-k'])
cls._init_env()
cls.USER_PRINC = cls.realm.user_princ.split('@')[0].encode("UTF-8")
cls.ADMIN_PRINC = cls.realm.admin_princ.split('@')[0].encode("UTF-8")
@classmethod
def _init_env(cls):
cls._saved_env = copy.deepcopy(os.environ)
for k, v in cls.realm.env.items():
os.environ[k] = v
@classmethod
def _restore_env(cls):
for k in copy.deepcopy(os.environ):
if k in cls._saved_env:
os.environ[k] = cls._saved_env[k]
else:
del os.environ[k]
cls._saved_env = None
@classmethod
def tearDownClass(cls):
super(_GSSAPIKerberosTestCase, cls).tearDownClass()
cls._restore_env()
def _perms_cycle(elem, rest, old_d):
if elem is None:
name_str = "with_params_"
true_keys = [k for (k, v) in old_d.items() if v]
if not true_keys:
name_str += 'none'
else:
name_str += '_'.join(true_keys)
return [(name_str, old_d)]
else:
if len(rest) > 0:
next_elem = rest.pop()
else:
next_elem = None
res = []
for v in (True, False):
new_d = copy.deepcopy(old_d)
new_d[elem] = v
res.extend(_perms_cycle(next_elem, copy.deepcopy(rest), new_d))
return res
def exist_perms(**kwargs):
all_elems = list(kwargs.keys())
curr_elems = copy.deepcopy(all_elems)
perms = _perms_cycle(curr_elems.pop(), curr_elems, {})
res = []
for name_str, perm in perms:
args = dict([(k, v) for (k, v) in kwargs.items() if perm[k]])
res.append((name_str, args))
return parameterized.expand(res)
def true_false_perms(*all_elems_tuple):
all_elems = list(all_elems_tuple)
curr_elems = copy.deepcopy(all_elems)
perms = _perms_cycle(curr_elems.pop(), curr_elems, {})
return parameterized.expand(perms)
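# Illustrative sketch (an assumed expansion, not taken from the upstream tests):
# the two decorator factories above feed parameterized.expand with one case per
# True/False permutation of their keyword names.  For instance,
#
#     @exist_perms(lifetime=30, usage='both')
#     def test_foo(self, str_name, kwargs): ...
#
# would run test_foo four times, with case names such as
# 'with_params_usage_lifetime', 'with_params_usage', 'with_params_lifetime' and
# 'with_params_none', passing in kwargs only the entries whose flag is True for
# that permutation.  true_false_perms does the same but hands the test the raw
# True/False dict instead of filtered kwargs.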
# NB(directxman12): MIT Kerberos completely ignores input TTLs for
# credentials. I suspect this is because the TTL
# is actually set when kinit is called.
# NB(directxman12): the above note used to be wonderfully sarcastic
class CredsTestCase(_GSSAPIKerberosTestCase):
def setUp(self):
super(CredsTestCase, self).setUp()
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
self.realm.kinit(svc_princ, flags=['-k'])
self.name = gssnames.Name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
@exist_perms(lifetime=30, mechs=[gb.MechType.kerberos],
usage='both')
def test_acquire_by_init(self, str_name, kwargs):
creds = gsscreds.Credentials(name=self.name, **kwargs)
self.assertIsInstance(creds.lifetime, int)
del creds
@exist_perms(lifetime=30, mechs=[gb.MechType.kerberos],
usage='both')
def test_acquire_by_method(self, str_name, kwargs):
cred_resp = gsscreds.Credentials.acquire(name=self.name,
**kwargs)
self.assertIsNotNone(cred_resp)
creds, actual_mechs, ttl = cred_resp
self.assertIsInstance(creds, gsscreds.Credentials)
self.assertIn(gb.MechType.kerberos, actual_mechs)
self.assertIsInstance(ttl, int)
del creds
@ktu.gssapi_extension_test('rfc5588', 'RFC 5588')
def test_store_acquire(self):
# we need to acquire a forwardable ticket
svc_princ = SERVICE_PRINCIPAL.decode("UTF-8")
self.realm.kinit(svc_princ, flags=['-k', '-f'])
target_name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
client_creds = gsscreds.Credentials(usage='initiate')
client_ctx = gssctx.SecurityContext(
name=target_name, creds=client_creds,
flags=gb.RequirementFlag.delegate_to_peer)
client_token = client_ctx.step()
server_creds = gsscreds.Credentials(usage='accept')
server_ctx = gssctx.SecurityContext(creds=server_creds)
server_ctx.step(client_token)
deleg_creds = server_ctx.delegated_creds
self.assertIsNotNone(deleg_creds)
store_res = deleg_creds.store(usage='initiate', set_default=True,
overwrite=True)
self.assertEqual(store_res.usage, "initiate")
self.assertIn(gb.MechType.kerberos, store_res.mechs)
reacquired_creds = gsscreds.Credentials(name=deleg_creds.name,
usage='initiate')
self.assertIsNotNone(reacquired_creds)
@ktu.gssapi_extension_test('cred_store', 'credentials store')
def test_store_into_acquire_from(self):
CCACHE = 'FILE:{tmpdir}/other_ccache'.format(tmpdir=self.realm.tmpdir)
KT = '{tmpdir}/other_keytab'.format(tmpdir=self.realm.tmpdir)
store = {'ccache': CCACHE, 'keytab': KT}
princ_name = 'service/cs@' + self.realm.realm
self.realm.addprinc(princ_name)
self.realm.extract_keytab(princ_name, KT)
self.realm.kinit(princ_name, None, ['-k', '-t', KT])
initial_creds = gsscreds.Credentials(name=None,
usage='initiate')
store_res = initial_creds.store(store, overwrite=True)
self.assertIsNotNone(store_res.mechs)
self.assertGreater(len(store_res.mechs), 0)
self.assertEqual(store_res.usage, "initiate")
name = gssnames.Name(princ_name)
retrieved_creds = gsscreds.Credentials(name=name, store=store)
self.assertIsNotNone(retrieved_creds)
def test_create_from_other(self):
raw_creds = gb.acquire_cred(None, usage='accept').creds
high_level_creds = gsscreds.Credentials(raw_creds)
self.assertEqual(high_level_creds.usage, "accept")
@true_false_perms('name', 'lifetime', 'usage', 'mechs')
def test_inquire(self, str_name, kwargs):
creds = gsscreds.Credentials(name=self.name)
resp = creds.inquire(**kwargs)
if kwargs['name']:
self.assertEqual(resp.name, self.name)
else:
self.assertIsNone(resp.name)
if kwargs['lifetime']:
self.assertIsInstance(resp.lifetime, int)
else:
self.assertIsNone(resp.lifetime)
if kwargs['usage']:
self.assertEqual(resp.usage, "both")
else:
self.assertIsNone(resp.usage)
if kwargs['mechs']:
self.assertIn(gb.MechType.kerberos, resp.mechs)
else:
self.assertIsNone(resp.mechs)
@true_false_perms('name', 'init_lifetime', 'accept_lifetime', 'usage')
def test_inquire_by_mech(self, str_name, kwargs):
creds = gsscreds.Credentials(name=self.name)
resp = creds.inquire_by_mech(mech=gb.MechType.kerberos, **kwargs)
if kwargs['name']:
self.assertEqual(resp.name, self.name)
else:
self.assertIsNone(resp.name)
if kwargs['init_lifetime']:
self.assertIsInstance(resp.init_lifetime, int)
else:
self.assertIsNone(resp.init_lifetime)
if kwargs['accept_lifetime']:
self.assertIsInstance(resp.accept_lifetime, int)
else:
self.assertIsNone(resp.accept_lifetime)
if kwargs['usage']:
self.assertEqual(resp.usage, "both")
else:
self.assertIsNone(resp.usage)
def test_add(self):
input_creds = gsscreds.Credentials(gb.Creds())
name = gssnames.Name(SERVICE_PRINCIPAL)
new_creds = input_creds.add(name, gb.MechType.kerberos,
usage='initiate')
self.assertIsInstance(new_creds, gsscreds.Credentials)
@ktu.gssapi_extension_test('cred_store', 'credentials store')
def test_store_into_add_from(self):
CCACHE = 'FILE:{tmpdir}/other_ccache'.format(tmpdir=self.realm.tmpdir)
KT = '{tmpdir}/other_keytab'.format(tmpdir=self.realm.tmpdir)
store = {'ccache': CCACHE, 'keytab': KT}
princ_name = 'service/cs@' + self.realm.realm
self.realm.addprinc(princ_name)
self.realm.extract_keytab(princ_name, KT)
self.realm.kinit(princ_name, None, ['-k', '-t', KT])
initial_creds = gsscreds.Credentials(name=None,
usage='initiate')
store_res = initial_creds.store(store, overwrite=True)
self.assertIsNotNone(store_res.mechs)
self.assertGreater(len(store_res.mechs), 0)
self.assertEqual(store_res.usage, "initiate")
name = gssnames.Name(princ_name)
input_creds = gsscreds.Credentials(gb.Creds())
retrieved_creds = input_creds.add(name, gb.MechType.kerberos,
store=store)
self.assertIsInstance(retrieved_creds, gsscreds.Credentials)
@ktu.gssapi_extension_test('cred_imp_exp', 'credentials import-export')
def test_export(self):
creds = gsscreds.Credentials(name=self.name)
token = creds.export()
self.assertIsInstance(token, bytes)
@ktu.gssapi_extension_test('cred_imp_exp', 'credentials import-export')
def test_import_by_init(self):
creds = gsscreds.Credentials(name=self.name)
token = creds.export()
imported_creds = gsscreds.Credentials(token=token)
self.assertEqual(imported_creds.lifetime, creds.lifetime)
self.assertEqual(imported_creds.name, creds.name)
@ktu.gssapi_extension_test('cred_imp_exp', 'credentials import-export')
def test_pickle_unpickle(self):
creds = gsscreds.Credentials(name=self.name)
pickled_creds = pickle.dumps(creds)
unpickled_creds = pickle.loads(pickled_creds)
self.assertEqual(unpickled_creds.lifetime, creds.lifetime)
self.assertEqual(unpickled_creds.name, creds.name)
@exist_perms(lifetime=30, mechs=[gb.MechType.kerberos],
usage='initiate')
@ktu.gssapi_extension_test('s4u', 'S4U')
def test_impersonate(self, str_name, kwargs):
server_name = gssnames.Name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
password = self.realm.password("user")
self.realm.kinit(self.realm.user_princ, password=password,
flags=["-f"])
client_ctx = gssctx.SecurityContext(
name=server_name, flags=gb.RequirementFlag.delegate_to_peer)
client_token = client_ctx.step()
self.realm.kinit(SERVICE_PRINCIPAL.decode("utf-8"), flags=["-k"])
server_creds = gsscreds.Credentials(usage="both")
server_ctx = gssctx.SecurityContext(creds=server_creds)
server_ctx.step(client_token)
self.assertTrue(server_ctx.complete)
imp_creds = server_ctx.delegated_creds.impersonate(server_name,
**kwargs)
self.assertIsInstance(imp_creds, gsscreds.Credentials)
@ktu.gssapi_extension_test('s4u', 'S4U')
def test_add_with_impersonate(self):
server_name = gssnames.Name(SERVICE_PRINCIPAL,
gb.NameType.kerberos_principal)
password = self.realm.password("user")
self.realm.kinit(self.realm.user_princ, password=password,
flags=["-f"])
client_ctx = gssctx.SecurityContext(
name=server_name, flags=gb.RequirementFlag.delegate_to_peer)
client_token = client_ctx.step()
self.realm.kinit(SERVICE_PRINCIPAL.decode("utf-8"), flags=["-k"])
server_creds = gsscreds.Credentials(usage="both")
server_ctx = gssctx.SecurityContext(creds=server_creds)
server_ctx.step(client_token)
self.assertTrue(server_ctx.complete)
# use empty creds to test here
input_creds = gsscreds.Credentials(gb.Creds())
new_creds = input_creds.add(
server_name, gb.MechType.kerberos,
impersonator=server_ctx.delegated_creds, usage='initiate')
self.assertIsInstance(new_creds, gsscreds.Credentials)
class MechsTestCase(_GSSAPIKerberosTestCase):
def test_indicate_mechs(self):
mechs = gssmechs.Mechanism.all_mechs()
for mech in mechs:
s = str(mech)
self.assertGreater(len(s), 0)
@ktu.gssapi_extension_test('rfc5801', 'RFC 5801: SASL Names')
def test_sasl_properties(self):
mechs = gssmechs.Mechanism.all_mechs()
for mech in mechs:
s = str(mech)
self.assertGreater(len(s), 0)
self.assertIsInstance(s, str)
# Note that some mechanisms don't have SASL names or SASL
# descriptions; in this case, GSSAPI returns empty strings.
if mech.sasl_name:
self.assertIsInstance(mech.sasl_name, str)
if mech.description:
self.assertIsInstance(mech.description, str)
cmp_mech = gssmechs.Mechanism.from_sasl_name(mech.sasl_name)
self.assertEqual(str(cmp_mech), str(mech))
@ktu.gssapi_extension_test('rfc5587', 'RFC 5587: Mech Inquiry')
def test_mech_inquiry(self):
mechs = list(gssmechs.Mechanism.all_mechs())
c = len(mechs)
g_M_from_attrs = gssmechs.Mechanism.from_attrs
for mech in mechs:
attrs = mech.attrs
known_attrs = mech.known_attrs
for attr in attrs:
from_desired = g_M_from_attrs(desired_attrs=[attr])
from_except = g_M_from_attrs(except_attrs=[attr])
from_desired = list(from_desired)
from_except = list(from_except)
self.assertEqual(len(from_desired) + len(from_except), c)
self.assertIn(mech, from_desired)
self.assertNotIn(mech, from_except)
for attr in known_attrs:
from_desired = g_M_from_attrs(desired_attrs=[attr])
from_except = g_M_from_attrs(except_attrs=[attr])
from_desired = list(from_desired)
from_except = list(from_except)
self.assertEqual(len(from_desired) + len(from_except), c)
class NamesTestCase(_GSSAPIKerberosTestCase):
def test_create_from_other(self):
raw_name = gb.import_name(SERVICE_PRINCIPAL)
high_level_name = gssnames.Name(raw_name)
self.assertEqual(bytes(high_level_name), SERVICE_PRINCIPAL)
def test_create_from_name_no_type(self):
name = gssnames.Name(SERVICE_PRINCIPAL)
self.assertIsNotNone(name)
def test_create_from_name_and_type(self):
name = gssnames.Name(SERVICE_PRINCIPAL, gb.NameType.kerberos_principal)
self.assertIsNotNone(name)
self.assertEqual(name.name_type, gb.NameType.kerberos_principal)
def test_create_from_token(self):
name1 = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
exported_name = name1.canonicalize(gb.MechType.kerberos).export()
name2 = gssnames.Name(token=exported_name)
self.assertEqual(name2.name_type, gb.NameType.kerberos_principal)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
def test_display_as(self):
name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canonical_name = name.canonicalize(gb.MechType.kerberos)
# NB(directxman12): krb5 doesn't implement display_name_ext, so just
# check to make sure we return the right types and a reasonable value
krb_name = canonical_name.display_as(
gb.NameType.hostbased_service)
princ_str = SERVICE_PRINCIPAL.decode('utf-8') + '@'
self.assertEqual(str(canonical_name), princ_str)
self.assertIsInstance(krb_name, str)
self.assertEqual(krb_name, princ_str)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
def test_create_from_composite_token_no_attrs(self):
name1 = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
exported_name = name1.canonicalize(
gb.MechType.kerberos).export(composite=True)
name2 = gssnames.Name(token=exported_name, composite=True)
self.assertIsNotNone(name2)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.krb_plugin_test('authdata', 'greet_client')
def test_create_from_composite_token_with_attrs(self):
name1 = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = name1.canonicalize(gb.MechType.kerberos)
canon_name.attributes['urn:greet:greeting'] = b'some val'
exported_name = canon_name.export(composite=True)
        # TODO(directxman12): when you just import a token as composite, it
        # shows up as a name whose text is garbled, since it contains all of
        # the attributes but doesn't properly expose them.  Once the name is
        # canonicalized, the attributes reappear.  However, if you import it
        # as a normal export, the attributes appear directly.  It is thus
        # unclear what is going on.
# name2_raw = gssnames.Name(token=exported_name, composite=True)
# name2 = name2_raw.canonicalize(gb.MechType.kerberos)
name2 = gssnames.Name(token=exported_name)
self.assertIsNotNone(name2)
ugg = name2.attributes["urn:greet:greeting"]
self.assertEqual(ugg.values, set([b"some val"]))
self.assertTrue(ugg.complete)
self.assertFalse(ugg.authenticated)
def test_to_str(self):
name = gssnames.Name(SERVICE_PRINCIPAL, gb.NameType.kerberos_principal)
name_str = str(name)
if sys.version_info[0] == 2:
target_val = SERVICE_PRINCIPAL
else:
target_val = SERVICE_PRINCIPAL.decode(gssutils._get_encoding())
self.assertEqual(name_str, target_val)
def test_to_unicode(self):
name = gssnames.Name(SERVICE_PRINCIPAL, gb.NameType.kerberos_principal)
self.assertEqual(str(name),
SERVICE_PRINCIPAL.decode(gssutils._get_encoding()))
def test_to_bytes(self):
name = gssnames.Name(SERVICE_PRINCIPAL, gb.NameType.kerberos_principal)
        # NB(directxman12): bytes only calls __bytes__ on Python 3+
self.assertEqual(name.__bytes__(), SERVICE_PRINCIPAL)
def test_compare(self):
name1 = gssnames.Name(SERVICE_PRINCIPAL)
name2 = gssnames.Name(SERVICE_PRINCIPAL)
name3 = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
self.assertEqual(name1, name2)
self.assertNotEqual(name1, name3)
    def test_canonicalize_and_export(self):
name = gssnames.Name(SERVICE_PRINCIPAL, gb.NameType.kerberos_principal)
canonical_name = name.canonicalize(gb.MechType.kerberos)
exported_name = canonical_name.export()
self.assertIsInstance(exported_name, bytes)
def test_canonicalize(self):
name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canonicalized_name = name.canonicalize(gb.MechType.kerberos)
self.assertIsInstance(canonicalized_name, gssnames.Name)
self.assertEqual(bytes(canonicalized_name), SERVICE_PRINCIPAL + b"@")
def test_copy(self):
name1 = gssnames.Name(SERVICE_PRINCIPAL)
name2 = copy.copy(name1)
self.assertEqual(name1, name2)
# NB(directxman12): we don't test display_name_ext because the krb5 mech
# doesn't actually implement it
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
def test_is_mech_name(self):
name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
self.assertFalse(name.is_mech_name)
canon_name = name.canonicalize(gb.MechType.kerberos)
self.assertTrue(canon_name.is_mech_name)
self.assertIsInstance(canon_name.mech, gb.OID)
self.assertEqual(canon_name.mech, gb.MechType.kerberos)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
def test_export_name_composite_no_attrs(self):
name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = name.canonicalize(gb.MechType.kerberos)
exported_name = canon_name.export(composite=True)
self.assertIsInstance(exported_name, bytes)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.krb_plugin_test('authdata', 'greet_client')
def test_export_name_composite_with_attrs(self):
name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = name.canonicalize(gb.MechType.kerberos)
canon_name.attributes['urn:greet:greeting'] = b'some val'
exported_name = canon_name.export(composite=True)
self.assertIsInstance(exported_name, bytes)
@ktu.gssapi_extension_test('rfc6680', 'RFC 6680')
@ktu.krb_plugin_test('authdata', 'greet_client')
def test_basic_get_set_del_name_attribute_no_auth(self):
name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
canon_name = name.canonicalize(gb.MechType.kerberos)
canon_name.attributes['urn:greet:greeting'] = (b'some val', True)
ugg = canon_name.attributes["urn:greet:greeting"]
self.assertEqual(ugg.values, set([b"some val"]))
self.assertTrue(ugg.complete)
self.assertFalse(ugg.authenticated)
del canon_name.attributes['urn:greet:greeting']
# NB(directxman12): for some reason, the greet:greeting handler plugin
# doesn't properly delete itself -- it just clears the value. If we
# try to get its value now, we segfault (due to an issue with
# greet:greeting's delete). Instead, just set the value again.
canon_name.attributes['urn:greet:greeting'] = b'some other val'
class SecurityContextTestCase(_GSSAPIKerberosTestCase):
def setUp(self):
super(SecurityContextTestCase, self).setUp()
gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = False
self.client_name = gssnames.Name(self.USER_PRINC)
self.client_creds = gsscreds.Credentials(name=None,
usage='initiate')
self.target_name = gssnames.Name(TARGET_SERVICE_NAME,
gb.NameType.hostbased_service)
self.server_name = gssnames.Name(SERVICE_PRINCIPAL)
self.server_creds = gsscreds.Credentials(name=self.server_name,
usage='accept')
def _create_client_ctx(self, **kwargs):
return gssctx.SecurityContext(name=self.target_name, **kwargs)
# NB(directxman12): we skip testing process_context_token, because there is
    # no concrete, non-deprecated way to obtain an "async"
# token
def test_create_from_other(self):
raw_client_ctx, raw_server_ctx = self._create_completed_contexts()
high_level_ctx = gssctx.SecurityContext(raw_client_ctx)
self.assertEqual(high_level_ctx.target_name, self.target_name)
@exist_perms(lifetime=30, flags=[],
mech=gb.MechType.kerberos,
channel_bindings=None)
def test_create_new_init(self, str_name, kwargs):
client_ctx = gssctx.SecurityContext(name=self.target_name,
creds=self.client_creds,
**kwargs)
self.assertEqual(client_ctx.usage, "initiate")
client_ctx = self._create_client_ctx(**kwargs)
self.assertEqual(client_ctx.usage, "initiate")
def test_create_new_accept(self):
server_ctx = gssctx.SecurityContext(creds=self.server_creds)
self.assertEqual(server_ctx.usage, "accept")
def test_init_throws_error_on_invalid_args(self):
self.assertRaises(TypeError, gssctx.SecurityContext, usage='accept',
name=self.target_name)
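    # Helper sketch: the method below drives the full three-leg GSSAPI handshake --
    # the initiator emits a token, the acceptor consumes it and replies, and the
    # initiator consumes the reply -- returning both established contexts for the
    # message-protection tests that follow.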
def _create_completed_contexts(self):
client_ctx = self._create_client_ctx(lifetime=400)
client_token = client_ctx.step()
self.assertIsInstance(client_token, bytes)
server_ctx = gssctx.SecurityContext(creds=self.server_creds)
server_token = server_ctx.step(client_token)
self.assertIsInstance(server_token, bytes)
client_ctx.step(server_token)
return (client_ctx, server_ctx)
def test_complete_on_partially_completed(self):
client_ctx = self._create_client_ctx()
client_tok = client_ctx.step()
self.assertFalse(client_ctx.complete)
server_ctx = gssctx.SecurityContext(creds=self.server_creds)
server_tok = server_ctx.step(client_tok)
client_ctx.step(server_tok)
self.assertTrue(client_ctx.complete)
self.assertTrue(server_ctx.complete)
def test_initiate_accept_steps(self):
client_ctx, server_ctx = self._create_completed_contexts()
# KDC may allow for clockskew by increasing acceptor context lifetime
self.assertLessEqual(server_ctx.lifetime, 400 + 300)
self.assertEqual(server_ctx.initiator_name, client_ctx.initiator_name)
self.assertIsInstance(server_ctx.mech, gb.OID)
self.assertIsInstance(server_ctx.actual_flags, gb.IntEnumFlagSet)
self.assertFalse(server_ctx.locally_initiated)
self.assertTrue(server_ctx.complete)
self.assertLessEqual(client_ctx.lifetime, 400)
self.assertEqual(client_ctx.target_name, self.target_name)
self.assertIsInstance(client_ctx.mech, gb.OID)
self.assertIsInstance(client_ctx.actual_flags, gb.IntEnumFlagSet)
self.assertTrue(client_ctx.locally_initiated)
self.assertTrue(client_ctx.complete)
def test_channel_bindings(self):
bdgs = gb.ChannelBindings(application_data=b'abcxyz',
initiator_address_type=gb.AddressType.ip,
initiator_address=b'127.0.0.1',
acceptor_address_type=gb.AddressType.ip,
acceptor_address=b'127.0.0.1')
client_ctx = self._create_client_ctx(lifetime=400,
channel_bindings=bdgs)
client_token = client_ctx.step()
self.assertIsInstance(client_token, bytes)
server_ctx = gssctx.SecurityContext(creds=self.server_creds,
channel_bindings=bdgs)
server_token = server_ctx.step(client_token)
self.assertIsInstance(server_token, bytes)
client_ctx.step(server_token)
def test_bad_channel_bindings_raises_error(self):
bdgs = gb.ChannelBindings(application_data=b'abcxyz',
initiator_address_type=gb.AddressType.ip,
initiator_address=b'127.0.0.1',
acceptor_address_type=gb.AddressType.ip,
acceptor_address=b'127.0.0.1')
client_ctx = self._create_client_ctx(lifetime=400,
channel_bindings=bdgs)
client_token = client_ctx.step()
self.assertIsInstance(client_token, bytes)
bdgs.acceptor_address = b'127.0.1.0'
server_ctx = gssctx.SecurityContext(creds=self.server_creds,
channel_bindings=bdgs)
self.assertRaises(gb.BadChannelBindingsError, server_ctx.step,
client_token)
def test_export_create_from_token(self):
client_ctx, server_ctx = self._create_completed_contexts()
token = client_ctx.export()
self.assertIsInstance(token, bytes)
imported_ctx = gssctx.SecurityContext(token=token)
self.assertEqual(imported_ctx.usage, "initiate")
self.assertEqual(imported_ctx.target_name, self.target_name)
def test_pickle_unpickle(self):
client_ctx, server_ctx = self._create_completed_contexts()
pickled_ctx = pickle.dumps(client_ctx)
unpickled_ctx = pickle.loads(pickled_ctx)
self.assertIsInstance(unpickled_ctx, gssctx.SecurityContext)
self.assertEqual(unpickled_ctx.usage, "initiate")
self.assertEqual(unpickled_ctx.target_name, self.target_name)
def test_encrypt_decrypt(self):
client_ctx, server_ctx = self._create_completed_contexts()
encrypted_msg = client_ctx.encrypt(b'test message')
self.assertIsInstance(encrypted_msg, bytes)
decrypted_msg = server_ctx.decrypt(encrypted_msg)
self.assertIsInstance(decrypted_msg, bytes)
self.assertEqual(decrypted_msg, b"test message")
def test_encrypt_decrypt_throws_error_on_no_encryption(self):
client_ctx, server_ctx = self._create_completed_contexts()
wrap_res = client_ctx.wrap(b'test message', False)
self.assertIsInstance(wrap_res, gb.WrapResult)
self.assertFalse(wrap_res.encrypted)
self.assertIsInstance(wrap_res.message, bytes)
self.assertRaises(excs.EncryptionNotUsed, server_ctx.decrypt,
wrap_res.message)
def test_wrap_unwrap(self):
client_ctx, server_ctx = self._create_completed_contexts()
wrap_res = client_ctx.wrap(b'test message', True)
self.assertIsInstance(wrap_res, gb.WrapResult)
self.assertTrue(wrap_res.encrypted)
self.assertIsInstance(wrap_res.message, bytes)
unwrap_res = server_ctx.unwrap(wrap_res.message)
self.assertIsInstance(unwrap_res, gb.UnwrapResult)
self.assertIsInstance(unwrap_res.message, bytes)
self.assertEqual(unwrap_res.message, b"test message")
self.assertTrue(unwrap_res.encrypted)
def test_get_wrap_size_limit(self):
client_ctx, server_ctx = self._create_completed_contexts()
        with_conf = client_ctx.get_wrap_size_limit(100)
        without_conf = client_ctx.get_wrap_size_limit(100, encrypted=False)
self.assertIsInstance(with_conf, int)
self.assertIsInstance(without_conf, int)
self.assertLessEqual(with_conf, 100)
self.assertLessEqual(without_conf, 100)
def test_get_signature(self):
client_ctx, server_ctx = self._create_completed_contexts()
mic_token = client_ctx.get_signature(b'some message')
self.assertIsInstance(mic_token, bytes)
self.assertGreater(len(mic_token), 0)
def test_verify_signature_raise(self):
client_ctx, server_ctx = self._create_completed_contexts()
mic_token = client_ctx.get_signature(b'some message')
server_ctx.verify_signature(b'some message', mic_token)
self.assertRaises(gb.GSSError, server_ctx.verify_signature,
b"other message", mic_token)
@ktu.krb_minversion_test("1.11", "returning tokens")
def test_defer_step_error_on_method(self):
gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = True
bdgs = gb.ChannelBindings(application_data=b'abcxyz')
client_ctx = self._create_client_ctx(lifetime=400,
channel_bindings=bdgs)
client_token = client_ctx.step()
self.assertIsInstance(client_token, bytes)
bdgs.application_data = b'defuvw'
server_ctx = gssctx.SecurityContext(creds=self.server_creds,
channel_bindings=bdgs)
self.assertIsInstance(server_ctx.step(client_token), bytes)
self.assertRaises(gb.BadChannelBindingsError, server_ctx.encrypt,
b"test")
@ktu.krb_minversion_test("1.11", "returning tokens")
def test_defer_step_error_on_complete_property_access(self):
gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = True
bdgs = gb.ChannelBindings(application_data=b'abcxyz')
client_ctx = self._create_client_ctx(lifetime=400,
channel_bindings=bdgs)
client_token = client_ctx.step()
self.assertIsInstance(client_token, bytes)
bdgs.application_data = b'defuvw'
server_ctx = gssctx.SecurityContext(creds=self.server_creds,
channel_bindings=bdgs)
self.assertIsInstance(server_ctx.step(client_token), bytes)
self.assertRaises(gb.BadChannelBindingsError,
lambda: server_ctx.complete)