"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import datetime
import re
from typing import List, Optional, Union
import dateutil.parser
import dateutil.relativedelta as rdelta
import typepy
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
class DateTimeRange:
"""
A class that represents a range of datetime.
:param datetime.datetime/str start_datetime: |param_start_datetime|
:param datetime.datetime/str end_datetime: |param_end_datetime|
:Examples:
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
:Output:
.. parsed-literal::
2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900
.. py:attribute:: start_time_format
:type: str
:value: "%Y-%m-%dT%H:%M:%S%z"
Conversion format string for :py:attr:`.start_datetime`.
.. seealso:: :py:meth:`.get_start_time_str`
.. py:attribute:: end_time_format
:type: str
:value: "%Y-%m-%dT%H:%M:%S%z"
Conversion format string for :py:attr:`.end_datetime`.
.. seealso:: :py:meth:`.get_end_time_str`
"""
NOT_A_TIME_STR = "NaT"
def __init__(
self,
start_datetime=None,
end_datetime=None,
start_time_format="%Y-%m-%dT%H:%M:%S%z",
end_time_format="%Y-%m-%dT%H:%M:%S%z",
):
self.set_time_range(start_datetime, end_datetime)
self.start_time_format = start_time_format
self.end_time_format = end_time_format
self.is_output_elapse = False
self.separator = " - "
def __repr__(self):
if self.is_output_elapse:
suffix = f" ({self.end_datetime - self.start_datetime})"
else:
suffix = ""
return self.separator.join((self.get_start_time_str(), self.get_end_time_str())) + suffix
def __eq__(self, other):
if not isinstance(other, DateTimeRange):
return False
return all(
[self.start_datetime == other.start_datetime, self.end_datetime == other.end_datetime]
)
def __ne__(self, other):
if not isinstance(other, DateTimeRange):
return True
return any(
[self.start_datetime != other.start_datetime, self.end_datetime != other.end_datetime]
)
def __add__(self, other):
return DateTimeRange(self.start_datetime + other, self.end_datetime + other)
def __iadd__(self, other):
self.set_start_datetime(self.start_datetime + other)
self.set_end_datetime(self.end_datetime + other)
return self
def __sub__(self, other):
return DateTimeRange(self.start_datetime - other, self.end_datetime - other)
def __isub__(self, other):
self.set_start_datetime(self.start_datetime - other)
self.set_end_datetime(self.end_datetime - other)
return self
def __contains__(self, x):
"""
:param x:
|datetime|/``DateTimeRange`` instance to compare.
Parse and convert to |datetime| if the value type is |str|.
:type x: |datetime|/``DateTimeRange``/|str|
:return: |True| if the ``x`` is within the time range
:rtype: bool
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print("2015-03-22T10:05:00+0900" in time_range)
print("2015-03-22T10:15:00+0900" in time_range)
time_range_smaller = DateTimeRange("2015-03-22T10:03:00+0900", "2015-03-22T10:07:00+0900")
print(time_range_smaller in time_range)
:Output:
.. parsed-literal::
True
False
True
.. seealso::
:py:meth:`.validate_time_inversion`
"""
self.validate_time_inversion()
if isinstance(x, DateTimeRange):
return x.start_datetime >= self.start_datetime and x.end_datetime <= self.end_datetime
try:
value = dateutil.parser.parse(x)
except (TypeError, AttributeError):
value = x
return self.start_datetime <= value <= self.end_datetime
@property
def start_datetime(self):
"""
:return: Start time of the time range.
:rtype: datetime.datetime
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
time_range.start_datetime
:Output:
.. parsed-literal::
datetime.datetime(2015, 3, 22, 10, 0, tzinfo=tzoffset(None, 32400))
"""
return self.__start_datetime
@property
def end_datetime(self):
"""
:return: End time of the time range.
:rtype: datetime.datetime
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
time_range.end_datetime
:Output:
.. parsed-literal::
datetime.datetime(2015, 3, 22, 10, 10, tzinfo=tzoffset(None, 32400))
"""
return self.__end_datetime
@property
def timedelta(self):
"""
:return:
(|attr_end_datetime| - |attr_start_datetime|) as |timedelta|
:rtype: datetime.timedelta
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
time_range.timedelta
:Output:
.. parsed-literal::
datetime.timedelta(0, 600)
"""
return self.end_datetime - self.start_datetime
def is_set(self):
"""
:return:
|True| if both |attr_start_datetime| and
            |attr_end_datetime| are not |None|.
:rtype: bool
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange()
print(time_range.is_set())
time_range.set_time_range("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print(time_range.is_set())
:Output:
.. parsed-literal::
False
True
"""
return all([self.start_datetime is not None, self.end_datetime is not None])
def validate_time_inversion(self):
"""
Check time inversion of the time range.
:raises ValueError:
If |attr_start_datetime| is
bigger than |attr_end_datetime|.
:raises TypeError:
            If |attr_start_datetime| and/or |attr_end_datetime| is not
            an appropriate datetime value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:00:00+0900")
try:
time_range.validate_time_inversion()
except ValueError:
print("time inversion")
:Output:
.. parsed-literal::
time inversion
"""
if not self.is_set():
# for python2/3 compatibility
raise TypeError
if self.start_datetime > self.end_datetime:
raise ValueError(
"time inversion found: {:s} > {:s}".format(
str(self.start_datetime), str(self.end_datetime)
)
)
def is_valid_timerange(self):
"""
:return:
            |True| if the time range is set
            and has no time inversion.
:rtype: bool
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange()
print(time_range.is_valid_timerange())
time_range.set_time_range("2015-03-22T10:20:00+0900", "2015-03-22T10:10:00+0900")
print(time_range.is_valid_timerange())
time_range.set_time_range("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print(time_range.is_valid_timerange())
:Output:
.. parsed-literal::
False
False
True
.. seealso::
:py:meth:`.is_set`
:py:meth:`.validate_time_inversion`
"""
try:
self.validate_time_inversion()
except (TypeError, ValueError):
return False
return self.is_set()
def is_intersection(self, x):
"""
:param DateTimeRange x: Value to compare
:return: |True| if intersect with ``x``
:rtype: bool
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
x = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900")
time_range.is_intersection(x)
:Output:
.. parsed-literal::
True
"""
return self.intersection(x).is_set()
def get_start_time_str(self):
"""
:return:
|attr_start_datetime| as |str| formatted with
|attr_start_time_format|.
            Return |NaT| if the value or the format is invalid.
:rtype: str
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print(time_range.get_start_time_str())
time_range.start_time_format = "%Y/%m/%d %H:%M:%S"
print(time_range.get_start_time_str())
:Output:
.. parsed-literal::
2015-03-22T10:00:00+0900
2015/03/22 10:00:00
"""
try:
return self.start_datetime.strftime(self.start_time_format)
except AttributeError:
return self.NOT_A_TIME_STR
def get_end_time_str(self):
"""
:return:
|attr_end_datetime| as a |str| formatted with
|attr_end_time_format|.
Return |NaT| if invalid datetime or format.
:rtype: str
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print(time_range.get_end_time_str())
time_range.end_time_format = "%Y/%m/%d %H:%M:%S"
print(time_range.get_end_time_str())
:Output:
.. parsed-literal::
2015-03-22T10:10:00+0900
2015/03/22 10:10:00
"""
try:
return self.end_datetime.strftime(self.end_time_format)
except AttributeError:
return self.NOT_A_TIME_STR
def get_timedelta_second(self):
"""
:return: (|attr_end_datetime| - |attr_start_datetime|) as seconds
:rtype: float
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
time_range.get_timedelta_second()
:Output:
.. parsed-literal::
600.0
"""
return self.timedelta.total_seconds()
def set_start_datetime(self, value, timezone=None):
"""
Set the start time of the time range.
:param value: |param_start_datetime|
:type value: |datetime|/|str|
:raises ValueError: If the value is invalid as a |datetime| value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange()
print(time_range)
time_range.set_start_datetime("2015-03-22T10:00:00+0900")
print(time_range)
:Output:
.. parsed-literal::
NaT - NaT
2015-03-22T10:00:00+0900 - NaT
"""
self.__start_datetime = self.__normalize_datetime_value(value, timezone)
def set_end_datetime(self, value, timezone=None):
"""
Set the end time of the time range.
:param datetime.datetime/str value: |param_end_datetime|
:raises ValueError: If the value is invalid as a |datetime| value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange()
print(time_range)
time_range.set_end_datetime("2015-03-22T10:10:00+0900")
print(time_range)
:Output:
.. parsed-literal::
NaT - NaT
NaT - 2015-03-22T10:10:00+0900
"""
self.__end_datetime = self.__normalize_datetime_value(value, timezone)
def set_time_range(self, start, end):
"""
:param datetime.datetime/str start: |param_start_datetime|
:param datetime.datetime/str end: |param_end_datetime|
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange()
print(time_range)
time_range.set_time_range("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
print(time_range)
:Output:
.. parsed-literal::
NaT - NaT
2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900
"""
self.set_start_datetime(start)
self.set_end_datetime(end)
@staticmethod
def __compare_relativedelta(lhs, rhs):
if lhs.years < rhs.years:
return -1
if lhs.years > rhs.years:
return 1
if lhs.months < rhs.months:
return -1
if lhs.months > rhs.months:
return 1
if lhs.days < rhs.days:
return -1
if lhs.days > rhs.days:
return 1
if lhs.hours < rhs.hours:
return -1
if lhs.hours > rhs.hours:
return 1
if lhs.minutes < rhs.minutes:
return -1
if lhs.minutes > rhs.minutes:
return 1
if lhs.seconds < rhs.seconds:
return -1
if lhs.seconds > rhs.seconds:
return 1
if lhs.microseconds < rhs.microseconds:
return -1
if lhs.microseconds > rhs.microseconds:
return 1
return 0
def __compare_timedelta(self, lhs, seconds):
try:
rhs = datetime.timedelta(seconds=seconds)
if lhs < rhs:
return -1
if lhs > rhs:
return 1
return 0
except TypeError:
return self.__compare_relativedelta(
lhs.normalized(), rdelta.relativedelta(seconds=seconds)
)
def range(self, step):
"""
Return an iterator object.
:param step: Step of iteration.
:type step: |timedelta|/dateutil.relativedelta.relativedelta
:return: iterator
:rtype: iterator
:Sample Code:
.. code:: python
import datetime
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-01-01T00:00:00+0900", "2015-01-04T00:00:00+0900")
for value in time_range.range(datetime.timedelta(days=1)):
print(value)
:Output:
.. parsed-literal::
2015-01-01 00:00:00+09:00
2015-01-02 00:00:00+09:00
2015-01-03 00:00:00+09:00
2015-01-04 00:00:00+09:00
"""
if self.__compare_timedelta(step, 0) == 0:
raise ValueError("step must be not zero")
is_inversion = False
try:
self.validate_time_inversion()
except ValueError:
is_inversion = True
if not is_inversion:
if self.__compare_timedelta(step, seconds=0) < 0:
raise ValueError(f"invalid step: expect greater than 0, actual={step}")
else:
if self.__compare_timedelta(step, seconds=0) > 0:
raise ValueError(f"invalid step: expect less than 0, actual={step}")
current_datetime = self.start_datetime
while current_datetime <= self.end_datetime:
yield current_datetime
current_datetime = current_datetime + step
def intersection(self, x):
"""
        Compute the intersection of the input and the current time range,
        and return it as a new ``DateTimeRange`` instance.
        :param DateTimeRange x:
            Value to compute the intersection with the current time range.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
dtr0 = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
dtr1 = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900")
dtr0.intersection(dtr1)
:Output:
.. parsed-literal::
2015-03-22T10:05:00+0900 - 2015-03-22T10:10:00+0900
"""
self.validate_time_inversion()
x.validate_time_inversion()
if any([x.start_datetime in self, self.start_datetime in x]):
start_datetime = max(self.start_datetime, x.start_datetime)
end_datetime = min(self.end_datetime, x.end_datetime)
else:
start_datetime = None
end_datetime = None
return DateTimeRange(
start_datetime=start_datetime,
end_datetime=end_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
)
def subtract(self, x):
"""
Remove a time range from this one and return the result.
- The result will be ``[self.copy()]`` if the second range does not overlap the first
- The result will be ``[]`` if the second range wholly encompasses the first range
- The result will be ``[new_range]`` if the second range overlaps one end of the range
- The result will be ``[new_range1, new_range2]`` if the second range is
an internal sub range of the first
:param DateTimeRange x:
Range to remove from this one.
        :return:
            List of new ranges when the second range is removed from this one.
        :rtype: list[DateTimeRange]
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
dtr0 = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
dtr1 = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900")
dtr0.subtract(dtr1)
:Output:
.. parsed-literal::
[2015-03-22T10:00:00+0900 - 2015-03-22T10:05:00+0900]
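        An additional sketch (continuing the sample above) for the case where
        the removed range is an internal sub-range, which yields two ranges:
        :Sample Code:
            .. code:: python
                dtr2 = DateTimeRange("2015-03-22T10:03:00+0900", "2015-03-22T10:07:00+0900")
                dtr0.subtract(dtr2)
        :Output:
            .. parsed-literal::
                [2015-03-22T10:00:00+0900 - 2015-03-22T10:03:00+0900,
                 2015-03-22T10:07:00+0900 - 2015-03-22T10:10:00+0900]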
"""
overlap = self.intersection(x)
# No intersection, return a copy of the original
if not overlap.is_set() or overlap.get_timedelta_second() <= 0:
return [
DateTimeRange(
start_datetime=self.start_datetime,
end_datetime=self.end_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
)
]
# Case 2, full overlap, subtraction results in empty set
if (
overlap.start_datetime == self.start_datetime
and overlap.end_datetime == self.end_datetime
):
return []
# Case 3, overlap on start
if overlap.start_datetime == self.start_datetime:
return [
DateTimeRange(
start_datetime=overlap.end_datetime,
end_datetime=self.end_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
)
]
# Case 4, overlap on end
if overlap.end_datetime == self.end_datetime:
return [
DateTimeRange(
start_datetime=self.start_datetime,
end_datetime=overlap.start_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
)
]
# Case 5, underlap, two new ranges are needed.
return [
DateTimeRange(
start_datetime=self.start_datetime,
end_datetime=overlap.start_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
),
DateTimeRange(
start_datetime=overlap.end_datetime,
end_datetime=self.end_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
),
]
def encompass(self, x):
"""
        Compute a time range that encompasses both the input and the current
        time range, and return it as a new ``DateTimeRange`` instance.
        :param DateTimeRange x:
            Value to compute the encompassing time range with.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
dtr0 = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
dtr1 = DateTimeRange("2015-03-22T10:05:00+0900", "2015-03-22T10:15:00+0900")
dtr0.encompass(dtr1)
:Output:
.. parsed-literal::
2015-03-22T10:00:00+0900 - 2015-03-22T10:15:00+0900
"""
self.validate_time_inversion()
x.validate_time_inversion()
return DateTimeRange(
start_datetime=min(self.start_datetime, x.start_datetime),
end_datetime=max(self.end_datetime, x.end_datetime),
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
)
def truncate(self, percentage):
"""
        Truncate ``percentage`` / 2 [%] of the whole time range from both the
        start and the end.
        :param float percentage: Percentage to truncate.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange(
"2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
time_range.is_output_elapse = True
print(time_range)
time_range.truncate(10)
print(time_range)
:Output:
.. parsed-literal::
2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900 (0:10:00)
2015-03-22T10:00:30+0900 - 2015-03-22T10:09:30+0900 (0:09:00)
"""
self.validate_time_inversion()
if percentage < 0:
raise ValueError("discard_percent must be greater or equal to zero: " + str(percentage))
if percentage == 0:
return
discard_time = self.timedelta // int(100) * int(percentage / 2)
self.__start_datetime += discard_time
self.__end_datetime -= discard_time
def split(self, separator: Union[str, datetime.datetime]) -> List["DateTimeRange"]:
"""
        Split the ``DateTimeRange`` into two ``DateTimeRange`` instances at a specific datetime.
:param Union[str, datetime.datetime] separator:
Date and time to split the DateTimeRange.
This value will be included for both of the ranges after split.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
dtr = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
dtr.split("2015-03-22T10:05:00+0900")
:Output:
.. parsed-literal::
[2015-03-22T10:00:00+0900 - 2015-03-22T10:05:00+0900,
2015-03-22T10:05:00+0900 - 2015-03-22T10:10:00+0900]
"""
self.validate_time_inversion()
        separation = self.__normalize_datetime_value(separator, timezone=None)
        if (separation not in self) or (
            separation in (self.start_datetime, self.end_datetime)
        ):
return [
DateTimeRange(
start_datetime=self.start_datetime,
end_datetime=self.end_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
)
]
return [
DateTimeRange(
start_datetime=self.start_datetime,
                end_datetime=separation,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
),
DateTimeRange(
                start_datetime=separation,
end_datetime=self.end_datetime,
start_time_format=self.start_time_format,
end_time_format=self.end_time_format,
),
]
def __normalize_datetime_value(self, value, timezone):
if value is None:
return None
try:
return typepy.type.DateTime(
value, strict_level=typepy.StrictLevel.MIN, timezone=timezone
).convert()
except typepy.TypeConversionError as e:
raise ValueError(e)
@classmethod
def from_range_text(
cls,
range_text: str,
separator: str = "-",
start_time_format: Optional[str] = None,
end_time_format: Optional[str] = None,
) -> "DateTimeRange":
"""Create a ``DateTimeRange`` instance from a datetime range text.
        :param str range_text:
            Input text that includes a datetime range,
            e.g. ``2021-01-23T10:00:00+0400 - 2021-01-23T10:10:00+0400``
        :param str separator:
            Text that separates the two datetimes in ``range_text``.
        :return:
            Created instance.
        :rtype: DateTimeRange
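        Usage sketch (output shown with the default time formats):
        :Sample Code:
            .. code:: python
                from datetimerange import DateTimeRange
                DateTimeRange.from_range_text("2021-01-23T10:00:00+0400 - 2021-01-23T10:10:00+0400")
        :Output:
            .. parsed-literal::
                2021-01-23T10:00:00+0400 - 2021-01-23T10:10:00+0400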
"""
        datetime_ranges = re.split(r"\s+{}\s+".format(re.escape(separator)), range_text.strip())
        if len(datetime_ranges) != 2:
            raise ValueError("range_text must include two datetime values separated by the separator")
        start, end = datetime_ranges
kwargs = {
"start_datetime": start,
"end_datetime": end,
}
if start_time_format:
kwargs["start_time_format"] = start_time_format
if end_time_format:
kwargs["end_time_format"] = end_time_format
return DateTimeRange(**kwargs)
"""Neural Network Policy implementation."""
from SafeRLBench import Policy
from SafeRLBench.error import add_dependency, MultipleCallsException
from SafeRLBench.spaces import RdSpace
import numpy as np
from numpy.random import normal
try:
import tensorflow as tf
except ModuleNotFoundError:
tf = None
import logging
logger = logging.getLogger(__name__)
def default_init_weights(shape):
"""Initialize default weights."""
weights = tf.random_normal(shape, mean=0, stddev=0.1, name='weights')
return tf.Variable(weights)
class NeuralNetwork(Policy):
"""Fully connected Neural Network Policy.
Attributes
----------
args : list
Contains the args used to initialize the policy.
kwargs : dict
Contains the kwargs used to initialize the policy.
layers : list of integers
A list describing the layer sizes. The first element represents the
size of the input layer, the last element the size of the output
layer.
state_space : space instance
action_space : space instance
    weights : tf.Variable
        If None, the `init_weights` function will be used to initialize the
        weights.
init_weights : callable
Takes a shape as an argument and returns a tf.Variable according to
this shape.
    activation : list of activation functions
        An activation function which will be used to construct the respective
        layer. If only one activation function is passed, it will be used for
        every layer. If the argument is None, the sigmoid function will be
        used by default.
dtype : string
Data type of input and output.
W_action : list of tf.Variable
The list contains the `tf.Variable` instances describing the mapping
between the hidden layers. The i-th entry describes the connection
between layer i and layer i+1.
W_var : list of tf.Variable
This list contains the weights used to compute the variance estimation.
Each entry corresponds to one layer and contains weights of shape
(layer[i], 1).
    a_pred : tf.Tensor
        Action estimate of the fully connected neural network defined by
        `W_action` and `activation`.
    var : tf.Tensor
        Variance estimate which is a weighted sum of all hidden units.
        The weights are described by `W_var`.
h : list of tf.Tensor
Hidden layers
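    Examples
    --------
    A minimal usage sketch (assumes TensorFlow 1.x and an active default
    session with initialized variables; the layer sizes are illustrative):
    >>> policy = NeuralNetwork([2, 6, 1], do_setup=True)  # doctest: +SKIP
    >>> # action = policy.map(np.zeros(2))                # doctest: +SKIP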
"""
def __init__(self,
layers, weights=None, init_weights=None, activation=None,
dtype='float', scope='global', do_setup=False):
"""Initialize Neural Network wrapper."""
add_dependency(tf, 'TensorFlow')
if (len(layers) < 2):
raise ValueError('At least two layers needed.')
# determine state and action space
state_space = RdSpace((layers[0],))
action_space = RdSpace((layers[-1],))
# store arguments convenient for copy operation
self.args = [layers]
self.kwargs = {
'weights': weights,
'init_weights': init_weights,
'activation': activation,
'dtype': dtype
}
self.state_space = state_space
self.action_space = action_space
self.dtype = dtype
self.layers = layers
self.scope = scope
self.is_set_up = False
if init_weights is None:
self.init_weights = default_init_weights
else:
self.init_weights = init_weights
# Activation function
if activation is None:
activation = (len(layers) - 2) * [tf.sigmoid]
elif (isinstance(activation, list)
and (len(activation) != len(layers) - 2)):
raise ValueError('Activation list has wrong size.')
else:
activation = (len(layers) - 2) * [activation]
self.activation = activation
# Symbols
self.X = tf.placeholder(dtype, shape=[None, layers[0]], name='X')
self.a = tf.placeholder(dtype, shape=[None, layers[-1]], name='a')
if do_setup:
with tf.variable_scope(self.scope):
self.setup()
else:
# Make sure all fields exist
self.W_action = None
self.W_var = None
self.a_pred = None
self.var = None
self.h = None
self.sess = None
def setup(self):
"""Set up the network graph.
        The weights and graph will be initialized by this function. If
        `do_setup` is True, setup will automatically be called when
        instantiating the class.
"""
if self.is_set_up:
raise MultipleCallsException('Network is already set up.')
layers = self.layers
weights = self.kwargs['weights']
# Weights for the action estimation
with tf.variable_scope('action_estimator'):
if weights is None:
w = []
for i in range(len(layers) - 1):
w.append(self.init_weights((layers[i], layers[i + 1])))
else:
w = weights
self.W_action = w
# generate network
self.a_pred = self._generate_network()
# Weights for variance estimation
with tf.variable_scope('variance_estimator'):
self.W_var = []
for i in range(1, len(layers) - 1):
self.W_var.append(self.init_weights((layers[i], 1)))
# generate variance network
self.var = self._generate_variance()
self.is_set_up = True
def _generate_network(self):
self.h = [self.X]
for i, act in enumerate(self.activation):
h_i = self.h[i]
w_i = self.W_action[i]
self.h.append(act(tf.matmul(h_i, w_i)))
return tf.matmul(self.h[-1], self.W_action[-1])
def _generate_variance(self):
var = []
if not self.W_var:
return tf.constant(0, name='variance')
        for w_i, h_i in zip(self.W_var, self.h[1:]):
            var.append(tf.reduce_sum(tf.matmul(h_i, w_i)))
return tf.abs(tf.reduce_sum(var, name='variance'))
def copy(self, scope, do_setup=True):
"""Generate a copy of the network.
The copy will instantiate the class with the same arguments, but
replace `scope` and `do_setup` with the respective arguments passed
to this function.
Parameters
----------
        scope : String
            Indicates the scope that should be used when initializing the
            network.
        do_setup : Boolean
            Default: True. Indicates if the `setup` method should be called
            when instantiating.
"""
self.kwargs['scope'] = scope
self.kwargs['do_setup'] = do_setup
return NeuralNetwork(*self.args, **self.kwargs)
def map(self, state):
"""Compute output in session.
Make sure a default session is set when calling.
"""
state = state.flatten()
        assert self.state_space.contains(state)
if self.sess is None:
sess = tf.get_default_session()
else:
sess = self.sess
mean, var = sess.run([self.a_pred, self.var], {self.X: [state]})
action = np.array(normal(mean, var))
action = action.reshape(self.action_space.shape)
return action
@property
def parameters(self):
"""Return weights of the neural network.
        This returns a list of tf.Variables. Please note that these cannot
        simply be updated by assignment. See the parameters.setter docstring
        for more information.
        The underlying tf.Variables can be directly accessed through the
        attributes `W_action` and `W_var`.
"""
if self.sess is None:
return tf.get_default_session().run(self.W_action + self.W_var)
else:
return self.sess.run(self.W_action + self.W_var)
@parameters.setter
def parameters(self, update):
"""Setter function for parameters.
Since the parameters are a list of `tf.Variable`, we need to feed them
        into an assign operator. Thus the argument needs to be a list
containing an element for each Variable in `W_action` and `W_var` in
that order, i.e. `W_var` will be the last element.
Parameters
----------
update :
            List of parameters for each `tf.Variable`
Notes
-----
Make sure there is a default session or `self.sess` is set.
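        A hypothetical sketch (names are illustrative; requires a default
        session or `self.sess`):
        >>> values = policy.parameters      # doctest: +SKIP
        >>> policy.parameters = values      # re-assigns via the assign ops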
"""
if not isinstance(update, list):
update = [update]
variables = self.W_action + self.W_var
assign_op = []
for (var, val) in zip(variables, update):
assign_op.append(var.assign(val))
if self.sess is None:
sess = tf.get_default_session()
else:
sess = self.sess
sess.run(assign_op)
@property
def parameter_space(self):
"""Return parameter space."""
pass
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to game/settings.py. An empty game/settings.py can be
auto-generated by running game/manage.py without any arguments.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
import os
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on. Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ['0.0.0.0']
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see OOB_FUNC_MODULE). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(8000, 5001)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ['0.0.0.0']
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ['127.0.0.1']
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only fall back to the ajax version if
# the browser is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient.
WEBSOCKET_CLIENT_PORT = 8001
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = '0.0.0.0'
# Actual URL for webclient component to reach the websocket. The first
# port number in the WEBSOCKET_PORTS list will be automatically appended.
WEBSOCKET_CLIENT_URL = "ws://localhost"
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [8022]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ['0.0.0.0']
# Activate SSL protocol (SecureSocketLibrary)
SSL_ENABLED = False
# Ports to use for SSL
SSL_PORTS = [4001]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ['0.0.0.0']
# Activate custom websocket support. This is unrelated to the websocket client!
# This is intended to be used by optional third-party connections/applications
# or clients.
WEBSOCKET_ENABLED = False
# Ports to use for Websockets
WEBSOCKET_PORTS = [8021]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_INTERFACES = ['0.0.0.0']
# This determines whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# The path that contains this settings.py file (no trailing slash).
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Path to the src directory containing the bulk of the codebase's code.
SRC_DIR = os.path.join(BASE_PATH, 'src')
# Path to the game directory (containing the database file if using sqlite).
GAME_DIR = os.path.join(BASE_PATH, 'game')
# Place to put log files
LOG_DIR = os.path.join(GAME_DIR, 'logs')
SERVER_LOG_FILE = os.path.join(LOG_DIR, 'server.log')
PORTAL_LOG_FILE = os.path.join(LOG_DIR, 'portal.log')
HTTP_LOG_FILE = os.path.join(LOG_DIR, 'http_requests.log')
# Rotate log files when server and/or portal stops. This will keep log
# file sizes down. Turn off to get ever-growing log files and never
# lose log info.
CYCLE_LOGFILES = True
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'UTC'
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = (
'src.web.utils.backends.CaseInsensitiveModelBackend',)
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = 'en-us'
# How long (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = 3600
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer.
IDLE_COMMAND = "idle"
# The set of encodings tried. A Player object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your players are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# The game server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = 'localhost'
AMP_PORT = 5000
AMP_INTERFACE = '127.0.0.1'
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flushes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
######################################################################
# Evennia Database config
######################################################################
# Database config syntax for Django 1.2+.
# ENGINE - path to the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
#            'django.db.backends.postgresql_psycopg2' (see Issue 241),
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(GAME_DIR, 'evennia.db3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}}
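# A hypothetical MySQL variant (placeholder credentials; field meanings as
# described in the notes above):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': 'evennia',
#         'USER': 'evenniauser',
#         'PASSWORD': 'changeme',
#         'HOST': 'localhost',
#         'PORT': ''}}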
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in game/gamesrc/conf/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "src.commands.cmdparser.cmdparser"
# The handler that outputs errors when searching
# objects using object.search().
SEARCH_AT_RESULT = "src.commands.cmdparser.at_search_result"
# The parser used in order to separate multiple
# object matches (so you can separate between same-named
# objects without using dbrefs).
SEARCH_AT_MULTIMATCH_INPUT = "src.commands.cmdparser.at_multimatch_input"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "src.commands.connection_screen"
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = ""
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = ""
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = []
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = []
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many players you have etc.
MSSP_META_MODULE = ""
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("src.locks.lockfuncs",)
# Module holding OOB (Out of Band) hook objects. This allows for customization
# and expansion of which hooks OOB protocols are allowed to call on the server,
# for example for attaching tracker hooks for when various object fields change.
OOB_PLUGIN_MODULES = ["src.server.oob_cmds"]
######################################################################
# Default command sets
######################################################################
# Note that with the exception of the unloggedin set (which is not
# stored anywhere in the database), changing these paths will only affect
# NEW created characters/objects, not those already in play. So if you plan to
# change this, it's recommended you do it before having created a lot of objects
# (or simply reset the database after the change for simplicity). Remember
# that you should never edit things in src/. Instead copy out the examples
# in game/gamesrc/commands/examples up one level and re-point these settings
# to point to these copies instead - these you can then change as you please
# (or copy/paste from the default modules in src/ if you prefer).
# Command set used on session before player has logged in
CMDSET_UNLOGGEDIN = "src.commands.default.cmdset_unloggedin.UnloggedinCmdSet"
# Command set used on the logged-in session
CMDSET_SESSION = "src.commands.default.cmdset_session.SessionCmdSet"
# Default set for logged in player with characters (fallback)
CMDSET_CHARACTER = "src.commands.default.cmdset_character.CharacterCmdSet"
# Command set for players without a character (ooc)
CMDSET_PLAYER = "src.commands.default.cmdset_player.PlayerCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["game.gamesrc.commands"]
######################################################################
# Typeclasses and other paths
######################################################################
# Server-side session class used.
SERVER_SESSION_CLASS = "src.server.serversession.ServerSession"
# Base paths for typeclassed object classes. These paths must be
# defined relative evennia's root directory. They will be searched in
# order to find relative typeclass paths.
OBJECT_TYPECLASS_PATHS = ["game.gamesrc.objects",
"game.gamesrc.objects.examples",
"contrib"]
SCRIPT_TYPECLASS_PATHS = ["game.gamesrc.scripts",
"game.gamesrc.scripts.examples",
"contrib"]
PLAYER_TYPECLASS_PATHS = ["game.gamesrc.objects", "contrib"]
CHANNEL_TYPECLASS_PATHS = ["game.gamesrc.conf", "contrib"]
# Typeclass for player objects (linked to a character) (fallback)
BASE_PLAYER_TYPECLASS = "src.players.player.Player"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "src.objects.objects.Object"
# Typeclass for character objects linked to a player (fallback)
BASE_CHARACTER_TYPECLASS = "src.objects.objects.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "src.objects.objects.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "src.objects.objects.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "src.comms.comms.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "src.scripts.scripts.DoNothing"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2,3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = ['game.gamesrc.world', 'contrib']
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# src.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# These measures might or might not make sense to your game world.
TIME_SEC_PER_MIN = 60
TIME_MIN_PER_HOUR = 60
TIME_HOUR_PER_DAY = 24
TIME_DAY_PER_WEEK = 7
TIME_WEEK_PER_MONTH = 4
TIME_MONTH_PER_YEAR = 12
######################################################################
# Default Player setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 a single
# character is created with the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allows per player.
# 0 - single session, one player, one character, when a new session is
# connected, the old one is disconnected
# 1 - multiple sessions, one player, one character, each session getting
# the same data
# 2 - multiple sessions, one player, many characters, one session per
# character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
# session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed for MULTISESSION_MODE 2,3. This is
# checked by the default ooc char-creation command. Forced to 1 for
# MULTISESSION_MODE 0 and 1.
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions.
PERMISSION_HIERARCHY = ["Guests", # note-only used if GUEST_ENABLED=True
"Players",
"PlayerHelpers",
"Builders",
"Wizards",
"Immortals"]
# The default permission given to all new players
PERMISSION_PLAYER_DEFAULT = "Players"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
CLIENT_DEFAULT_HEIGHT = 45 # telnet standard is 24 but does anyone use such
# low-res displays anymore?
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest"
GUEST_ENABLED = False
# Typeclass for guest player objects (linked to a character)
BASE_GUEST_TYPECLASS = "src.players.player.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# players/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = ["Guest" + str(s+1) for s in range(9)]
######################################################################
# In-game Channels created from server start
######################################################################
# Each default channel is defined by a tuple containing
# (name, aliases, description, locks)
# where aliases may be a tuple too, and locks is
# a valid lockstring definition.
# Default user channel for communication
CHANNEL_PUBLIC = ("Public", ('ooc',), 'Public discussion',
"control:perm(Wizards);listen:all();send:all()")
# General info about the server
CHANNEL_MUDINFO = ("MUDinfo", '', 'Informative messages',
"control:perm(Immortals);listen:perm(Immortals);send:false()")
# Channel showing when new people connect
CHANNEL_CONNECTINFO = ("MUDconnections", '', 'Connection log',
"control:perm(Immortals);listen:perm(Wizards);send:false()")
######################################################################
# External Channel connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients. IRC and IMC2
# require that you have twisted.words installed.
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
IRC_ENABLED = False
# RSS allows you to connect RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60*10 # 10 minutes
# IMC (Inter-MUD communication) allows you to connect an Evennia channel
# to an IMC2 server. This lets them talk to people on other MUDs also
# using IMC. Evennia's IMC2 client was developed against MudByte's
# network. You must register your MUD on the network before you can
# use it, go to http://www.mudbytes.net/imc2-intermud-join-network.
# Choose 'Other unsupported IMC2 version' from the choices and
# enter your information there. You should enter the same 'short mud
# name' as your SERVERNAME above, then choose imc network server as
# well as client/server passwords same as below. When enabled, the
# command @imc2chan becomes available in-game and allows you to
# connect Evennia channels to IMC channels on the network. The Evennia
# discussion channel 'ievennia' is on server01.mudbytes.net:5000.
# NOTE - IMC2 is currently NOT FUNCTIONAL due to lack of testing means.
IMC2_ENABLED = False
IMC2_NETWORK = "server01.mudbytes.net"
IMC2_PORT = 5000 # this is the imc2 port, not on localhost
IMC2_CLIENT_PWD = ""
IMC2_SERVER_PWD = ""
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# While true, show "pretty" error messages for template syntax errors.
TEMPLATE_DEBUG = DEBUG
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather nobody receive emails, leave this commented out or empty.
ADMINS = ()  # (('Your Name', 'your_email@domain.com'),)
# These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "gamesrc", "web", "media")
# It's safe to disregard this, as it's a Django feature we only half use as a
# dependency, not actually what it's primarily meant for.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = 'sessionid'
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = ["../locale/"]
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = 'src.web.urls'
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = '/'
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = '/accounts/login'
# Where to redirect users who wish to logout.
LOGOUT_URL = '/accounts/login'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(GAME_DIR, "gamesrc", "web", "static")
# Directories from which static files will be gathered.
STATICFILES_DIRS = (
os.path.join(GAME_DIR, "gamesrc", "web", "static_overrides"),
os.path.join(SRC_DIR, "web", "static"),)
# Patterns of files in the static directories. Used here to make sure that
# its readme file is preserved but unused.
STATICFILES_IGNORE_PATTERNS = ('README.md',)
# The name of the currently selected web template. This corresponds to the
# directory names shown in the webtemplates directory.
ACTIVE_TEMPLATE = 'prosimii'
# We set up the location of the website template as well as the admin site.
TEMPLATE_DIRS = (
os.path.join(GAME_DIR, "gamesrc", "web", "template_overrides"),
os.path.join(SRC_DIR, "web", "templates", ACTIVE_TEMPLATE),
os.path.join(SRC_DIR, "web", "templates"),)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',)
# Middleware classes are semi-transparent extensions to Django's functionality.
# See http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware', # 1.4?
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',)
# Context processors define context variables, generally for the template
# system to use.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.media',
'django.core.context_processors.debug',
'src.web.utils.general_context.general_context',)
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'src.server',
'src.typeclasses',
'src.players',
'src.objects',
'src.comms',
'src.help',
'src.scripts',
'src.web.webclient')
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "players.PlayerDB"
#AUTH_PROFILE_MODULE = "players.PlayerDB"
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = 'src.server.tests.EvenniaTestSuiteRunner'
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default Django distribution.
try:
import django_extensions
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the salt for cryptographic hashing used by Django.
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = 'changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS'
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Register flags for optimizing performance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import REDACTED
from absl import flags # pylint: disable=g-bad-import-order
import tensorflow as tf # pylint: disable=g-bad-import-order
from REDACTED.tf2_common.utils.flags._conventions import help_wrap
# Map string to TensorFlow dtype
DTYPE_MAP = {
"fp16": tf.float16,
"bf16": tf.bfloat16,
"fp32": tf.float32,
}
def get_tf_dtype(flags_obj):
if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite":
# If the graph_rewrite is used, we build the graph with fp32, and let the
# graph rewrite change ops to fp16.
return tf.float32
return DTYPE_MAP[flags_obj.dtype]
def get_loss_scale(flags_obj, default_for_fp16):
if flags_obj.loss_scale == "dynamic":
return flags_obj.loss_scale
elif flags_obj.loss_scale is not None:
return float(flags_obj.loss_scale)
elif flags_obj.dtype == "fp32" or flags_obj.dtype == "bf16":
return 1 # No loss scaling is needed for fp32 and bf16
else:
assert flags_obj.dtype == "fp16"
return default_for_fp16
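# A minimal sketch (an assumption, not part of this module) of what the loss scale is
# used for in a custom training step: the loss is multiplied by the scale before
# gradients are computed, and the gradients are divided by the same scale before being
# applied, which is mathematically a no-op but keeps small fp16 gradients from
# underflowing to zero. The helper name below is hypothetical.
def _scaled_gradients_sketch(tape, loss, variables, loss_scale):
  # `tape` is assumed to be a tf.GradientTape that recorded the forward pass.
  scaled_loss = loss * loss_scale
  scaled_grads = tape.gradient(scaled_loss, variables)
  return [g / loss_scale for g in scaled_grads]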
def define_performance(num_parallel_calls=False, inter_op=False, intra_op=False,
synthetic_data=False, max_train_steps=False, dtype=False,
all_reduce_alg=False, num_packs=False,
tf_gpu_thread_mode=False,
datasets_num_private_threads=False,
datasets_num_parallel_batches=False,
dynamic_loss_scale=False, fp16_implementation=False,
loss_scale=False,
tf_data_experimental_slack=False, enable_xla=False,
force_v2_in_keras_compile=False,
training_dataset_cache=False,
training_prefetch_batchs=False,
eval_dataset_cache=False,
eval_prefetch_batchs=False):
"""Register flags for specifying performance tuning arguments.
Args:
num_parallel_calls: Create a flag to specify parallelism of data loading.
inter_op: Create a flag to allow specification of inter op threads.
intra_op: Create a flag to allow specification of intra op threads.
synthetic_data: Create a flag to allow the use of synthetic data.
    max_train_steps: Create a flag to allow specification of the maximum number
      of training steps.
dtype: Create flags for specifying dtype.
all_reduce_alg: If set forces a specific algorithm for multi-gpu.
num_packs: If set provides number of packs for MirroredStrategy's cross
device ops.
    tf_gpu_thread_mode: gpu_private triggers use of a private thread pool.
datasets_num_private_threads: Number of private threads for datasets.
datasets_num_parallel_batches: Determines how many batches to process in
parallel when using map and batch from tf.data.
dynamic_loss_scale: Allow the "loss_scale" flag to take on the value
"dynamic". Only valid if `dtype` is True.
fp16_implementation: Create fp16_implementation flag.
loss_scale: Controls the loss scaling, normally for mixed-precision
training. Can only be turned on if dtype is also True.
tf_data_experimental_slack: Determines whether to enable tf.data's
`experimental_slack` option.
enable_xla: Determines if XLA (auto clustering) is turned on.
    force_v2_in_keras_compile: Forces the use of the run_distributed path even
      if not using a `strategy`. This is not the same as
      `tf.distribute.OneDeviceStrategy`.
training_dataset_cache: Whether to cache the training dataset on workers.
Typically used to improve training performance when training data is in
remote storage and can fit into worker memory.
    training_prefetch_batchs: The number of batches to prefetch for training.
eval_dataset_cache: Whether to cache the eval dataset on workers.
    eval_prefetch_batchs: The number of batches to prefetch for eval.
Returns:
    A list of flags for core.py to mark as key flags.
"""
key_flags = []
if num_parallel_calls:
flags.DEFINE_integer(
name="num_parallel_calls", short_name="npc",
default=multiprocessing.cpu_count(),
help=help_wrap("The number of records that are processed in parallel "
"during input processing. This can be optimized per "
"data set but for generally homogeneous data sets, "
"should be approximately the number of available CPU "
"cores. (default behavior)"))
if inter_op:
flags.DEFINE_integer(
name="inter_op_parallelism_threads", short_name="inter", default=0,
help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. "
"See TensorFlow config.proto for details.")
)
if intra_op:
flags.DEFINE_integer(
name="intra_op_parallelism_threads", short_name="intra", default=0,
help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. "
"See TensorFlow config.proto for details."))
if synthetic_data:
flags.DEFINE_bool(
name="use_synthetic_data", short_name="synth", default=False,
help=help_wrap(
"If set, use fake data (zeroes) instead of a real dataset. "
"This mode is useful for performance debugging, as it removes "
"input processing steps, but will not learn anything."))
if max_train_steps:
flags.DEFINE_integer(
name="max_train_steps", short_name="mts", default=None, help=help_wrap(
"The model will stop training if the global_step reaches this "
"value. If not set, training will run until the specified number "
"of epochs have run as usual. It is generally recommended to set "
"--train_epochs=1 when using this flag."
))
if dtype:
flags.DEFINE_enum(
name="dtype", short_name="dt", default="fp32",
enum_values=DTYPE_MAP.keys(),
help=help_wrap("The TensorFlow datatype used for calculations. "
"Variables may be cast to a higher precision on a "
"case-by-case basis for numerical stability."))
loss_scale_help_text = (
"The amount to scale the loss by when the model is run. {}. Before "
"gradients are computed, the loss is multiplied by the loss scale, "
"making all gradients loss_scale times larger. To adjust for this, "
"gradients are divided by the loss scale before being applied to "
"variables. This is mathematically equivalent to training without "
"a loss scale, but the loss scale helps avoid some intermediate "
"gradients from underflowing to zero. If not provided the default "
"for fp16 is 128 and 1 for all other dtypes.{}"
)
if dynamic_loss_scale:
loss_scale_help_text = loss_scale_help_text.format(
"This can be an int/float or the string 'dynamic'",
" The string 'dynamic' can be used to dynamically determine the "
"optimal loss scale during training, but currently this "
"significantly slows down performance")
loss_scale_validation_msg = ("loss_scale should be a positive int/float "
"or the string 'dynamic'.")
else:
loss_scale_help_text = loss_scale_help_text.format(
"This must be an int/float", "")
loss_scale_validation_msg = "loss_scale should be a positive int/float."
if loss_scale:
flags.DEFINE_string(
name="loss_scale", short_name="ls", default=None,
help=help_wrap(loss_scale_help_text))
@flags.validator(flag_name="loss_scale",
message=loss_scale_validation_msg)
def _check_loss_scale(loss_scale): # pylint: disable=unused-variable
"""Validator to check the loss scale flag is valid."""
if loss_scale is None:
return True # null case is handled in get_loss_scale()
if loss_scale == "dynamic" and dynamic_loss_scale:
return True
try:
loss_scale = float(loss_scale)
except ValueError:
return False
return loss_scale > 0
if fp16_implementation:
flags.DEFINE_enum(
name="fp16_implementation", default="keras",
enum_values=("keras', 'graph_rewrite"),
help=help_wrap(
"When --dtype=fp16, how fp16 should be implemented. This has no "
"impact on correctness. 'keras' uses the "
"tf.keras.mixed_precision API. 'graph_rewrite' uses the "
"tf.train.experimental.enable_mixed_precision_graph_rewrite "
"API."))
@flags.multi_flags_validator(["fp16_implementation", "dtype",
"loss_scale"])
def _check_fp16_implementation(flags_dict):
"""Validator to check fp16_implementation flag is valid."""
if (flags_dict["fp16_implementation"] == "graph_rewrite" and
flags_dict["dtype"] != "fp16"):
raise flags.ValidationError("--fp16_implementation should not be "
"specified unless --dtype=fp16")
return True
if all_reduce_alg:
flags.DEFINE_string(
name="all_reduce_alg", short_name="ara", default=None,
help=help_wrap("Defines the algorithm to use for performing all-reduce."
"When specified with MirroredStrategy for single "
"worker, this controls "
"tf.contrib.distribute.AllReduceCrossTowerOps. When "
"specified with MultiWorkerMirroredStrategy, this "
"controls "
"tf.distribute.experimental.CollectiveCommunication; "
"valid options are `ring` and `nccl`."))
if num_packs:
flags.DEFINE_integer(
name="num_packs", default=1,
help=help_wrap("Sets `num_packs` in the cross device ops used in "
"MirroredStrategy. For details, see "
"tf.distribute.NcclAllReduce."))
if tf_gpu_thread_mode:
flags.DEFINE_string(
name="tf_gpu_thread_mode", short_name="gt_mode", default=None,
help=help_wrap(
"Whether and how the GPU device uses its own threadpool.")
)
flags.DEFINE_integer(
name="per_gpu_thread_count", short_name="pgtc", default=0,
help=help_wrap(
"The number of threads to use for GPU. Only valid when "
"tf_gpu_thread_mode is not global.")
)
if datasets_num_private_threads:
flags.DEFINE_integer(
name="datasets_num_private_threads",
default=None,
help=help_wrap(
"Number of threads for a private threadpool created for all"
"datasets computation..")
)
if datasets_num_parallel_batches:
flags.DEFINE_integer(
name="datasets_num_parallel_batches",
default=None,
help=help_wrap(
"Determines how many batches to process in parallel when using "
"map and batch from tf.data.")
)
if training_dataset_cache:
flags.DEFINE_boolean(
name="training_dataset_cache",
default=False,
help=help_wrap(
"Determines whether to cache the training dataset on workers. "
"Typically used to improve training performance when training "
"data is in remote storage and can fit into worker memory.")
)
if training_prefetch_batchs:
flags.DEFINE_integer(
name="training_prefetch_batchs",
default=tf.data.experimental.AUTOTUNE,
help=help_wrap(
"The number of batchs to prefetch for the training dataset.")
)
if eval_dataset_cache:
flags.DEFINE_boolean(
name="eval_dataset_cache",
default=False,
help=help_wrap(
"Determines whether to cache the eval dataset on workers. "
"Typically used to improve eval performance when eval "
"data is in remote storage and can fit into worker memory.")
)
if eval_prefetch_batchs:
flags.DEFINE_integer(
name="eval_prefetch_batchs",
default=tf.data.experimental.AUTOTUNE,
help=help_wrap(
"The number of batchs to prefetch for the eval dataset.")
)
if tf_data_experimental_slack:
flags.DEFINE_boolean(
name="tf_data_experimental_slack",
default=False,
help=help_wrap(
"Whether to enable tf.data's `experimental_slack` option.")
)
if enable_xla:
flags.DEFINE_boolean(
name="enable_xla", default=False,
help="Whether to enable XLA auto jit compilation")
if force_v2_in_keras_compile:
flags.DEFINE_boolean(
name="force_v2_in_keras_compile", default=None,
help="Forces the use of run_distribued path even if not"
"using a `strategy`. This is not the same as"
"`tf.distribute.OneDeviceStrategy`")
return key_flags
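# Hedged usage sketch (an assumption, not part of this module): a binary would
# typically call define_performance(...) at import time and read the parsed flags back
# through the helpers above once absl has processed argv, roughly:
#
#   from absl import app
#
#   define_performance(dtype=True, loss_scale=True, dynamic_loss_scale=True)
#
#   def main(_):
#     dtype = get_tf_dtype(flags.FLAGS)            # e.g. tf.float16 for --dtype=fp16
#     loss_scale = get_loss_scale(flags.FLAGS, default_for_fp16=128)
#
#   app.run(main)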
|
|
#!/usr/bin/python
# coding: UTF-8
# kegdata service to read keg status
# Written by: Ron Ritchey
from __future__ import unicode_literals
import json, threading, logging, Queue, time, getopt, sys
import RPi.GPIO as GPIO
from hx711 import HX711
# HOW TO CALCULATE THE REFERENCE UNIT
# To calibrate, put something on your sensor whose weight you know exactly (e.g. 2kg).
# In this case, 92 counts corresponds to 1 gram because, with 1 as the reference unit,
# I got numbers near 0 without any weight and numbers around 184000 when I added 2kg.
# So, by the rule of three: if 2000 grams reads 184000, then 1 gram reads 184000 / 2000 = 92.
#hx.set_reference_unit(1)
#hx.set_reference_unit(92)
#hx.set_reference_unit(10772)
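# Hedged illustration (not part of the original service): one way to derive the
# reference unit from two raw readings, assuming the HX711 is sampled once with the
# platform empty and once with a known weight. The helper name is hypothetical.
def calc_reference_unit(raw_empty, raw_loaded, known_grams):
    # counts per gram, e.g. (184000 - 0) / 2000.0 = 92
    return (raw_loaded - raw_empty) / float(known_grams)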
class kegdata():
kegdata_init = {
'name':"Sharon's Stout",
'description':'Rich Chocolate and Coffee Flavor',
'ABV':7.5,
'IBU':23,
'weight':320
}
varcheck = {
u'unicode':
[
u'name',
u'description',
],
u'int':
[
u'weight',
],
u'float':
[
u'ABV',
u'IBU',
]
}
def __init__(self, q):
self.dataqueue = q
self.kegdata = self.kegdata_init
self.kegdata_prev = { }
print "Initializing keg data service"
self.hx = HX711(4,17)
self.hx.set_reading_format("LSB", "MSB")
self.hx.set_reference_unit(673)
self.hx.reset()
self.hx.tare()
# Now set up a thread to listen to the channel and update our data when
# the channel indicates a relevant key has changed
data_t = threading.Thread(target=self.run)
data_t.daemon = True
data_t.start()
# self.server = server
# self.port = port
# self.pwd = pwd
# self.connection_failed = 0
#
# self.dataclient = None
# Now set up a thread to listen to the channel and update our data when
# the channel indicates a relevant key has changed
# data_t = threading.Thread(target=self.run)
# data_t.daemon = True
# data_t.start()
def validatekegvars(self, vars):
for vtype, members in self.varcheck.iteritems():
if vtype == u'unicode':
for v in members:
try:
if type(vars[v]) is unicode:
continue
if type(vars[v]) is None:
vars[v] = u""
elif type(vars[v]) is str:
logging.debug(u"Received string in {0}. Converting to Unicode".format(v))
vars[v] = vars[v].decode()
else:
# This happens so often when playing from webradio that I'm disabling logging for now.
# logging.debug(u"Received non-string type {0} in {1}. Converting to null".format(type(vars[v]),v))
vars[v] = u""
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = u""
elif vtype == u'bool':
for v in members:
try:
if type(vars[v]) is bool:
continue
if type(vars[v]) is None:
vars[v] = False
elif type(vars[v]) is int:
logging.debug(u"Received integer in {0}. Converting to boolean".format(v))
vars[v] = bool(vars[v])
else:
logging.debug(u"Received non-bool type {0} in {1}. Converting to False".format(type(vars[v]),v))
vars[v] = False
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = False
elif vtype == u'int':
for v in members:
try:
if type(vars[v]) is int:
continue
if type(vars[v]) is None:
vars[v] = 0
elif type(vars[v]) is bool:
logging.debug(u"Received boolean in {0}. Converting to integer".format(v))
vars[v] = int(vars[v])
else:
logging.debug(u"Received non-integer type {0} in {1}. Converting to 0".format(type(vars[v]),v))
vars[v] = 0
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = 0
# def connect(self):
#
# # Try up to 10 times to connect to REDIS
# self.connection_failed = 0
#
# logging.debug(u"Connecting to Rune Redis service on {0}:{1}".format(self.server, self.port))
#
# while True:
# if self.connection_failed >= 10:
# logging.debug(u"Could not connect to Rune Redis service")
# raise RuntimeError(u"Could not connect to Rune Redis service")
# try:
# # Connection to REDIS
# client = redis.StrictRedis(self.server, self.port, self.pwd)
#
# # Configure REDIS to send keyspace messages for set events
# client.config_set(u'notify-keyspace-events', u'KEA')
# self.dataclient = client
# logging.debug(u"Connected to Rune Redis service")
# break
# except:
# self.dataclient = None
# self.connection_failed += 1
# time.sleep(1)
#
#
# def subscribe(self):
# # Try to subscribe. If you fail, reconnect and try again.
# # If you fail, allow the resulting exception to be passed on.
#
# try:
# # Create a pubsub to receive messages
# self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
#
# # Subscribe to act_player_info keyspace events
# self.pubsub.psubscribe(u'__key*__:act_player_info')
# except redis.ConnectionError:
# self.connect()
#
# # Try again to subscribe
# # Create a pubsub to receive messages
# self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
#
# # Subscribe to act_player_info keyspace events
# self.pubsub.subscribe(u'__key*__:act_player_info')
def run(self):
logging.debug(u"kegdata service starting")
while True:
# if self.dataclient is None:
# try:
# # Try to connect
# self.connect()
# self.subscribe()
# self.status()
# self.sendUpdate()
# except (redis.ConnectionError, RuntimeError):
# self.dataclient = None
# # On connection error, sleep 5 and then return to top and try again
# time.sleep(5)
# continue
# try:
# # Wait for notice that key has changed
# msg = self.pubsub.get_message()
# if msg:
# # act_player_info key event occured
# self.status()
# self.sendUpdate()
# time.sleep(.01)
# except (redis.ConnectionError, RuntimeError):
# # if we lose our connection while trying to query DB
# # sleep 5 and then return to top to try again
# self.dataclient = None
# logging.debug(u"Could not get status from Rune Redis service")
# time.sleep(5)
# continue
self.status()
self.sendUpdate()
time.sleep(5)
def status(self):
# Read kegplayer status and update kegdata
# Update keg variables
self.kegdata[u'name'] = "Sharon's Stout"
self.kegdata[u'description'] = "Rich Chocolate and Coffee Flavor"
self.kegdata[u'ABV'] = 7.5
self.kegdata[u'IBU'] = 23
self.kegdata[u'weight'] = int(self.hx.get_weight(10))
print "Weight is {0} in oz".format(self.kegdata[u'weight'])
self.hx.power_down()
self.hx.power_up()
self.validatekegvars(self.kegdata)
def sendUpdate(self):
# Figure out what has changed and then send just those values across dataqueue
md = { }
for k, v in self.kegdata.iteritems():
pv = self.kegdata_prev[k] if k in self.kegdata_prev else None
if pv != v:
md[k] = v
# Send md to queue if anything has changed
if len(md) > 0:
# # elapsed is special as it needs to be sent to guarantee that the timer gets updated correctly. Even if it hasn't changed, send it anyway
# md[u'elapsed'] = self.kegdata[u'elapsed']
self.dataqueue.put(md)
# Update kegdata_prev
self.kegdata_prev = self.kegdata.copy()
if __name__ == u'__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename=u'kegdata.log', level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())
# try:
# opts, args = getopt.getopt(sys.argv[1:],u"hs:p:w:",[u"server=",u"port=",u"pwd="])
# except getopt.GetoptError:
# print u'kegdata_rune.py -s <server> -p <port> -w <password>'
# sys.exit(2)
# Set defaults
# server = u'localhost'
# port = 6379
# pwd= u''
# for opt, arg in opts:
# if opt == u'-h':
# print u'kegdata_rune.py -s <server> -p <port> -w <password>'
# sys.exit()
# elif opt in (u"-s", u"--server"):
# server = arg
# elif opt in (u"-p", u"--port"):
# port = arg
# elif opt in (u"-w", u"--pwd"):
# pwd = arg
import sys
q = Queue.Queue()
kd = kegdata(q)
try:
start = time.time()
while True:
if start+120 < time.time():
break;
try:
item = q.get(timeout=1000)
print u"++++++++++"
for k,v in item.iteritems():
print u"[{0}] '{1}' type {2}".format(k,v,type(v))
print u"++++++++++"
print
q.task_done()
except Queue.Empty:
pass
except KeyboardInterrupt:
print u''
pass
print u"Exiting..."
|
|
#!/usr/bin/env python
from __future__ import print_function
import copy
import os
import logging
from peyutil import read_as_json
from taxalotl.tax_partition import (INP_TAXONOMY_DIRNAME,
MISC_DIRNAME,
GEN_MAPPING_FILENAME,
get_taxon_partition,
use_tax_partitions)
_LOG = logging.getLogger(__name__)
_LIFE = 'Life'
####################################################################################################
# Some data (to be refactored later)
_x = {
'Archaea': {},
'Bacteria': {},
'Eukaryota': {
'Archaeplastida': {
'Glaucophyta': {},
'Rhodophyta': {},
'Chloroplastida': {},
MISC_DIRNAME: {},
},
'Fungi': {},
'Haptophyta': {},
'Metazoa': {
'Annelida': {},
'Arthropoda': {
'Arachnida': {},
'Malacostraca': {},
'Insecta': {
'Diptera': {},
'Coleoptera': {},
'Lepidoptera': {},
'Hymenoptera': {},
MISC_DIRNAME: {},
},
MISC_DIRNAME: {},
},
'Bryozoa': {},
'Chordata': {},
'Cnidaria': {},
'Ctenophora': {},
'Mollusca': {},
'Nematoda': {},
'Platyhelminthes': {},
'Porifera': {},
MISC_DIRNAME: {},
},
'SAR': {},
MISC_DIRNAME: {}
},
'Viruses': {},
MISC_DIRNAME: {},
}
BASE_PARTITIONS_DICT = {_LIFE: _x}
del _x
NAME_TO_PARTS_SUBSETS = {}
NAME_TO_PARENT_FRAGMENT = {}
NONTERMINAL_PART_NAMES = []
TERMINAL_PART_NAMES = []
PART_NAME_TO_FRAGMENT = {}
def get_inp_taxdir(parts_dir, frag, taxonomy_id):
return os.path.join(parts_dir, frag, INP_TAXONOMY_DIRNAME, taxonomy_id)
def get_misc_inp_taxdir(parts_dir, frag, taxonomy_id):
return os.path.join(parts_dir, frag, MISC_DIRNAME, INP_TAXONOMY_DIRNAME, taxonomy_id)
def get_all_taxdir_and_misc_uncles(parts_dir, frag, taxonomy_id):
"""Returns a list of dirs for this taxonomy_id starting at
the `frag` directory, but also including the __misc__ subdirectories
    of its ancestral directories.
This represents the set of directories that should hold the taxa
for this fragment allowing for underclassification of taxa, but
not misclassification into a non-ancestral group.
"""
d = [get_inp_taxdir(parts_dir, frag, taxonomy_id)]
if os.sep in frag:
frag = os.path.split(frag)[0]
while len(frag) > 1 + len(parts_dir):
md = get_inp_taxdir(parts_dir, os.path.join(frag, MISC_DIRNAME), taxonomy_id)
d.append(md)
if os.sep in frag:
frag = os.path.split(frag)[0]
else:
break
return d
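# Illustrative sketch (hypothetical values, not from this module): for
# frag='Life/Eukaryota/Metazoa' and taxonomy_id='ott', the returned list starts with the
# fragment's own input dir and then the MISC_DIRNAME input dirs of its ancestors:
#   <parts_dir>/Life/Eukaryota/Metazoa/<INP_TAXONOMY_DIRNAME>/ott
#   <parts_dir>/Life/Eukaryota/<MISC_DIRNAME>/<INP_TAXONOMY_DIRNAME>/ott
#   ... continuing upward while the fragment remains longer than the parts_dir prefix.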
def get_auto_gen_part_mapper(res):
fp = os.path.join(res.partitioned_filepath, GEN_MAPPING_FILENAME)
if not os.path.isfile(fp):
m = 'Mapping file not found at "{}"\nRun the build-partitions-maps command.'
raise RuntimeError(m.format(fp))
master_mapping = read_as_json(fp)
a_list = list(res.alias_list)
base_res = res.base_resource
if base_res:
a_list.extend(base_res.alias_list)
poss_ids = [res.id] + a_list + [res.base_id]
for k in poss_ids:
if k in master_mapping:
return master_mapping[k]
m = 'No entry for ids {} found in "{}".'
raise RuntimeError(m.format(', '.join(poss_ids), fp))
def _fill_parts_indices(d, par_frag):
global NAME_TO_PARTS_SUBSETS, NAME_TO_PARENT_FRAGMENT, NONTERMINAL_PART_NAMES
for k, subd in d.items():
NAME_TO_PARTS_SUBSETS[k] = tuple(subd.keys())
NAME_TO_PARENT_FRAGMENT[k] = par_frag
if par_frag:
cf = os.path.join(par_frag, k)
else:
cf = k
PART_NAME_TO_FRAGMENT[k] = cf
if subd:
NONTERMINAL_PART_NAMES.append(k)
_fill_parts_indices(subd, cf)
elif k != MISC_DIRNAME:
TERMINAL_PART_NAMES.append(k)
_fill_parts_indices(BASE_PARTITIONS_DICT, '')
PART_NAMES = list(NAME_TO_PARTS_SUBSETS.keys())
PART_NAMES.sort()
PART_NAMES = tuple(PART_NAMES)
PREORDER_PART_LIST = tuple(NONTERMINAL_PART_NAMES)
POSTORDER_PART_LIST = tuple(reversed(PREORDER_PART_LIST))
NONTERMINAL_PART_NAMES.sort()
NONTERMINAL_PART_NAMES = tuple(NONTERMINAL_PART_NAMES)
TERMINAL_PART_NAMES.sort()
TERMINAL_PART_NAMES = tuple(TERMINAL_PART_NAMES)
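# Illustrative note (derived from BASE_PARTITIONS_DICT above): for example,
# NAME_TO_PARENT_FRAGMENT['Insecta'] is the joined fragment 'Life/Eukaryota/Metazoa/Arthropoda'
# (using os.sep), PART_NAME_TO_FRAGMENT['Insecta'] appends 'Insecta' to that fragment,
# 'Diptera' ends up in TERMINAL_PART_NAMES, and 'Insecta' in NONTERMINAL_PART_NAMES.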
def _rec_populate(d_to_fill, key_to_filled_set):
# _LOG.info('key_to_filled_set = {}'.format(key_to_filled_set))
if MISC_DIRNAME in d_to_fill:
del d_to_fill[MISC_DIRNAME]
for key, subd in d_to_fill.items():
filled_set = key_to_filled_set.get(key)
if subd:
_rec_populate(subd, key_to_filled_set)
if not filled_set:
cu = set()
for v in subd.keys():
fsv = key_to_filled_set.get(v)
if fsv:
cu.update(fsv)
if cu:
key_to_filled_set[key] = cu
def fill_empty_anc_of_mapping(mapping):
# _LOG.info('mapping = {}'.format(mapping))
s = copy.deepcopy(BASE_PARTITIONS_DICT)
_rec_populate(s, mapping)
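# Minimal sketch (hypothetical mapping, not from this module): given
# mapping = {'Insecta': {1, 2}, 'Chordata': {3}}, fill_empty_anc_of_mapping adds unions
# for the empty ancestors, e.g. mapping['Arthropoda'] == {1, 2} and
# mapping['Metazoa'] == {1, 2, 3}, while already-filled keys are left untouched.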
# Data above here, to be refactored at some point
####################################################################################################
# Code below
def iter_existing_tax_dirs(path_pref, res_id):
suffix = os.path.join(INP_TAXONOMY_DIRNAME, res_id)
misc_suffix = os.path.join(MISC_DIRNAME, INP_TAXONOMY_DIRNAME, res_id)
for tup in os.walk(path_pref):
dirname = tup[0]
if dirname == path_pref:
continue
p = os.path.join(dirname, suffix)
if os.path.exists(p):
yield p
p = os.path.join(dirname, misc_suffix)
if os.path.exists(p):
yield p
def has_any_partition_dirs(path_pref, res_id):
assert path_pref
for p in iter_existing_tax_dirs(path_pref, res_id):
return True
return False
def find_partition_dirs_for_taxonomy(path_pref, res_id):
return [i for i in iter_existing_tax_dirs(path_pref, res_id)]
def get_part_dir_from_part_name(res, parts_key):
return os.path.join(res.partitioned_filepath, res.config.get_fragment_from_part_name(parts_key))
def merge_and_write_taxon_partition_list(tp_list):
if not tp_list:
return
fp_set = set()
for tp in tp_list:
fp = tp.taxon_fp
if fp in fp_set:
tp.append_write()
else:
tp.write()
fp_set.add(fp)
def write_info_for_res(outstream, res, part_name_to_split):
_LOG.debug('part_name_to_split = {}'.format(part_name_to_split))
par_frag = NAME_TO_PARENT_FRAGMENT[part_name_to_split]
_LOG.debug('par_frag = {}'.format(par_frag))
if par_frag and not res.has_been_partitioned_for_fragment(par_frag):
par_name = os.path.split(par_frag)[-1]
outstream.write(
'{} does not cover or has not been partitioned into {}\n'.format(res.id, par_name))
return
part_keys = NAME_TO_PARTS_SUBSETS[part_name_to_split]
master_map = res.get_primary_partition_map()
mapping = [(k, master_map[k]) for k in part_keys if k in master_map]
if not mapping:
outstream.write("No {} mapping for {}\n".format(res.id, part_name_to_split))
return
fragment = os.path.join(par_frag, part_name_to_split) if par_frag else part_name_to_split
if not res.has_been_partitioned_for_fragment(fragment):
outstream.write(
'{} does not cover or has not been partitioned into {}\n'.format(res.id, fragment))
return
outstream.write('What can I say about {} at {} ? Great stuff...\n'.format(res.id, fragment))
def do_partition(res, part_name_to_split):
"""Partition a parent taxon into descendants and garbagebin (__misc__) dir
:param res: a wrapper around the resource. Used for id, part_source_filepath,
    :param part_name_to_split: must be one of the hard-coded keys in NAME_TO_PARENT_FRAGMENT
"""
_LOG.debug('part_name_to_split = {}'.format(part_name_to_split))
par_frag = NAME_TO_PARENT_FRAGMENT[part_name_to_split]
_LOG.debug('par_frag = {}'.format(par_frag))
if par_frag and not res.has_been_partitioned_for_fragment(par_frag):
par_name = os.path.split(par_frag)[-1]
do_partition(res, par_name)
part_keys = NAME_TO_PARTS_SUBSETS[part_name_to_split]
master_map = res.get_primary_partition_map()
mapping = [(k, master_map[k]) for k in part_keys if k in master_map]
if not mapping:
_LOG.info("No {} mapping for {}".format(res.id, part_name_to_split))
return
fragment = os.path.join(par_frag, part_name_to_split) if par_frag else part_name_to_split
if res.has_been_partitioned_for_fragment(fragment):
_LOG.info("Partition for fragment {} has already been done.".format(fragment))
return
tp = get_taxon_partition(res, fragment)
if not par_frag:
tp.external_input_fp = os.path.join(res.partition_source_dir, res.taxon_filename)
tp.do_partition(mapping)
def check_partition(res, part_name_to_split):
par_frag = NAME_TO_PARENT_FRAGMENT[part_name_to_split]
part_keys = NAME_TO_PARTS_SUBSETS[part_name_to_split]
master_map = res.get_primary_partition_map()
fragment = os.path.join(par_frag, part_name_to_split) if par_frag else part_name_to_split
if not res.has_been_partitioned_for_fragment(fragment):
_LOG.info("Partition for fragment {} has not been done.".format(fragment))
return True
pop_subdirs = [k for k in part_keys if k in master_map]
if not pop_subdirs:
_LOG.info("No {} mapping for {}".format(res.id, part_name_to_split))
return True
with use_tax_partitions() as cache:
misc = get_taxon_partition(res, fragment)
cache.clear_without_flush(misc.cache_key)
subs = [get_taxon_partition(res, os.path.join(fragment, k)) for k in pop_subdirs]
unpart = get_taxon_partition(res, _LIFE)
unpart.external_input_fp = os.path.join(res.partition_source_dir, res.taxon_filename)
check_partition_union(fragment, misc, subs, unpart)
def check_partition_union(fragment, misc, subs, unpartitioned):
slice_roots, slice_ids = misc._debug_validity_check()
for p in subs:
p_ids = p._debug_validity_check()[1]
slice_ids.update(p_ids)
_LOG.warning('{} IDs from {} bring total in {} up to {}'.format(len(p_ids), p.fragment,
len(slice_ids), misc.fragment))
for p_root_id, root_obj in p._roots.items():
pr = root_obj['par_id']
if pr not in slice_ids:
slice_roots.add(p_root_id)
unpartitioned._debug_check_subtree_ids(slice_roots, slice_ids)
def get_inverse_misc_non_misc_dir_for_tax(inp_dir, tax_id):
""" If given an unpartitioned dir, return (misc, False) otherwise (canonical, True)
"""
misc_suffix = "/" + os.path.join(MISC_DIRNAME, INP_TAXONOMY_DIRNAME, tax_id)
if inp_dir.endswith(misc_suffix):
non_misc = inp_dir[:-len(misc_suffix)]
return os.path.join(non_misc, INP_TAXONOMY_DIRNAME, tax_id), False
non_misc_suffix = "/" + os.path.join(INP_TAXONOMY_DIRNAME, tax_id)
assert inp_dir.endswith(non_misc_suffix)
non_misc = inp_dir[:-len(non_misc_suffix)]
return os.path.join(non_misc, MISC_DIRNAME, INP_TAXONOMY_DIRNAME, tax_id), True
|
|
#!/usr/bin/env python
# OpenVirteX control script
# Heavily based on FlowVisor's fvctl
# Import Python utilities to parse arguments
import sys
from optparse import OptionParser
import urllib2
import json
import getpass
VERSION = '0.1'
SUPPORTED_PROTO = ['tcp']
def getUrl(opts, path):
return URL % (opts.host, opts.port, path)
def buildRequest(data, url, cmd):
j = { "id" : "ovxctl", "method" : cmd , "jsonrpc" : "2.0" }
h = {"Content-Type" : "application/json-rpc"}
if data is not None:
j['params'] = data
return urllib2.Request(url, json.dumps(j), h)
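# Hedged illustration: for cmd="createNetwork" and data={"tenantId": 1}, the request
# built above carries a JSON-RPC 2.0 envelope roughly like
#   {"id": "ovxctl", "method": "createNetwork", "jsonrpc": "2.0", "params": {"tenantId": 1}}
# posted with the application/json-rpc content type.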
def pa_none(args, cmd):
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=USAGE.format(cmd), description=ldesc)
(options, args) = parser.parse_args(args)
return (options, args)
#Create calls
def pa_addControllers(args, cmd):
usage = "%s <tenant_id> <vdpid> <ctrlUrls>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_addControllers(gopts, opts, args):
if len(args) != 3:
print "addControllers: Must specify tenant id, virtual dpid, controller list"
sys.exit()
req = { "controllerUrls" : buildControllerList(args[2]), \
"tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":",""), 16) }
resp = connect(gopts, "tenant", "addControllers", data=req, passwd=getPasswd(gopts))
if resp:
print "Added controllers %s to switch %s" % (args[2], args[1])
print resp
def pa_createNetwork(args, cmd):
usage = "%s <protocol> <controller_urls> <ip_network> <ip_mask> " % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def buildControllerList(ctrls):
if ctrls.lower() == "none":
return []
l = ctrls.split(',')
controllerUrls = []
for ctrl in l:
parts = ctrl.split(":")
if len(parts) < 3:
print "%s is not a valid controller url" % ctrl
sys.exit()
if parts[0] not in SUPPORTED_PROTO:
print "%s in %s is not a supported protocol" % (parts[0], ctrl)
sys.exit()
try:
int(parts[2])
except:
print "%s in %s is not a valid port number" % (parts[2], ctrl)
sys.exit()
controllerUrls.append(ctrl)
return controllerUrls
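# Hedged illustration: buildControllerList("tcp:10.0.0.1:6633,tcp:10.0.0.2:6633") returns
# ["tcp:10.0.0.1:6633", "tcp:10.0.0.2:6633"], while "none" (in any case) returns [].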
def do_createNetwork(gopts, opts, args):
if len(args) != 3:
print "createNetwork : Must specify controllerUrls, network_ip, network_mask"
sys.exit()
req = { "controllerUrls" : buildControllerList(args[0]), \
"networkAddress" : args[1], "mask" : int(args[2]) }
network_id = connect(gopts, "tenant", "createNetwork", data=req, passwd=getPasswd(gopts))
if network_id:
print "Virtual network has been created (network_id %s)." % str(network_id)
def pa_createSwitch(args, cmd):
usage = "%s [options] <tenant_id> <physical_dpids>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
parser.add_option("-d", "--dpid", dest="dpid", type="str", default="0",
help="Specify the DPID for this switch")
return parser.parse_args(args)
def do_createSwitch(gopts, opts, args):
if len(args) != 2:
print ("createSwitch : must specify: " +
"virtual tenant_id and a comma separated list of physical dpids " +
"(e.g. 00:00:00:00:00:00:00:01) which will be associated to the virtual switch")
sys.exit()
dpids = [int(dpid.replace(":", ""), 16) for dpid in args[1].split(',')]
req = { "tenantId" : int(args[0]), "dpids" : dpids, "dpid" : int(opts.dpid.replace(":", ""), 16) }
reply = connect(gopts, "tenant", "createSwitch", data=req, passwd=getPasswd(gopts))
switchId = reply.get('vdpid')
if switchId:
switch_name = '00:' + ':'.join([("%x" % switchId)[i:i+2] for i in range(0, len(("%x" % switchId)), 2)])
print "Virtual switch has been created (tenant_id %s, switch_id %s)" % (args[0], switch_name)
def pa_createPort(args, cmd):
usage = "%s <tenant_id> <physical_dpid> <physical_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createPort(gopts, opts, args):
if len(args) != 3:
print ("createPort : must specify: " +
"virtual tenant_id, physical dpid " +
"(e.g. 00:00:00:00:00:00:00:01) and physical port")
sys.exit()
req = { "tenantId" : int(args[0]), "dpid" : int(args[1].replace(":", ""), 16), "port" : int(args[2]) }
reply = connect(gopts, "tenant", "createPort", data=req, passwd=getPasswd(gopts))
switchId = reply.get('vdpid')
portId = reply.get('vport')
if switchId and portId:
switch_name = '00:' + ':'.join([("%x" %int(switchId))[i:i+2] for i in range(0, len(("%x" %int(switchId))), 2)])
print "Virtual port has been created (tenant_id %s, switch_id %s, port_id %s)" % (args[0], switch_name, portId)
def pa_setInternalRouting(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <routing_algorithm> <backup_routes_num>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_setInternalRouting(gopts, opts, args):
if len(args) != 4:
print ("setInternalRouting : Must specify virtual tenant_id, virtual switch_id, " +
"algorithm (spf, manual) and number of backup routes")
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"algorithm" : args[2], "backup_num" : int(args[3]) }
reply = connect(gopts, "tenant", "setInternalRouting", data=req, passwd=getPasswd(gopts))
tenantId = reply.get('tenantId')
switchId = reply.get('vdpid')
if tenantId and switchId:
print "Routing has be set for big switch (tenant_id %s, switch_id %s)" % (switchId, tenantId)
def pa_connectHost(args, cmd):
usage = "%s <tenant_id> <vitual_dpid> <virtual_port> <host_mac>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectHost(gopts, opts, args):
if len(args) != 4:
print "connectHost : Must specify virtual tenant_id, virtual switch_id, virtual port_id and host MAC address"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"vport" : int(args[2]), "mac" : args[3] }
reply = connect(gopts, "tenant", "connectHost", data=req, passwd=getPasswd(gopts))
hostId = reply.get('hostId')
if hostId:
print "Host (host_id %s) has been connected to virtual port" % (hostId)
def pa_connectLink(args, cmd):
usage = "%s <tenant_id> <src_virtual_dpid> <src_virtual_port> <dst_virtual_dpid> <dst_virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectLink(gopts, opts, args):
if len(args) != 7:
print ("connectLink : Must specify tenant_id, src_virtual_dpid, src_virtual_port, dst_virtual_dpid, dst_virtual_port, "
+ "algorithm (spf, manual), number of backup routes")
sys.exit()
req = { "tenantId" : int(args[0]), "srcDpid" : int(args[1].replace(":", ""), 16),
"srcPort" : int(args[2]), "dstDpid" : int(args[3].replace(":", ""), 16),
"dstPort" : int(args[4]), "algorithm" : args[5], "backup_num" : int(args[6]) }
reply = connect(gopts, "tenant", "connectLink", data=req, passwd=getPasswd(gopts))
linkId = reply.get('linkId')
if linkId:
print "Virtual link (link_id %s) has been created" % (linkId)
def pa_setLinkPath(args, cmd):
usage = "%s <tenant_id> <link_id> <physical_path> <priority>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_setLinkPath(gopts, opts, args):
if len(args) != 4:
print "setLinkPath : Must specify tenant_id, link_id, the physical path that connect the end-points and the priority [0-255]"
sys.exit()
req = { "tenantId" : int(args[0]), "linkId" : int(args[1]), "path" : translate_path(args[2]), "priority" : int(args[3]) }
reply = connect(gopts, "tenant", "setLinkPath", data=req, passwd=getPasswd(gopts))
linkId = reply.get('linkId')
if linkId:
print "Virtual link (link_id %s) path has been set" % (linkId)
def pa_connectRoute(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <src_virtual_port> <dst_virtual_port> <physical_path> <priority>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectRoute(gopts, opts, args):
if len(args) != 6:
print ("connectRoute : Must specify tenant_id, virtual_dpid, src_virtual_port, dst_virtual_port, " +
"the physical path that connect the end-points and the priority [0-255]")
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16),
"srcPort" : int(args[2]), "dstPort" : int(args[3]),
"path" : translate_path(args[4]), "priority" : int(args[5]) }
reply = connect(gopts, "tenant", "connectRoute", data=req, passwd=getPasswd(gopts))
routeId = reply.get('routeId')
if routeId:
print "Big-switch internal route (route_id %s) has been created" % (routeId)
#Remove calls
def pa_removeNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_removeNetwork(gopts, opts, args):
if len(args) != 1:
print "removeNetwork : Must specify a virtual tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "removeNetwork", data=req, passwd=getPasswd(gopts))
print "Network (tenant_id %s) has been removed" % (args[0])
def pa_removeSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_removeSwitch(gopts, opts, args):
if len(args) != 2:
print "removeSwitch : Must specify a virtual tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16) }
result = connect(gopts, "tenant", "removeSwitch", data=req, passwd=getPasswd(gopts))
print "Switch (switch_id %s) has been removed" % (args[1])
def pa_removePort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_removePort(gopts, opts, args):
if len(args) != 3:
print "removePort : Must specify a virtual tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
result = connect(gopts, "tenant", "removePort", data=req, passwd=getPasswd(gopts))
print "Port (port_id %s) has been removed from virtual switch (switch_id %s)" % (args[2], args[1])
def pa_disconnectHost(args, cmd):
usage = "%s <tenant_id> <host_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectHost(gopts, opts, args):
if len(args) != 2:
print "disconnectHost : Must specify a a virtual tenant_id and a host_id"
sys.exit()
req = { "tenantId" : int(args[0]), "hostId" : int(args[1]) }
result = connect(gopts, "tenant", "disconnectHost", data=req, passwd=getPasswd(gopts))
print "Host (host_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[1], args[0])
def pa_disconnectLink(args, cmd):
usage = "%s <tenant_id> <link_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectLink(gopts, opts, args):
if len(args) != 2:
print "disconnectLink : Must specify a a virtual tenant_id and a link_id"
sys.exit()
req = { "tenantId" : int(args[0]), "linkId" : int(args[1]) }
result = connect(gopts, "tenant", "disconnectLink", data=req, passwd=getPasswd(gopts))
print "Link (link_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[1], args[0])
def pa_disconnectRoute(args, cmd):
usage = "%s <tenant_id> <route_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_disconnectRoute(gopts, opts, args):
if len(args) != 3:
print "disconnectRoute : Must specify a virtual tenant_id, switch_id and a route_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16) , "routeId" : int(args[2]) }
result = connect(gopts, "tenant", "disconnectRoute", data=req, passwd=getPasswd(gopts))
print "Route (route_id %s) in virtual big-switch (switch_id %s) has been disconnected from the virtual network (tenant_id %s)" % (args[2], args[1], args[0])
#Runtime operations
def pa_startNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startNetwork(gopts, opts, args):
if len(args) != 1:
print "startNetwork : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "startNetwork", data=req, passwd=getPasswd(gopts))
if result:
print "Network (tenant_id %s) has been booted" % (args[0])
def pa_startSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startSwitch(gopts, opts, args):
if len(args) != 2:
print "startSwitch : Must specify a tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16)}
result = connect(gopts, "tenant", "startSwitch", data=req, passwd=getPasswd(gopts))
if result:
print "Switch (switch_id %s) has been booted in virtual network (tenant_id %s)" % (args[1], args[0])
def pa_startPort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_startPort(gopts, opts, args):
if len(args) != 3:
print "startPort : Must specify a tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
reply = connect(gopts, "tenant", "startPort", data=req, passwd=getPasswd(gopts))
tenantId = reply.get('tenantId')
switchId = reply.get('vdpid')
portId = reply.get('vport')
if tenantId and switchId and portId:
print "Port (port_id %s) has been started in virtual switch (tenant_id %s, switch_id %s)" % (portId, tenantId, switchId)
def pa_stopNetwork(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopNetwork(gopts, opts, args):
if len(args) != 1:
print "stopNetwork : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "tenant", "stopNetwork", data=req, passwd=getPasswd(gopts))
if result:
print "Network (tenant_id %s) has been shutdown" % (args[0])
def pa_stopSwitch(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopSwitch(gopts, opts, args):
if len(args) != 2:
print "stopSwitch : Must specify a tenant_id and a virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16)}
result = connect(gopts, "tenant", "stopSwitch", data=req, passwd=getPasswd(gopts))
if result:
print "Switch (switch_id %s) has been shutdown in virtual network (tenant_id %s)" % (args[1], args[0])
def pa_stopPort(args, cmd):
usage = "%s <tenant_id> <virtual_dpid> <virtual_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_stopPort(gopts, opts, args):
if len(args) != 3:
print "stopPort : Must specify a tenant_id, a virtual switch_id and a virtual port_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vdpid" : int(args[1].replace(":", ""), 16), "vport" : int(args[2])}
result = connect(gopts, "tenant", "stopPort", data=req, passwd=getPasswd(gopts))
if result:
print "Port (port_id %s) has been shutdown in virtual switch (tenant_id %s, switch_id %s)" % (args[2], args[0], args[1])
def pa_getPhysicalFlowtable(args, cmd):
usage = "%s [<physical_dpid>]" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalFlowtable(gopts, opts, args):
if len(args) > 1:
print "getPhysicalFlowtable : May specify optional physical dpid"
sys.exit()
req = {}
if len(args) == 1:
req["dpid"] = int(args[0].replace(":", ""), 16)
result = connect(gopts, "status", "getPhysicalFlowtable", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getPhysicalHosts(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalHosts(gopts, opts, args):
if len(args) > 0:
print "getPhysicalHosts : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "getPhysicalHosts", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getPhysicalTopology(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getPhysicalTopology(gopts, opts, args):
if len(args) > 0:
print "getPhysicalTopology : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "getPhysicalTopology", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_listVirtualNetworks(args, cmd):
usage = "%s" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_listVirtualNetworks(gopts, opts, args):
if len(args) > 0:
print "listVirtualNetworks : No arguments"
sys.exit()
req = {}
result = connect(gopts, "status", "listVirtualNetworks", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualAddressMapping(args, cmd):
usage = "%s <tenant_id> <virtual_dpid>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualAddressMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualAddressMapping : Must specify a tenant_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
result = connect(gopts, "status", "getVirtualAddressMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualFlowtable(args, cmd):
usage = "%s <tenant_id> [<virtual_dpid>]" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualFlowtable(gopts, opts, args):
if (len(args) == 0) or (len(args) > 2):
print "getVirtualFlowtable : Must specify a tenant_id, and optional virtual switch_id"
sys.exit()
req = { "tenantId" : int(args[0]) }
if len(args) == 2:
req["vdpid"] = int(args[1].replace(":", ""), 16)
result = connect(gopts, "status", "getVirtualFlowtable", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualHosts(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualHosts(gopts, opts, args):
if len(args) != 1:
print "getVirtualHosts : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualHosts", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualLinkMapping(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualLinkMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualHosts : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualLinkMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualSwitchMapping(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualSwitchMapping(gopts, opts, args):
if len(args) != 1:
print "getVirtualSwitchMapping : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualSwitchMapping", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
def pa_getVirtualTopology(args, cmd):
usage = "%s <tenant_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getVirtualTopology(gopts, opts, args):
if len(args) != 1:
print "getVirtualTopology : Must specify a tenant_id"
sys.exit()
req = { "tenantId": int(args[0]) }
result = connect(gopts, "status", "getVirtualTopology", data=req, passwd=getPasswd(gopts))
print json.dumps(result)
# Methods added for vsdn_project by Gaurav
def pa_migrateVM(args, cmd):
usage = "%s <tenant_id> <host_id> <host_mac> <switch_physical_dpid> <physical_port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_migrateVM(gopts, opts, args):
if len(args) != 5:
print "Migrate VM : Must specify a tenant_id, host_id, host_mac, switch_physical_dpid, physical_port"
sys.exit()
req = { "tenantId" : int(args[0]), "vsdn_hid" : int(args[1]), "vsdn_hmac" : args[2], "vsdn_spdpid":args[3], "vsdn_pport" : int(args[4])}
reply = connect(gopts, "tenant", "migrateVM", data=req, passwd=getPasswd(gopts))
#hostId = reply.get('hostId')
#if hostId:
print "reply %s"% (reply)
def pa_changeRestriction(args, cmd):
usage = "%s <tenant_id> <is_restricted_topology>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_changeRestriction(gopts, opts, args):
if len(args) != 2:
print "Change Restriction : Must specify a tenant_id, is_restricted_topology"
sys.exit()
req = { "tenantId" : int(args[0]), "vsdn_ntype" : args[1]}
reply = connect(gopts, "tenant", "changeRestriction", data=req, passwd=getPasswd(gopts))
#hostId = reply.get('hostId')
#if hostId:
print "reply %s"% (reply)
def pa_getAllowedSwitches(args, cmd):
usage = "%s <tenant_id> <host_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_getAllowedSwitches(gopts, opts, args):
if len(args) != 2:
print "Get Allowed Switches : Must specify a tenant_id, host_id"
sys.exit()
req = { "tenantId" : int(args[0]), "vsdn_hid" : int(args[1])}
reply = connect(gopts, "tenant", "getAllowedSwitches", data=req, passwd=getPasswd(gopts))
#hostId = reply.get('hostId')
#if hostId:
print "reply %s"% (reply)
def pa_traceRoute(args, cmd):
usage = "%s <tenant_id> <src_ip> <dst_ip> <timestamp> <protocol> " % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_traceRoute(gopts, opts, args):
if len(args) != 5:
print "traceRoute : Must specify a tenant_id, source_ip, destination_id, timestamp, protocol"
sys.exit()
req = { "tenantId" : int(args[0]), "vsdn_srcip" : args[1], "vsdn_dstip" : args[2],"vsdn_timestamp" : long(args[3]),"vsdn_protocol" : args[4]}
reply = connect(gopts, "tenant", "traceRoute", data=req, passwd=getPasswd(gopts))
#hostId = reply.get('hostId')
#if hostId:
print "reply %s"% (reply)
# Other methods
def translate_path(path_string):
hop_list = path_string.split(",")
path = ""
for hop in hop_list:
src, dst = hop.split("-")
src_dpid, src_port = src.split("/")
dst_dpid, dst_port = dst.split("/")
src_long_dpid = int(src_dpid.replace(":", ""), 16)
dst_long_dpid = int(dst_dpid.replace(":", ""), 16)
path = path + str(src_long_dpid) + "/" + str(src_port) + "-" + str(dst_long_dpid) + "/" + str(dst_port) + ","
if len(path) > 0:
        path = path.rstrip(",")
return path
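# Hedged illustration: translate_path("00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/2")
# converts each dpid from colon-separated hex to its decimal value, returning "1/1-2/2".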
def pa_help(args, cmd):
usage = "%s <cmd>" % USAGE.format(cmd)
parser = OptionParser(usage=usage)
return parser.parse_args(args)
def do_help(gopts, opts, args):
if len(args) != 1:
raise IndexError
try:
(pa, func) = CMDS[args[0]]
pa(['--help'], args[0])
except KeyError, e:
print "Invalid command : %s is an unknown command." % args[0]
sys.exit()
def connect(opts, path, cmd, data=None, passwd=None):
try:
url = getUrl(opts, path)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, opts.ovx_user, passwd)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
req = buildRequest(data, url, cmd)
#ph = urllib2.urlopen(req)
ph = opener.open(req)
return parseResponse(ph.read())
except urllib2.URLError as e:
print e
sys.exit(1)
except urllib2.HTTPError as e:
if e.code == 401:
print "Authentication failed: invalid password"
sys.exit(1)
elif e.code == 504:
print "HTTP Error 504: Gateway timeout"
sys.exit(1)
else:
print e
except RuntimeError as e:
print e
def parseResponse(data):
j = json.loads(data)
if 'error' in j:
print j
sys.exit(1)
return j['result']
def printVersion(option, opt, value, parser):
"""Print ovxctl version and exit"""
print "ovxctl-%s" % VERSION
sys.exit()
def printHelp (option, opt, value, parser):
"""Print ovxctl help and exit"""
cmds = [x for x in CMDS.iterkeys()]
cmds.remove('help')
cmds.sort()
print parser.format_help().strip()
print "\n Available commands are: "
for x in cmds:
(sdesc, ldesc) = DESCS[x]
print " {0:25} {1:10}".format(x, sdesc)
print "\n See '%s help <command>' for more info on a specific command." % sys.argv[0]
sys.exit()
CMDS = {
'addControllers': (pa_addControllers, do_addControllers),
'createNetwork': (pa_createNetwork, do_createNetwork),
'createSwitch': (pa_createSwitch, do_createSwitch),
'createPort': (pa_createPort, do_createPort),
'setInternalRouting': (pa_setInternalRouting, do_setInternalRouting),
'connectHost': (pa_connectHost, do_connectHost),
'connectLink': (pa_connectLink, do_connectLink),
'setLinkPath': (pa_setLinkPath, do_setLinkPath),
'connectRoute': (pa_connectRoute, do_connectRoute),
'removeNetwork': (pa_removeNetwork, do_removeNetwork),
'removeSwitch': (pa_removeSwitch, do_removeSwitch),
'removePort': (pa_removePort, do_removePort),
'disconnectHost': (pa_disconnectHost, do_disconnectHost),
'disconnectLink': (pa_disconnectLink, do_disconnectLink),
'disconnectRoute': (pa_disconnectRoute, do_disconnectRoute),
'startNetwork': (pa_startNetwork, do_startNetwork),
'startSwitch': (pa_startSwitch, do_startSwitch),
'startPort': (pa_startPort, do_startPort),
'stopNetwork': (pa_stopNetwork, do_stopNetwork),
'stopSwitch': (pa_stopSwitch, do_stopSwitch),
'stopPort': (pa_stopPort, do_stopPort),
'getPhysicalFlowtable': (pa_getPhysicalFlowtable, do_getPhysicalFlowtable),
'getPhysicalHosts': (pa_getPhysicalHosts, do_getPhysicalHosts),
'getPhysicalTopology': (pa_getPhysicalTopology, do_getPhysicalTopology),
'listVirtualNetworks': (pa_listVirtualNetworks, do_listVirtualNetworks),
'getVirtualAddressMapping': (pa_getVirtualAddressMapping, do_getVirtualAddressMapping),
'getVirtualFlowtable': (pa_getVirtualFlowtable, do_getVirtualFlowtable),
'getVirtualHosts': (pa_getVirtualHosts, do_getVirtualHosts),
'getVirtualLinkMapping': (pa_getVirtualLinkMapping, do_getVirtualLinkMapping),
'getVirtualSwitchMapping': (pa_getVirtualSwitchMapping, do_getVirtualSwitchMapping),
'getVirtualTopology': (pa_getVirtualTopology, do_getVirtualTopology),
'migrateVM' : (pa_migrateVM, do_migrateVM),
'changeRestriction' : (pa_changeRestriction, do_changeRestriction),
'getAllowedSwitches' : (pa_getAllowedSwitches, do_getAllowedSwitches),
'traceRoute' : (pa_traceRoute, do_traceRoute),
'help' : (pa_help, do_help)
}
DESCS = {
'addControllers' : ("Adds controllers to a virtual switch",
("Adds the specified list of controllers to a given virtual switch.\n"
"ExampleL addController <tenantId> <vdpid> <ctrlUrls>")),
'createNetwork' : ("Creates a virtual network",
("Creates a virtual network. Input: protocol, controllerIP, controller port, ip address, mask. "
"\nExample: createNetwork tcp 1.1.1.1 6634 192.168.1.0 24")),
'createSwitch' : ("Create virtual switch",
("Create a virtual switch. Must specify a tenant_id, and a list of the physical_dpids that will be part of the virtual switch."
"\nExample: createSwitch 1 00:00:00:00:00:00:00:01,00:00:00:00:00:00:00:02")),
'createPort' : ("Create virtual port",
("Create a virtual port. Must specify a tenant_id, a physical_dpid and a physical_port."
"\nExample: createPort 1 00:00:00:00:00:00:00:01 1")),
'setInternalRouting' : ("Set big-switch internal routing mechanism",
("Set big-switch internal routing mechanism. Must specify a tenant_id, a virtual switch_id, the routing type (spf, manual) "
"and the number (0-255) of the backup paths that have to be computed."
"\nExample: setInternalRouting 1 00:00:00:00:00:00:00:01 spf 128")),
'connectHost' : ("Connect host to a virtual port",
("Connect host to a virtual port. Must specify a tenant_id, a virtual switch_id, a virtual port_id and the host MAC address."
"\nExample: connectHost 1 00:a4:23:05:00:00:00:01 1 00:00:00:00:00:01")),
'connectLink' : ("Connect two virtual ports through a virtual link",
("Connect two virtual ports through a virtual link. Must specify a tenant_id, a virtual src_switch_id, a virtual src_port_id, "
"a virtual dst_switch_id, a virtual dst_port_id, the routing type (spf, manual) and the number (0-255) of the backup paths that have to be computed."
"\nExample: connectLink 1 00:a4:23:05:00:00:00:01 1 00:a4:23:05:00:00:00:02 1 spf 1")),
'setLinkPath' : ("Set the physical path of a virtual link",
("Set the physical path of a virtual link. Must specify a tenant_id, a virtual link_id, a physical path and a priority (0-255)."
"\nExample: connectLink 1 1 00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1,"
"00:00:00:00:00:00:00:2/2-00:00:00:00:00:00:00:3/1 128")),
'connectRoute' : ("Connect two virtual ports inside a virtual big-switch",
("Connect two virtual ports inside a virtual big-switch. Must specify a tenant_id, a virtual switch_id, a virtual src_port_id, "
"a virtual dst_port_id, a physical path and a priority (0-255)."
"\nExample: connectRoute 1 00:a4:23:05:00:00:00:01 1 2 00:00:00:00:00:00:00:01/1-00:00:00:00:00:00:00:02/1,"
"00:00:00:00:00:00:00:2/2-00:00:00:00:00:00:00:3/1 128")),
'removeNetwork' : ("Remove a virtual network",
("Remove a virtual network. Must specify a tenant_id."
"\nExample: removeNetwork 1")),
'removeSwitch' : ("Remove virtual switch",
("Remove a virtual switch. Must specify a tenant_id and a virtual switch_id."
"\nExample: removeSwitch 1 00:a4:23:05:00:00:00:01")),
'removePort' : ("Remove virtual port",
("Remove a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
"\nExample: removePort 1 00:a4:23:05:00:00:00:01 1")),
'disconnectHost' : ("Disconnect host from a virtual port",
("Disconnect host from a virtual port. Must specify a tenant_id and the host_id."
"\nExample: disconnectHost 1 1")),
'disconnectLink' : ("Disconnect link between two virtual ports",
("Disconnect link between two virtual ports. Must specify a tenant_id and the link_id."
"\nExample: disconnectLink 1 1")),
'disconnectRoute' : ("Disconnect big-switch internal route between two virtual ports",
("Disconnect big-switch internal route between two virtual ports. Must specify a tenant_id and the route_id."
"\nExample: disconnectRoute 1 00:a4:23:05:00:00:00:01 1")),
'startNetwork' : ("Start a virtual network",
("Start a virtual network. Must specify a tenant_id."
"\nExample: startNetwork 1")),
'startSwitch' : ("Start a virtual switch",
("Start a virtual switch. Must specify a tenant_id and a virtual switch_id."
"\nExample: startSwitch 1 00:a4:23:05:00:00:00:01")),
'startPort' : ("Start a virtual port",
("Start a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
"\nExample: startPort 1 00:a4:23:05:00:00:00:01 1")),
'stopNetwork' : ("Stop a virtual network",
("Stop a virtual network. Must specify a tenant_id."
"\nExample: stopNetwork 1")),
'stopSwitch' : ("Shutdown a virtual switch",
("Shutdown a virtual switch. Must specify a tenant_id and a virtual switch_id."
"\nExample: stopSwitch 1 00:a4:23:05:00:00:00:01")),
'stopPort' : ("Shutdown a virtual port",
("Shutdown a virtual port. Must specify a tenant_id, a virtual switch_id and a virtual port_id."
"\nExample: stopPort 1 00:a4:23:05:00:00:00:01 1")),
# Monitoring API - admin only
'getPhysicalFlowtable' : ("Get the physical flowtable of a specified switch or all switches",
("Get the physical flowtable of a specified switch or all switches. Specify optional physical switch_id."
"\nExample: getPhysicalFlowtable 00:00:00:00:00:00:00:01")),
'getPhysicalHosts' : ("Get a list of physical hosts",
("Get a list of physical hosts."
"\nExample: getPhysicalHosts")),
'getPhysicalTopology': ("Get the physical topology",
("Get the physical topology."
"\nExample: getPhysicalTopology")),
'listVirtualNetworks': ("Get a list of all virtual network tenant ID's",
("Get a list of all virtual network tenant ID's."
"\nExample: listVirtualNetworks")),
# Monitoring API - tenant restricted
'getVirtualAddressMapping' : ("Get the virtual to physical address mapping for a specified virtual network",
("Get the virtual to physical address mapping. Must specify a virtual network tenant_id."
"\nExample: getVirtualAddressMapping 1")),
'getVirtualFlowtable' : ("Get the flowtable in the specified virtual network",
("Get the flowtable in the specified virtual network. Must specify a virtual switch_id, optional virtual switch_id."
"\nExample: getVirtualFlowtable 00:a4:23:05:00:00:00:01")),
'getVirtualHosts' : ("Get list of hosts in virtual network",
("Get list of hosts in virtual network. Must specify a tenant_id"
"\nExample: getVirtualHosts 1")),
'getVirtualLinkMapping' : ("Get the virtual to physical link mapping",
("Get the virtual to physical link mapping. Must specify a tenant_id."
"\nExample: getVirtualLinkMapping 1")),
'getVirtualSwitchMapping' : ("Get the virtual to physical switch mapping",
("Get the virtual to physical switch mapping. Must specify a tenant_id."
"\nExample: getVirtualSwitchMapping 1")),
'getVirtualTopology' : ("Get the virtual topology",
("Get the virtual topology. Must specify a tenant_id."
"\nExample: getVirtualTopology 1")),
#APIs Added for vsdn_project by Gaurav
'migrateVM' : ("Migrate VM from one Physical Host to another",
("Migrate VM from one Physical Host to another. Must specify a tenant_id, host_id, host_mac, switch_physical_dpid, physical_port."
"\nExample: migrateVM 1 //TODO : update this with correct example")),
'changeRestriction' : ("Change the Topology Restriction of a Virtual Network",
("Change the Topology Restriction of a Virtual Network. Must specify a tenant_id, is_topology_restricted."
"\nExample: changeRestriction 1 true")),
'getAllowedSwitches' : ("Get the list of Physical Switches Valid for Migration",
("Get the list of Physical Switches Valid for Migration. Must specify a tenant_id, host_id."
"\nExample: getAllowedSwitches 1 2")),
'traceRoute' : ("Get path between two IPs",
("Get path between two IPs. Must specify a tenant_id, source_ip, destination_id, timestamp, protocol."
"\nExample: traceRoute 1 10.0.0.1 10.0.0.2 1434721741774 2 "))
}
USAGE="%prog {}"
URL = "http://%s:%s/%s"
def getPasswd(opts):
if opts.no_passwd:
return ""
else:
return getpass.getpass("Password: ")
def addCommonOpts (parser):
parser.add_option("-h", "--hostname", dest="host", default="localhost",
help="Specify the OpenVirteX host; default='localhost'")
parser.add_option("-p", "--port", dest="port", default="8080",
help="Specify the OpenVirteX web port; default=8080")
parser.add_option("-u", "--user", dest="ovx_user", default="admin",
help="OpenVirtex admin user; default='admin'")
parser.add_option("-n", "--no-passwd", action="store_true", dest="no_passwd", default=False,
help="Run ovxctl with no password; default false")
parser.add_option("-v", "--version", action="callback", callback=printVersion)
parser.add_option("--help", action="callback", callback=printHelp)
def parse_global_args (arglist):
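    # Everything before the first recognized command name is treated as a
    # global option; the remainder is left for the per-command parser.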
usage = "%s [options] command [command_args]" % sys.argv[0]
args = []
while (len(arglist) != 0 and arglist[0] not in CMDS):
args.append(arglist[0])
arglist.pop(0)
parser = OptionParser(add_help_option=False, usage=usage)
addCommonOpts(parser)
(opts, pargs) = parser.parse_args(args)
return (opts, arglist, parser)
if __name__ == '__main__':
try:
(gopts, rargs, parser) = parse_global_args(sys.argv[1:])
if len(rargs) < 1:
raise IndexError
(parse_args, do_func) = CMDS[rargs[0]]
(opts, args) = parse_args(rargs[1:], rargs[0])
do_func(gopts, opts, args)
sys.exit(0)
    except ValueError as e:
        print "The argument types being sent to the function %s are incorrect. Please double check them." % sys.argv[1]
    except IndexError as e:
        print "%s is an unknown command" % sys.argv[-1]
    except Exception as e:
        print "unknown error"
printHelp(None,None,None,parser)
|
|
from generator.actions import Actions
import random
import struct
import sys
class enemy(object):
def __init__(self, x, y, board):
self.myBoard = board
        self.x = x
        self.y = y
        self.quadrant = 0
self.homeColumn = x/2
def __eq__(self, other):
if (self.x == other.x) and (self.y == other.y):
return True
else:
return False
def move(self, newQuadrant):
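        # Quadrants 0-3 run clockwise around the enemy's 2x2 cell
        # (0=top-left, 1=top-right, 2=bottom-right, 3=bottom-left); moving to
        # an adjacent quadrant shifts x or y by one.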
if self.quadrant == newQuadrant:
return
if self.quadrant == 0:
if newQuadrant == 1:
self.x = self.x + 1
else:
self.y = self.y + 1
elif self.quadrant == 1:
if newQuadrant == 0:
self.x = self.x - 1
else:
self.y = self.y + 1
elif self.quadrant == 2:
if newQuadrant == 1:
self.y = self.y - 1
else:
self.x = self.x - 1
else:
if newQuadrant == 2:
self.x = self.x + 1
else:
self.y = self.y - 1
self.quadrant = newQuadrant
def step(self, randomNumber, clearToFire, advance):
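        # A roll of 1 (mod 7) fires if no friendly enemy is in the way;
        # otherwise a roll mod 3 picks counter-clockwise, clockwise, or no
        # movement within the cell.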
# check if the enemies need to move down one row
if advance:
self.y = self.y + 1
move = randomNumber%7
if move == 1:
# fire
if clearToFire == True:
self.myBoard.addEnemyBullet(self.x, self.y)
else:
# move
move = randomNumber%3
newQuadrant = self.quadrant
if move == 0:
# move counter clockwise
newQuadrant = newQuadrant-1
if newQuadrant == -1:
newQuadrant = 3
elif move == 1:
# move clockwise
newQuadrant = newQuadrant+1
if newQuadrant == 4:
newQuadrant = 0
self.move(newQuadrant)
class enemyBullet(object):
def __init__(self, x, y, board):
self.x = x
self.y = y
self.myBoard = board
def __eq__(self, other):
if (self.x == other.x) and (self.y == other.y):
return True
else:
return False
def step(self):
if self.y < (self.myBoard.rows - 1):
self.y = self.y + 1
return True
        # if the bullet is now off the screen, return False
return False
class userBullet(object):
def __init__(self, x, y, board):
self.x = x
self.y = y
self.myBoard = board
def __eq__(self, other):
if (self.x == other.x) and (self.y == other.y):
return True
else:
return False
def step(self):
if self.y > 0:
self.y = self.y - 1
return True
        # if the bullet is now off the screen, return False
return False
class board(object):
def __init__(self, rows, columns, seed1, seed2):
self.rows = rows
self.columns = columns
self.enemies = []
self.enemyBullets = []
self.userBullets = []
self.shipCords = [0, rows-1]
self.level = 0
self.seed1 = seed1
self.seed2 = seed2
# Add enemies
        self.rowCount = 2
for i in xrange(self.rowCount):
for j in xrange(int(self.columns/2)):
newEnemy = enemy(j*2, i*2, self)
self.enemies.append(newEnemy)
def getRandomInt(self):
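        # Two 16-bit multiply-with-carry style generators combined into one
        # 32-bit value; this appears to mirror the RNG of the service under
        # test so the poller can predict enemy behaviour.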
self.seed1 = (((42871 * (self.seed1 & 0xFFFF)) & 0xffffffff) + (self.seed1 >> 16)) & 0xffffffff
self.seed2 = (((28172 * (self.seed2 & 0xFFFF)) & 0xffffffff) + (self.seed2 >> 16)) & 0xffffffff
        return ((self.seed1 << 16) + self.seed2) & 0xffffffff
def addEnemyBullet(self, x, y):
self.enemyBullets.append(enemyBullet(x, y, self))
def addEnemy(self, x, y):
        self.enemies.append(enemy(x, y, self))
def addUserBullet(self, x, y):
self.userBullets.append(userBullet(x, y, self))
def updateUser(self, action):
        # check if the user is dead
for ele in self.enemyBullets:
if (self.shipCords[0] == ele.x) and (self.shipCords[1] == ele.y):
return False
#fire
if action == "~":
self.addUserBullet(self.shipCords[0], self.shipCords[1])
#up
elif action == "w":
if self.shipCords[1] > 0:
self.shipCords[1] = self.shipCords[1] - 1
#right
elif action == "d":
if self.shipCords[0] < self.columns - 1:
self.shipCords[0] = self.shipCords[0] + 1
#down
elif action == "s":
if self.shipCords[1] < self.rows - 1:
self.shipCords[1] = self.shipCords[1] + 1
#left
elif action == "a":
if self.shipCords[0] > 0:
self.shipCords[0] = self.shipCords[0] - 1
else:
self.addUserBullet(self.shipCords[0], self.shipCords[1])
for ele in self.enemyBullets:
if (self.shipCords[0] == ele.x) and (self.shipCords[1] == ele.y):
return False
return True
def step(self, advance):
output = ""
#step the enemies
for ele in self.enemies:
# Check if enemy should be allowed to fire
clearToFire = True
for ele2 in self.enemies:
if ele != ele2:
if (ele2.homeColumn == ele.homeColumn) and (ele2.y > ele.y):
clearToFire = False
val = self.getRandomInt()
output += str(val%1000) + "\n"
ele.step(val, clearToFire, advance)
#step enemy bullets
self.enemyBullets = [x for x in self.enemyBullets if x.step()]
#step user bullets
self.userBullets = [x for x in self.userBullets if x.step()]
#check for enemy collisions
enemyHold = []
userBulletHold = []
for ele in self.enemies:
for bullet in self.userBullets:
if (ele.x == bullet.x) and (ele.y == bullet.y):
enemyHold.append(ele)
userBulletHold.append(bullet)
for ele in enemyHold:
self.enemies.remove(ele)
for ele in userBulletHold:
self.userBullets.remove(ele)
return output
def draw(self, username, levelAdv):
output = ""
output += "\tWelcome to Attackers General " + username + "!"
output += '\n'
for i in xrange(self.columns+2):
output += '-'
output += '\n'
for i in xrange(self.rows):
output += '|'
for j in xrange(self.columns):
                printed = False
# Check if we should print the user's ship
if(self.shipCords[0] == j and self.shipCords[1] == i):
output += 'W'
printed = True
# Check if we should print enemy bullet
for ele in self.enemyBullets:
if(ele.x == j and ele.y == i) and (printed == False):
output += '+'
printed = True
break
# Check if we should print an enemy
for ele in self.enemies:
if(ele.x == j and ele.y == i) and (printed == False):
output += '#'
printed = True
break
# Check if we should print user bullet
for ele in self.userBullets:
if(ele.x == j and ele.y == i) and (printed == False):
output += '!'
printed = True
break
# If there wasn't anything to draw, draw a space
if(printed == False):
output += ' '
output += '|\n'
for i in xrange(self.columns+2):
output += '-'
output += '\n'
if levelAdv:
self.level = self.level + 1
# print level
output += '\t\tLevel #' + str(self.level) + "\n"
# Return the output of the draw
return output
rows = 25
columns = 25
class game(object):
def __init__(self, username, seed1, seed2, f):
self.f = f
self.username = username
if self.username == "magic":
self.seed1 = 0
self.seed2 = 0
else:
self.seed1 = seed1
self.seed2 = seed2
self.lost = False
self.currentUserInput = ""
self.turnCounter = 0
self.level = 1
# Create the board
self.myBoard = board(rows, columns, self.seed1, self.seed2)
def next(self, userInput):
enemyCount = len(self.myBoard.enemies)
output = ""
# If the user just pressed enter, account for that
if len(userInput) == 0:
userInput = "~"
else:
userInput = userInput[0]
if enemyCount != 0:
if self.turnCounter != 0:
# Take action
alive = self.myBoard.updateUser(self.currentUserInput)
enemyCount = len(self.myBoard.enemies)
if (self.turnCounter+1)%20 == 0:
self.level += 1
if (alive == False) or ((self.level >= 23) and (self.turnCounter%20 == 0)):
#self.f.comment('you lose %d %d %d', alive, self.level, self.turnCounter)
output += "\t\tyou lose\n\n\n\n\n"
self.lost = True
return output
else:
self.myBoard.step(((self.turnCounter%20) == 0))
# Draw the board
output += self.myBoard.draw(self.username, (((self.turnCounter)%20) == 0))
self.currentUserInput = userInput
self.turnCounter += 1
# We won
if enemyCount == 0:
danceX = 4
danceY = 5
num = 0
for i in xrange(100000):
output += "\t\t!!!DANCE MODE!!!\n\n\n\n\n\n\n\n"
# Draw top of dance box
for j in xrange(12):
output += '-'
output += '\n'
# Draw out board
for m in xrange(10):
output += '|'
for n in xrange(10):
if (m == danceY) and (n == danceX):
output += 'W'
else:
output += ' '
output += '|\n'
# Draw bottom of dance box
for j in xrange(12):
output += '-'
output += '\n'
output += "Bust a move (w = +1, s=SaveDigit, d=FIRE)\n"
output += str(num) + '\n'
input = raw_input()
if len(input) == 0:
input = "~"
if input[0] == "w":
num = num + 1
elif input[0] == "s":
num = 0
# Move the ship randomly
r = random.randint(0,4)
if r == 0:
if danceY == 0:
danceY = 9
else:
danceY = danceY - 1
elif r == 1:
if danceY == 9:
danceY = 0
else:
danceY = danceY + 1
if r == 2:
if danceX == 0:
danceX = 9
else:
danceX = danceX - 1
elif r == 3:
if danceX == 9:
danceX = 0
else:
danceX = danceX + 1
return output
def random_string(cnt):
return ''.join([chr(random.randint(97, 122)) for x in xrange(cnt)])
def random_input(cnt):
rndStr = random_string(cnt)
rndInp = ""
for ele in rndStr:
if ele != '\n':
rndInp += ele
return rndInp
class TemplateGenerator(Actions):
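    # Poller that mirrors the game implemented above and verifies the output
    # of the service under test line by line against the predicted board.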
def init_rng(self):
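        # Derive both RNG seeds from the first 16 bytes of the magic page;
        # presumably the service under test seeds its generator the same way.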
vals = struct.unpack('<IIII', self.magic_page[0:16])
self.state['seed1'] = (vals[0] ^ vals[1]) & 0xffffffff
self.state['seed2'] = (vals[2] ^ vals[3]) & 0xffffffff
def start(self):
#self.delay(100)
# read prompt
self.read(delim='\n', expect='Please input your name:\n')
# send name
self.state['username'] = random_input(32)
newUsername = ""
for ele in self.state['username']:
if ((ord(ele) >= 97) and (ord(ele) <= 122)):
newUsername += ele
else:
break
self.write(self.state['username'] + '\n')
self.state['username'] = newUsername
# Pull the secret values out
self.init_rng()
# Setup game object
self.game = game(self.state['username'], self.state['seed1'], self.state['seed2'], self)
def go(self):
while True:
if self.game.lost == True:
break
# Get user input
userInput = random_input(1)
# Get predicted gameboard state
boardState = self.game.next(userInput)
if not self.game.lost:
self.write(userInput + '\n')
#self.read(delim='\n', expect = str(struct.unpack('<i', struct.pack('<I', self.game.seed1))[0]) + " " + str(struct.unpack('<i', struct.pack('<I', self.game.seed2))[0]) + "\n")
                # Check if the predicted gameboard state matches reality
lines = boardState.split('\n')
del lines[-1]
for line in lines:
self.read(delim='\n', expect=line+'\n')
def quit(self):
pass
|
|
# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_config import cfg
from manila import exception
import manila.share.configuration
import manila.share.driver
from manila.share.drivers.hitachi.hnas import driver
from manila.share.drivers.hitachi.hnas import ssh
from manila import test
CONF = cfg.CONF
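# Fake share, snapshot, and access-rule fixtures shared by the test cases below.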
share_nfs = {
'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'size': 50,
'host': 'hnas',
'share_proto': 'NFS',
'share_type_id': 1,
'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d',
'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d',
'export_locations': [{'path': '172.24.44.10:/shares/'
'aa4a7710-f326-41fb-ad18-b4ad587fc87a'}],
}
share_cifs = {
'id': 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7',
'name': 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7',
'size': 50,
'host': 'hnas',
'share_proto': 'CIFS',
'share_type_id': 1,
'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d',
'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d',
'export_locations': [{'path': '\\\\172.24.44.10\\'
'f5cadaf2-afbe-4cc4-9021-85491b6b76f7'}],
}
share_invalid_host = {
'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'size': 50,
'host': 'invalid',
'share_proto': 'NFS',
'share_type_id': 1,
'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d',
'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d',
'export_locations': [{'path': '172.24.44.10:/shares/'
'aa4a7710-f326-41fb-ad18-b4ad587fc87a'}],
}
share_mount_support_nfs = {
'id': '62125744-fcdd-4f55-a8c1-d1498102f634',
'name': '62125744-fcdd-4f55-a8c1-d1498102f634',
'size': 50,
'host': 'hnas',
'share_proto': 'NFS',
'share_type_id': 1,
'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d',
'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d',
'export_locations': [{'path': '172.24.44.10:/shares/'
'62125744-fcdd-4f55-a8c1-d1498102f634'}],
'mount_snapshot_support': True,
}
share_mount_support_cifs = {
'id': 'd6e7dc6b-f65f-49d9-968d-936f75474f29',
'name': 'd6e7dc6b-f65f-49d9-968d-936f75474f29',
'size': 50,
'host': 'hnas',
'share_proto': 'CIFS',
'share_type_id': 1,
'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d',
'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d',
'export_locations': [{'path': '172.24.44.10:/shares/'
'd6e7dc6b-f65f-49d9-968d-936f75474f29'}],
'mount_snapshot_support': True,
}
access_nfs_rw = {
'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0',
'access_type': 'ip',
'access_to': '172.24.44.200',
'access_level': 'rw',
'state': 'active',
}
access_cifs_rw = {
'id': '43167594-40e9-b899-1f4f-b9c2176b7564',
'access_type': 'user',
'access_to': 'fake_user',
'access_level': 'rw',
'state': 'active',
}
access_cifs_ro = {
'id': '32407088-1f4f-40e9-b899-b9a4176b574d',
'access_type': 'user',
'access_to': 'fake_user',
'access_level': 'ro',
'state': 'active',
}
snapshot_nfs = {
'id': 'abba6d9b-f29c-4bf7-aac1-618cda7aaf0f',
'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'share': share_nfs,
'provider_location': '/snapshots/aa4a7710-f326-41fb-ad18-b4ad587fc87a/'
'abba6d9b-f29c-4bf7-aac1-618cda7aaf0f',
'size': 2,
}
snapshot_cifs = {
'id': '91bc6e1b-1ba5-f29c-abc1-da7618cabf0a',
'share_id': 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7',
'share': share_cifs,
'provider_location': '/snapshots/f5cadaf2-afbe-4cc4-9021-85491b6b76f7/'
'91bc6e1b-1ba5-f29c-abc1-da7618cabf0a',
'size': 2,
}
manage_snapshot = {
'id': 'bc168eb-fa71-beef-153a-3d451aa1351f',
'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'share': share_nfs,
'provider_location': '/snapshots/aa4a7710-f326-41fb-ad18-b4ad587fc87a'
'/snapshot18-05-2106',
}
snapshot_mount_support_nfs = {
'id': '3377b015-a695-4a5a-8aa5-9b931b023380',
'share_id': '62125744-fcdd-4f55-a8c1-d1498102f634',
'share': share_mount_support_nfs,
'provider_location': '/snapshots/62125744-fcdd-4f55-a8c1-d1498102f634'
'/3377b015-a695-4a5a-8aa5-9b931b023380',
}
snapshot_mount_support_cifs = {
'id': 'f9916515-5cb8-4612-afa6-7f2baa74223a',
'share_id': 'd6e7dc6b-f65f-49d9-968d-936f75474f29',
'share': share_mount_support_cifs,
'provider_location': '/snapshots/d6e7dc6b-f65f-49d9-968d-936f75474f29'
'/f9916515-5cb8-4612-afa6-7f2baa74223a',
}
invalid_share = {
'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'size': 100,
'host': 'hnas',
'share_proto': 'HDFS',
}
invalid_snapshot = {
'id': '24dcdcb5-a582-4bcc-b462-641da143afee',
'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a',
'share': invalid_share,
}
invalid_access_type = {
'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0',
'access_type': 'cert',
'access_to': 'manila_user',
'access_level': 'rw',
'state': 'active',
}
invalid_access_level = {
'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0',
'access_type': 'ip',
'access_to': 'manila_user',
'access_level': '777',
'state': 'active',
}
invalid_protocol_msg = ("Share backend error: Only NFS or CIFS protocol are "
"currently supported. Share provided %(id)s with "
"protocol %(proto)s." %
{'id': invalid_share['id'],
'proto': invalid_share['share_proto']})
@ddt.ddt
class HitachiHNASTestCase(test.TestCase):
def setUp(self):
super(HitachiHNASTestCase, self).setUp()
CONF.set_default('driver_handles_share_servers', False)
CONF.hitachi_hnas_evs_id = '2'
CONF.hitachi_hnas_evs_ip = '172.24.44.10'
CONF.hitachi_hnas_admin_network_ip = '10.20.30.40'
CONF.hitachi_hnas_ip = '172.24.44.1'
CONF.hitachi_hnas_ip_port = 'hitachi_hnas_ip_port'
CONF.hitachi_hnas_user = 'hitachi_hnas_user'
CONF.hitachi_hnas_password = 'hitachi_hnas_password'
CONF.hitachi_hnas_file_system_name = 'file_system'
CONF.hitachi_hnas_ssh_private_key = 'private_key'
CONF.hitachi_hnas_cluster_admin_ip0 = None
CONF.hitachi_hnas_stalled_job_timeout = 10
CONF.hitachi_hnas_driver_helper = ('manila.share.drivers.hitachi.hnas.'
'ssh.HNASSSHBackend')
self.fake_conf = manila.share.configuration.Configuration(None)
self.fake_private_storage = mock.Mock()
self.mock_object(self.fake_private_storage, 'get',
mock.Mock(return_value=None))
self.mock_object(self.fake_private_storage, 'delete',
mock.Mock(return_value=None))
self._driver = driver.HitachiHNASDriver(
private_storage=self.fake_private_storage,
configuration=self.fake_conf)
self._driver.backend_name = "hnas"
self.mock_log = self.mock_object(driver, 'LOG')
# mocking common backend calls
self.mock_object(ssh.HNASSSHBackend, "check_fs_mounted", mock.Mock(
return_value=True))
self.mock_object(ssh.HNASSSHBackend, "check_vvol")
self.mock_object(ssh.HNASSSHBackend, "check_quota")
self.mock_object(ssh.HNASSSHBackend, "check_cifs")
self.mock_object(ssh.HNASSSHBackend, "check_export")
self.mock_object(ssh.HNASSSHBackend, 'check_directory')
@ddt.data('hitachi_hnas_driver_helper', 'hitachi_hnas_evs_id',
'hitachi_hnas_evs_ip', 'hitachi_hnas_ip', 'hitachi_hnas_user')
def test_init_invalid_conf_parameters(self, attr_name):
self.mock_object(manila.share.driver.ShareDriver, '__init__')
setattr(CONF, attr_name, None)
self.assertRaises(exception.InvalidParameterValue,
self._driver.__init__)
def test_init_invalid_credentials(self):
self.mock_object(manila.share.driver.ShareDriver,
'__init__')
CONF.hitachi_hnas_password = None
CONF.hitachi_hnas_ssh_private_key = None
self.assertRaises(exception.InvalidParameterValue,
self._driver.__init__)
@ddt.data(True, False)
def test_update_access_nfs(self, empty_rules):
if not empty_rules:
access1 = {
'access_type': 'ip',
'access_to': '172.24.10.10',
'access_level': 'rw'
}
access2 = {
'access_type': 'ip',
'access_to': '188.100.20.10',
'access_level': 'ro'
}
access_list = [access1, access2]
access_list_updated = (
[access1['access_to'] + '(' + access1['access_level'] +
',norootsquash)', access2['access_to'] + '(' +
access2['access_level'] + ')', ])
else:
access_list = []
access_list_updated = []
self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule",
mock.Mock())
self._driver.update_access('context', share_nfs, access_list, [], [])
ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with(
access_list_updated, share_id=share_nfs['id'])
self.assertTrue(self.mock_log.debug.called)
def test_update_access_ip_exception(self):
access1 = {
'access_type': 'ip',
'access_to': '188.100.20.10',
'access_level': 'ro'
}
access2 = {
'access_type': 'something',
'access_to': '172.24.10.10',
'access_level': 'rw'
}
access_list = [access1, access2]
self.assertRaises(exception.InvalidShareAccess,
self._driver.update_access, 'context', share_nfs,
access_list, [], [])
def test_update_access_not_found_exception(self):
access1 = {
'access_type': 'ip',
'access_to': '188.100.20.10',
'access_level': 'ro'
}
access2 = {
'access_type': 'something',
'access_to': '172.24.10.10',
'access_level': 'rw'
}
access_list = [access1, access2]
self.mock_object(self._driver, '_ensure_share', mock.Mock(
side_effect=exception.HNASItemNotFoundException(msg='fake')))
self.assertRaises(exception.ShareResourceNotFound,
self._driver.update_access, 'context', share_nfs,
access_list, add_rules=[], delete_rules=[])
@ddt.data([access_cifs_rw, 'acr'], [access_cifs_ro, 'ar'])
@ddt.unpack
def test_allow_access_cifs(self, access_cifs, permission):
access_list_allow = [access_cifs]
self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access')
self._driver.update_access('context', share_cifs, [],
access_list_allow, [])
ssh.HNASSSHBackend.cifs_allow_access.assert_called_once_with(
share_cifs['id'], 'fake_user', permission, is_snapshot=False)
self.assertTrue(self.mock_log.debug.called)
def test_allow_access_cifs_invalid_type(self):
access_cifs_type_ip = {
'id': '43167594-40e9-b899-1f4f-b9c2176b7564',
'access_type': 'ip',
'access_to': 'fake_user',
'access_level': 'rw',
'state': 'active',
}
access_list_allow = [access_cifs_type_ip]
self.assertRaises(exception.InvalidShareAccess,
self._driver.update_access, 'context', share_cifs,
[], access_list_allow, [])
def test_deny_access_cifs(self):
access_list_deny = [access_cifs_rw]
self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access')
self._driver.update_access('context', share_cifs, [], [],
access_list_deny)
ssh.HNASSSHBackend.cifs_deny_access.assert_called_once_with(
share_cifs['id'], 'fake_user', is_snapshot=False)
self.assertTrue(self.mock_log.debug.called)
def test_deny_access_cifs_unsupported_type(self):
access_cifs_type_ip = {
'id': '43167594-40e9-b899-1f4f-b9c2176b7564',
'access_type': 'ip',
'access_to': 'fake_user',
'access_level': 'rw',
'state': 'active',
}
access_list_deny = [access_cifs_type_ip]
self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access')
self._driver.update_access('context', share_cifs, [], [],
access_list_deny)
self.assertTrue(self.mock_log.warning.called)
def test_update_access_invalid_share_protocol(self):
self.mock_object(self._driver, '_ensure_share')
ex = self.assertRaises(exception.ShareBackendException,
self._driver.update_access, 'context',
invalid_share, [], [], [])
self.assertEqual(invalid_protocol_msg, ex.msg)
def test_update_access_cifs_recovery_mode(self):
access_list = [access_cifs_rw, access_cifs_ro]
permission_list = [('fake_user1', 'acr'), ('fake_user2', 'ar')]
self.mock_object(ssh.HNASSSHBackend, 'list_cifs_permissions',
mock.Mock(return_value=permission_list))
self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access')
self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access')
self._driver.update_access('context', share_cifs, access_list, [], [])
ssh.HNASSSHBackend.list_cifs_permissions.assert_called_once_with(
share_cifs['id'])
self.assertTrue(self.mock_log.debug.called)
def _get_export(self, id, share_proto, ip, is_admin_only,
is_snapshot=False):
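        # Build the export location entry the driver is expected to return:
        # NFS paths look like "<ip>:/shares/<id>" (or /snapshots/<id> for
        # snapshots); CIFS paths look like "\\<ip>\<id>".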
if share_proto.lower() == 'nfs':
if is_snapshot:
path = '/snapshots/' + id
else:
path = '/shares/' + id
export = ':'.join((ip, path))
else:
export = r'\\%s\%s' % (ip, id)
return {
"path": export,
"is_admin_only": is_admin_only,
"metadata": {},
}
@ddt.data(share_nfs, share_cifs)
def test_create_share(self, share):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "vvol_create")
self.mock_object(ssh.HNASSSHBackend, "quota_add")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock(
return_value='/shares/' + share['id']))
self.mock_object(ssh.HNASSSHBackend, "cifs_share_add")
result = self._driver.create_share('context', share)
self.assertTrue(self.mock_log.debug.called)
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id'])
ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'],
share['size'])
expected = [
self._get_export(
share['id'], share['share_proto'], self._driver.hnas_evs_ip,
False),
self._get_export(
share['id'], share['share_proto'],
self._driver.hnas_admin_network_ip, True)]
if share['share_proto'].lower() == 'nfs':
ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(
share_nfs['id'], snapshot_id=None)
self.assertFalse(ssh.HNASSSHBackend.cifs_share_add.called)
else:
ssh.HNASSSHBackend.cifs_share_add.assert_called_once_with(
share_cifs['id'], snapshot_id=None)
self.assertFalse(ssh.HNASSSHBackend.nfs_export_add.called)
self.assertEqual(expected, result)
def test_create_share_export_error(self):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "vvol_create")
self.mock_object(ssh.HNASSSHBackend, "quota_add")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock(
side_effect=exception.HNASBackendException('msg')))
self.mock_object(ssh.HNASSSHBackend, "vvol_delete")
self.assertRaises(exception.HNASBackendException,
self._driver.create_share, 'context', share_nfs)
self.assertTrue(self.mock_log.debug.called)
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share_nfs['id'])
ssh.HNASSSHBackend.quota_add.assert_called_once_with(share_nfs['id'],
share_nfs['size'])
ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(
share_nfs['id'], snapshot_id=None)
ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(share_nfs['id'])
def test_create_share_invalid_share_protocol(self):
self.mock_object(driver.HitachiHNASDriver, "_create_share",
mock.Mock(return_value="path"))
ex = self.assertRaises(exception.ShareBackendException,
self._driver.create_share, 'context',
invalid_share)
self.assertEqual(invalid_protocol_msg, ex.msg)
@ddt.data(share_nfs, share_cifs)
def test_delete_share(self, share):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "nfs_export_del")
self.mock_object(ssh.HNASSSHBackend, "cifs_share_del")
self.mock_object(ssh.HNASSSHBackend, "vvol_delete")
self._driver.delete_share('context', share)
self.assertTrue(self.mock_log.debug.called)
ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(share['id'])
if share['share_proto'].lower() == 'nfs':
ssh.HNASSSHBackend.nfs_export_del.assert_called_once_with(
share['id'])
self.assertFalse(ssh.HNASSSHBackend.cifs_share_del.called)
else:
ssh.HNASSSHBackend.cifs_share_del.assert_called_once_with(
share['id'])
self.assertFalse(ssh.HNASSSHBackend.nfs_export_del.called)
@ddt.data(snapshot_nfs, snapshot_cifs, snapshot_mount_support_nfs,
snapshot_mount_support_cifs)
def test_create_snapshot(self, snapshot):
hnas_id = snapshot['share_id']
access_list = ['172.24.44.200(rw,norootsquash)',
'172.24.49.180(all_squash,read_write,secure)',
'172.24.49.110(ro, secure)',
'172.24.49.112(secure,readwrite,norootsquash)',
'172.24.49.142(read_only, secure)',
'172.24.49.201(rw,read_write,readwrite)',
'172.24.49.218(rw)']
ro_list = ['172.24.44.200(ro,norootsquash)',
'172.24.49.180(all_squash,ro,secure)',
'172.24.49.110(ro, secure)',
'172.24.49.112(secure,ro,norootsquash)',
'172.24.49.142(read_only, secure)',
'172.24.49.201(ro,ro,ro)',
'172.24.49.218(ro)']
export_locations = [
self._get_export(
snapshot['id'], snapshot['share']['share_proto'],
self._driver.hnas_evs_ip, False, is_snapshot=True),
self._get_export(
snapshot['id'], snapshot['share']['share_proto'],
self._driver.hnas_admin_network_ip, True, is_snapshot=True)]
expected = {'provider_location': '/snapshots/' + hnas_id + '/' +
snapshot['id']}
if snapshot['share'].get('mount_snapshot_support'):
expected['export_locations'] = export_locations
self.mock_object(ssh.HNASSSHBackend, "get_nfs_host_list", mock.Mock(
return_value=access_list))
self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "is_cifs_in_use", mock.Mock(
return_value=False))
self.mock_object(ssh.HNASSSHBackend, "tree_clone")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add")
self.mock_object(ssh.HNASSSHBackend, "cifs_share_add")
out = self._driver.create_snapshot('context', snapshot)
ssh.HNASSSHBackend.tree_clone.assert_called_once_with(
'/shares/' + hnas_id, '/snapshots/' + hnas_id + '/' +
snapshot['id'])
self.assertEqual(expected, out)
if snapshot['share']['share_proto'].lower() == 'nfs':
ssh.HNASSSHBackend.get_nfs_host_list.assert_called_once_with(
hnas_id)
ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call(
ro_list, share_id=hnas_id)
ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call(
access_list, share_id=hnas_id)
else:
ssh.HNASSSHBackend.is_cifs_in_use.assert_called_once_with(
hnas_id)
def test_create_snapshot_invalid_protocol(self):
self.mock_object(self._driver, '_ensure_share')
ex = self.assertRaises(exception.ShareBackendException,
self._driver.create_snapshot, 'context',
invalid_snapshot)
self.assertEqual(invalid_protocol_msg, ex.msg)
def test_create_snapshot_cifs_exception(self):
cifs_excep_msg = ("Share backend error: CIFS snapshot when share is "
"mounted is disabled. Set "
"hitachi_hnas_allow_cifs_snapshot_while_mounted to "
"True or unmount the share to take a snapshot.")
self.mock_object(ssh.HNASSSHBackend, "is_cifs_in_use", mock.Mock(
return_value=True))
ex = self.assertRaises(exception.ShareBackendException,
self._driver.create_snapshot, 'context',
snapshot_cifs)
self.assertEqual(cifs_excep_msg, ex.msg)
def test_create_snapshot_first_snapshot(self):
hnas_id = snapshot_nfs['share_id']
self.mock_object(ssh.HNASSSHBackend, "get_nfs_host_list", mock.Mock(
return_value=['172.24.44.200(rw)']))
self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock(
side_effect=exception.HNASNothingToCloneException('msg')))
self.mock_object(ssh.HNASSSHBackend, "create_directory")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add")
self.mock_object(ssh.HNASSSHBackend, "cifs_share_add")
self._driver.create_snapshot('context', snapshot_nfs)
self.assertTrue(self.mock_log.warning.called)
ssh.HNASSSHBackend.get_nfs_host_list.assert_called_once_with(
hnas_id)
ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call(
['172.24.44.200(ro)'], share_id=hnas_id)
ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call(
['172.24.44.200(rw)'], share_id=hnas_id)
ssh.HNASSSHBackend.create_directory.assert_called_once_with(
'/snapshots/' + hnas_id + '/' + snapshot_nfs['id'])
@ddt.data(snapshot_nfs, snapshot_cifs,
snapshot_mount_support_nfs, snapshot_mount_support_cifs)
def test_delete_snapshot(self, snapshot):
hnas_share_id = snapshot['share_id']
hnas_snapshot_id = snapshot['id']
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted")
self.mock_object(ssh.HNASSSHBackend, "tree_delete")
self.mock_object(ssh.HNASSSHBackend, "delete_directory")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_del")
self.mock_object(ssh.HNASSSHBackend, "cifs_share_del")
self._driver.delete_snapshot('context', snapshot)
self.assertTrue(self.mock_log.debug.called)
self.assertTrue(self.mock_log.info.called)
driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with()
ssh.HNASSSHBackend.tree_delete.assert_called_once_with(
'/snapshots/' + hnas_share_id + '/' + snapshot['id'])
ssh.HNASSSHBackend.delete_directory.assert_called_once_with(
'/snapshots/' + hnas_share_id)
if snapshot['share']['share_proto'].lower() == 'nfs':
if snapshot['share'].get('mount_snapshot_support'):
ssh.HNASSSHBackend.nfs_export_del.assert_called_once_with(
snapshot_id=hnas_snapshot_id)
else:
ssh.HNASSSHBackend.nfs_export_del.assert_not_called()
else:
if snapshot['share'].get('mount_snapshot_support'):
ssh.HNASSSHBackend.cifs_share_del.assert_called_once_with(
hnas_snapshot_id)
else:
ssh.HNASSSHBackend.cifs_share_del.assert_not_called()
def test_delete_managed_snapshot(self):
hnas_id = manage_snapshot['share_id']
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted")
self.mock_object(ssh.HNASSSHBackend, "tree_delete")
self.mock_object(ssh.HNASSSHBackend, "delete_directory")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_del")
self.mock_object(ssh.HNASSSHBackend, "cifs_share_del")
self._driver.delete_snapshot('context', manage_snapshot)
self.assertTrue(self.mock_log.debug.called)
self.assertTrue(self.mock_log.info.called)
driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with()
ssh.HNASSSHBackend.tree_delete.assert_called_once_with(
manage_snapshot['provider_location'])
ssh.HNASSSHBackend.delete_directory.assert_called_once_with(
'/snapshots/' + hnas_id)
@ddt.data(share_nfs, share_cifs)
def test_ensure_share(self, share):
result = self._driver.ensure_share('context', share)
ssh.HNASSSHBackend.check_vvol.assert_called_once_with(share['id'])
ssh.HNASSSHBackend.check_quota.assert_called_once_with(share['id'])
expected = [
self._get_export(
share['id'], share['share_proto'], self._driver.hnas_evs_ip,
False),
self._get_export(
share['id'], share['share_proto'],
self._driver.hnas_admin_network_ip, True)]
if share['share_proto'].lower() == 'nfs':
ssh.HNASSSHBackend.check_export.assert_called_once_with(
share['id'])
self.assertFalse(ssh.HNASSSHBackend.check_cifs.called)
else:
ssh.HNASSSHBackend.check_cifs.assert_called_once_with(share['id'])
self.assertFalse(ssh.HNASSSHBackend.check_export.called)
self.assertEqual(expected, result)
def test_ensure_share_invalid_protocol(self):
ex = self.assertRaises(exception.ShareBackendException,
self._driver.ensure_share, 'context',
invalid_share)
self.assertEqual(invalid_protocol_msg, ex.msg)
def test_shrink_share(self):
self.mock_object(ssh.HNASSSHBackend, "get_share_usage", mock.Mock(
return_value=10))
self.mock_object(ssh.HNASSSHBackend, "modify_quota")
self._driver.shrink_share(share_nfs, 11)
ssh.HNASSSHBackend.get_share_usage.assert_called_once_with(
share_nfs['id'])
ssh.HNASSSHBackend.modify_quota.assert_called_once_with(
share_nfs['id'], 11)
def test_shrink_share_new_size_lower_than_usage(self):
self.mock_object(ssh.HNASSSHBackend, "get_share_usage", mock.Mock(
return_value=10))
self.assertRaises(exception.ShareShrinkingPossibleDataLoss,
self._driver.shrink_share, share_nfs, 9)
ssh.HNASSSHBackend.get_share_usage.assert_called_once_with(
share_nfs['id'])
def test_extend_share(self):
self.mock_object(ssh.HNASSSHBackend, "get_stats", mock.Mock(
return_value=(500, 200, True)))
self.mock_object(ssh.HNASSSHBackend, "modify_quota")
self._driver.extend_share(share_nfs, 150)
ssh.HNASSSHBackend.get_stats.assert_called_once_with()
ssh.HNASSSHBackend.modify_quota.assert_called_once_with(
share_nfs['id'], 150)
def test_extend_share_with_no_available_space_in_fs(self):
self.mock_object(ssh.HNASSSHBackend, "get_stats", mock.Mock(
return_value=(500, 200, False)))
self.mock_object(ssh.HNASSSHBackend, "modify_quota")
self.assertRaises(exception.HNASBackendException,
self._driver.extend_share, share_nfs, 1000)
ssh.HNASSSHBackend.get_stats.assert_called_once_with()
@ddt.data(share_nfs, share_cifs)
def test_manage_existing(self, share):
expected_exports = [
self._get_export(
share['id'], share['share_proto'], self._driver.hnas_evs_ip,
False),
self._get_export(
share['id'], share['share_proto'],
self._driver.hnas_admin_network_ip, True)]
expected_out = {'size': share['size'],
'export_locations': expected_exports}
self.mock_object(ssh.HNASSSHBackend, "get_share_quota", mock.Mock(
return_value=share['size']))
out = self._driver.manage_existing(share, 'option')
self.assertEqual(expected_out, out)
ssh.HNASSSHBackend.get_share_quota.assert_called_once_with(
share['id'])
def test_manage_existing_no_quota(self):
self.mock_object(ssh.HNASSSHBackend, "get_share_quota", mock.Mock(
return_value=None))
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, share_nfs, 'option')
ssh.HNASSSHBackend.get_share_quota.assert_called_once_with(
share_nfs['id'])
def test_manage_existing_wrong_share_id(self):
self.mock_object(self.fake_private_storage, 'get',
mock.Mock(return_value='Wrong_share_id'))
self.assertRaises(exception.HNASBackendException,
self._driver.manage_existing, share_nfs, 'option')
@ddt.data(':/', '1.1.1.1:/share_id', '1.1.1.1:/shares',
'1.1.1.1:shares/share_id', ':/share_id')
def test_manage_existing_wrong_path_format_nfs(self, wrong_location):
expected_exception = ("Share backend error: Incorrect path. It "
"should have the following format: "
"IP:/shares/share_id.")
self._test_manage_existing_wrong_path(
share_nfs.copy(), expected_exception, wrong_location)
@ddt.data('\\\\1.1.1.1', '1.1.1.1\\share_id', '1.1.1.1\\shares\\share_id',
'\\\\1.1.1.1\\shares\\share_id', '\\\\share_id')
def test_manage_existing_wrong_path_format_cifs(self, wrong_location):
expected_exception = ("Share backend error: Incorrect path. It should "
"have the following format: \\\\IP\\share_id.")
self._test_manage_existing_wrong_path(
share_cifs.copy(), expected_exception, wrong_location)
def _test_manage_existing_wrong_path(
self, share, expected_exception, wrong_location):
share['export_locations'] = [{'path': wrong_location}]
ex = self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, 'option')
self.assertEqual(expected_exception, ex.msg)
def test_manage_existing_wrong_evs_ip(self):
share_nfs['export_locations'] = [{'path': '172.24.44.189:/shares/'
'aa4a7710-f326-41fb-ad18-'}]
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share_nfs,
'option')
def test_manage_existing_invalid_host(self):
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share_invalid_host,
'option')
def test_manage_existing_invalid_protocol(self):
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, invalid_share,
'option')
@ddt.data(True, False)
def test_unmanage(self, has_export_locations):
share_copy = share_nfs.copy()
if not has_export_locations:
share_copy['export_locations'] = []
self._driver.unmanage(share_copy)
self.assertTrue(self.fake_private_storage.delete.called)
self.assertTrue(self.mock_log.info.called)
def test_get_network_allocations_number(self):
result = self._driver.get_network_allocations_number()
self.assertEqual(0, result)
@ddt.data([share_nfs, snapshot_nfs], [share_cifs, snapshot_cifs])
@ddt.unpack
def test_create_share_from_snapshot(self, share, snapshot):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "vvol_create")
self.mock_object(ssh.HNASSSHBackend, "quota_add")
self.mock_object(ssh.HNASSSHBackend, "tree_clone")
self.mock_object(ssh.HNASSSHBackend, "cifs_share_add")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add")
result = self._driver.create_share_from_snapshot('context',
share,
snapshot)
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id'])
ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'],
share['size'])
ssh.HNASSSHBackend.tree_clone.assert_called_once_with(
'/snapshots/' + share['id'] + '/' + snapshot['id'],
'/shares/' + share['id'])
expected = [
self._get_export(
share['id'], share['share_proto'], self._driver.hnas_evs_ip,
False),
self._get_export(
share['id'], share['share_proto'],
self._driver.hnas_admin_network_ip, True)]
if share['share_proto'].lower() == 'nfs':
ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(
share['id'])
self.assertFalse(ssh.HNASSSHBackend.cifs_share_add.called)
else:
ssh.HNASSSHBackend.cifs_share_add.assert_called_once_with(
share['id'])
self.assertFalse(ssh.HNASSSHBackend.nfs_export_add.called)
self.assertEqual(expected, result)
def test_create_share_from_snapshot_empty_snapshot(self):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "vvol_create")
self.mock_object(ssh.HNASSSHBackend, "quota_add")
self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock(
side_effect=exception.HNASNothingToCloneException('msg')))
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add")
result = self._driver.create_share_from_snapshot('context', share_nfs,
snapshot_nfs)
expected = [
self._get_export(
share_nfs['id'], share_nfs['share_proto'],
self._driver.hnas_evs_ip, False),
self._get_export(
share_nfs['id'], share_nfs['share_proto'],
self._driver.hnas_admin_network_ip, True)]
self.assertEqual(expected, result)
self.assertTrue(self.mock_log.warning.called)
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share_nfs['id'])
ssh.HNASSSHBackend.quota_add.assert_called_once_with(share_nfs['id'],
share_nfs['size'])
ssh.HNASSSHBackend.tree_clone.assert_called_once_with(
'/snapshots/' + share_nfs['id'] + '/' + snapshot_nfs['id'],
'/shares/' + share_nfs['id'])
ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(
share_nfs['id'])
def test_create_share_from_snapshot_invalid_protocol(self):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "vvol_create")
self.mock_object(ssh.HNASSSHBackend, "quota_add")
self.mock_object(ssh.HNASSSHBackend, "tree_clone")
ex = self.assertRaises(exception.ShareBackendException,
self._driver.create_share_from_snapshot,
'context', invalid_share, snapshot_nfs)
self.assertEqual(invalid_protocol_msg, ex.msg)
def test_create_share_from_snapshot_cleanup(self):
dest_path = '/snapshots/' + share_nfs['id'] + '/' + snapshot_nfs['id']
src_path = '/shares/' + share_nfs['id']
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(ssh.HNASSSHBackend, "vvol_create")
self.mock_object(ssh.HNASSSHBackend, "quota_add")
self.mock_object(ssh.HNASSSHBackend, "tree_clone")
self.mock_object(ssh.HNASSSHBackend, "vvol_delete")
self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock(
side_effect=exception.HNASBackendException(
msg='Error adding nfs export.')))
self.assertRaises(exception.HNASBackendException,
self._driver.create_share_from_snapshot,
'context', share_nfs, snapshot_nfs)
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(
share_nfs['id'])
ssh.HNASSSHBackend.quota_add.assert_called_once_with(
share_nfs['id'], share_nfs['size'])
ssh.HNASSSHBackend.tree_clone.assert_called_once_with(
dest_path, src_path)
ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(
share_nfs['id'])
ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(
share_nfs['id'])
def test__check_fs_mounted(self):
self._driver._check_fs_mounted()
ssh.HNASSSHBackend.check_fs_mounted.assert_called_once_with()
def test__check_fs_mounted_not_mounted(self):
self.mock_object(ssh.HNASSSHBackend, 'check_fs_mounted', mock.Mock(
return_value=False))
self.assertRaises(exception.HNASBackendException,
self._driver._check_fs_mounted)
ssh.HNASSSHBackend.check_fs_mounted.assert_called_once_with()
def test__update_share_stats(self):
fake_data = {
'share_backend_name': self._driver.backend_name,
'driver_handles_share_servers':
self._driver.driver_handles_share_servers,
'vendor_name': 'Hitachi',
'driver_version': '4.0.0',
'storage_protocol': 'NFS_CIFS',
'total_capacity_gb': 1000,
'free_capacity_gb': 200,
'reserved_percentage': driver.CONF.reserved_share_percentage,
'qos': False,
'thin_provisioning': True,
'dedupe': True,
'revert_to_snapshot_support': True,
'mount_snapshot_support': True,
}
self.mock_object(ssh.HNASSSHBackend, 'get_stats', mock.Mock(
return_value=(1000, 200, True)))
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted",
mock.Mock())
self.mock_object(manila.share.driver.ShareDriver,
'_update_share_stats')
self._driver._update_share_stats()
self.assertTrue(self._driver.hnas.get_stats.called)
(manila.share.driver.ShareDriver._update_share_stats.
assert_called_once_with(fake_data))
self.assertTrue(self.mock_log.info.called)
@ddt.data(snapshot_nfs, snapshot_cifs,
snapshot_mount_support_nfs, snapshot_mount_support_cifs)
def test_ensure_snapshot(self, snapshot):
result = self._driver.ensure_snapshot('context', snapshot)
if snapshot['share'].get('mount_snapshot_support'):
expected = [
self._get_export(
snapshot['id'], snapshot['share']['share_proto'],
self._driver.hnas_evs_ip, False, is_snapshot=True),
self._get_export(
snapshot['id'], snapshot['share']['share_proto'],
self._driver.hnas_admin_network_ip, True,
is_snapshot=True)]
if snapshot['share']['share_proto'].lower() == 'nfs':
ssh.HNASSSHBackend.check_export.assert_called_once_with(
snapshot['id'], is_snapshot=True)
self.assertFalse(ssh.HNASSSHBackend.check_cifs.called)
else:
ssh.HNASSSHBackend.check_cifs.assert_called_once_with(
snapshot['id'])
self.assertFalse(ssh.HNASSSHBackend.check_export.called)
else:
expected = None
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot['provider_location'])
self.assertEqual(expected, result)
def test_manage_existing_snapshot(self):
self.mock_object(ssh.HNASSSHBackend, 'check_directory',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_ensure_snapshot',
mock.Mock(return_value=[]))
path_info = manage_snapshot['provider_location'].split('/')
hnas_snapshot_id = path_info[3]
out = self._driver.manage_existing_snapshot(manage_snapshot,
{'size': 20})
ssh.HNASSSHBackend.check_directory.assert_called_with(
'/snapshots/aa4a7710-f326-41fb-ad18-b4ad587fc87a'
'/snapshot18-05-2106')
self._driver._ensure_snapshot.assert_called_with(
manage_snapshot,
hnas_snapshot_id)
self.assertEqual(20, out['size'])
self.assertTrue(self.mock_log.debug.called)
self.assertTrue(self.mock_log.info.called)
@ddt.data(None, exception.HNASItemNotFoundException('Fake error.'))
def test_manage_existing_snapshot_with_mount_support(self, exc):
export_locations = [{
'path': '172.24.44.10:/snapshots/'
'3377b015-a695-4a5a-8aa5-9b931b023380'}]
self.mock_object(ssh.HNASSSHBackend, 'check_directory',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_ensure_snapshot',
mock.Mock(return_value=[], side_effect=exc))
self.mock_object(self._driver, '_get_export_locations',
mock.Mock(return_value=export_locations))
if exc:
self.mock_object(self._driver, '_create_export')
path_info = snapshot_mount_support_nfs['provider_location'].split('/')
hnas_snapshot_id = path_info[3]
out = self._driver.manage_existing_snapshot(
snapshot_mount_support_nfs,
{'size': 20, 'export_locations': export_locations})
ssh.HNASSSHBackend.check_directory.assert_called_with(
'/snapshots/62125744-fcdd-4f55-a8c1-d1498102f634'
'/3377b015-a695-4a5a-8aa5-9b931b023380')
self._driver._ensure_snapshot.assert_called_with(
snapshot_mount_support_nfs,
hnas_snapshot_id)
self._driver._get_export_locations.assert_called_with(
snapshot_mount_support_nfs['share']['share_proto'],
hnas_snapshot_id,
is_snapshot=True)
if exc:
self._driver._create_export.assert_called_with(
snapshot_mount_support_nfs['share_id'],
snapshot_mount_support_nfs['share']['share_proto'],
snapshot_id=hnas_snapshot_id)
self.assertEqual(20, out['size'])
self.assertEqual(export_locations, out['export_locations'])
self.assertTrue(self.mock_log.debug.called)
self.assertTrue(self.mock_log.info.called)
@ddt.data('fake_size', '128GB', '512 GB', {'size': 128})
def test_manage_snapshot_invalid_size_exception(self, size):
self.assertRaises(exception.ManageInvalidShareSnapshot,
self._driver.manage_existing_snapshot,
manage_snapshot, {'size': size})
def test_manage_snapshot_size_not_provided_exception(self):
self.assertRaises(exception.ManageInvalidShareSnapshot,
self._driver.manage_existing_snapshot,
manage_snapshot, {})
@ddt.data('/root/snapshot_id', '/snapshots/share1/snapshot_id',
'/directory1', 'snapshots/share1/snapshot_id')
def test_manage_snapshot_invalid_path_exception(self, path):
snap_copy = manage_snapshot.copy()
snap_copy['provider_location'] = path
self.assertRaises(exception.ManageInvalidShareSnapshot,
self._driver.manage_existing_snapshot,
snap_copy, {'size': 20})
self.assertTrue(self.mock_log.debug.called)
def test_manage_inexistent_snapshot_exception(self):
self.mock_object(ssh.HNASSSHBackend, 'check_directory',
mock.Mock(return_value=False))
self.assertRaises(exception.ManageInvalidShareSnapshot,
self._driver.manage_existing_snapshot,
manage_snapshot, {'size': 20})
self.assertTrue(self.mock_log.debug.called)
def test_unmanage_snapshot(self):
self._driver.unmanage_snapshot(snapshot_nfs)
self.assertTrue(self.mock_log.info.called)
@ddt.data({'snap': snapshot_nfs, 'exc': None},
{'snap': snapshot_cifs, 'exc': None},
{'snap': snapshot_nfs,
'exc': exception.HNASNothingToCloneException('fake')},
{'snap': snapshot_cifs,
'exc': exception.HNASNothingToCloneException('fake')})
@ddt.unpack
def test_revert_to_snapshot(self, exc, snap):
self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted")
self.mock_object(ssh.HNASSSHBackend, 'tree_delete')
self.mock_object(ssh.HNASSSHBackend, 'vvol_create')
self.mock_object(ssh.HNASSSHBackend, 'quota_add')
self.mock_object(ssh.HNASSSHBackend, 'tree_clone',
mock.Mock(side_effect=exc))
self._driver.revert_to_snapshot('context', snap, None, None)
driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with()
ssh.HNASSSHBackend.tree_delete.assert_called_once_with(
'/'.join(('/shares', snap['share_id'])))
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(
snap['share_id'])
ssh.HNASSSHBackend.quota_add.assert_called_once_with(
snap['share_id'], 2)
ssh.HNASSSHBackend.tree_clone.assert_called_once_with(
'/'.join(('/snapshots', snap['share_id'], snap['id'])),
'/'.join(('/shares', snap['share_id'])))
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snap['provider_location'])
if exc:
self.assertTrue(self.mock_log.warning.called)
self.assertTrue(self.mock_log.info.called)
def test_nfs_snapshot_update_access_allow(self):
access1 = {
'access_type': 'ip',
'access_to': '172.24.10.10',
}
access2 = {
'access_type': 'ip',
'access_to': '172.31.20.20',
}
access_list = [access1, access2]
self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule")
self._driver.snapshot_update_access('ctxt', snapshot_nfs, access_list,
access_list, [])
ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with(
[access1['access_to'] + '(ro)', access2['access_to'] + '(ro)'],
snapshot_id=snapshot_nfs['id'])
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot_nfs['provider_location'])
self.assertTrue(self.mock_log.debug.called)
def test_nfs_snapshot_update_access_deny(self):
access1 = {
'access_type': 'ip',
'access_to': '172.24.10.10',
}
self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule")
self._driver.snapshot_update_access('ctxt', snapshot_nfs, [],
[], [access1])
ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with(
[], snapshot_id=snapshot_nfs['id'])
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot_nfs['provider_location'])
self.assertTrue(self.mock_log.debug.called)
def test_nfs_snapshot_update_access_invalid_access_type(self):
access1 = {
'access_type': 'user',
'access_to': 'user1',
}
self.assertRaises(exception.InvalidSnapshotAccess,
self._driver.snapshot_update_access, 'ctxt',
snapshot_nfs, [access1], [], [])
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot_nfs['provider_location'])
def test_cifs_snapshot_update_access_allow(self):
access1 = {
'access_type': 'user',
'access_to': 'fake_user1',
}
self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access')
self._driver.snapshot_update_access('ctxt', snapshot_cifs, [access1],
[access1], [])
ssh.HNASSSHBackend.cifs_allow_access.assert_called_with(
snapshot_cifs['id'], access1['access_to'], 'ar', is_snapshot=True)
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot_cifs['provider_location'])
self.assertTrue(self.mock_log.debug.called)
def test_cifs_snapshot_update_access_deny(self):
access1 = {
'access_type': 'user',
'access_to': 'fake_user1',
}
self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access')
self._driver.snapshot_update_access('ctxt', snapshot_cifs, [], [],
[access1])
ssh.HNASSSHBackend.cifs_deny_access.assert_called_with(
snapshot_cifs['id'], access1['access_to'], is_snapshot=True)
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot_cifs['provider_location'])
self.assertTrue(self.mock_log.debug.called)
def test_cifs_snapshot_update_access_recovery_mode(self):
access1 = {
'access_type': 'user',
'access_to': 'fake_user1',
}
access2 = {
'access_type': 'user',
'access_to': 'HDS\\fake_user2',
}
access_list = [access1, access2]
permission_list = [('fake_user1', 'ar'), ('HDS\\fake_user2', 'ar')]
formatted_user = r'"\{1}{0}\{1}"'.format(access2['access_to'], '"')
self.mock_object(ssh.HNASSSHBackend, 'list_cifs_permissions',
mock.Mock(return_value=permission_list))
self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access')
self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access')
self._driver.snapshot_update_access('ctxt', snapshot_cifs, access_list,
[], [])
ssh.HNASSSHBackend.list_cifs_permissions.assert_called_once_with(
snapshot_cifs['id'])
ssh.HNASSSHBackend.cifs_deny_access.assert_called_with(
snapshot_cifs['id'], formatted_user, is_snapshot=True)
ssh.HNASSSHBackend.cifs_allow_access.assert_called_with(
snapshot_cifs['id'], access2['access_to'].replace('\\', '\\\\'),
'ar', is_snapshot=True)
ssh.HNASSSHBackend.check_directory.assert_called_once_with(
snapshot_cifs['provider_location'])
self.assertTrue(self.mock_log.debug.called)
|
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.account_product_access import AccountProductAccess
globals()['AccountProductAccess'] = AccountProductAccess
class AccountProductAccessNullable(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account_data': (bool, none_type,), # noqa: E501
'statements': (bool, none_type,), # noqa: E501
'tax_documents': (bool, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_data': 'account_data', # noqa: E501
'statements': 'statements', # noqa: E501
'tax_documents': 'tax_documents', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AccountProductAccessNullable - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_data (bool, none_type): Allow the application to access account data. Only used by certain partners. If relevant to the partner and unset, defaults to `true`.. [optional] if omitted the server will use the default value of True # noqa: E501
statements (bool, none_type): Allow the application to access bank statements. Only used by certain partners. If relevant to the partner and unset, defaults to `true`.. [optional] if omitted the server will use the default value of True # noqa: E501
tax_documents (bool, none_type): Allow the application to access tax documents. Only used by certain partners. If relevant to the partner and unset, defaults to `true`.. [optional] if omitted the server will use the default value of True # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
AccountProductAccess,
],
'oneOf': [
],
}
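# ---------------------------------------------------------------------------
# Illustrative usage sketch (hand-added, not produced by the generator): it
# only exercises the keyword arguments documented in __init__ above, and the
# __main__ guard keeps it from running when the module is imported.
if __name__ == "__main__":
    example = AccountProductAccessNullable(
        account_data=True,    # allow access to account data
        statements=False,     # deny access to statements
        tax_documents=None,   # the fields are nullable, so none_type is accepted
    )
    print(example.account_data, example.statements, example.tax_documents)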
|
|
#!/usr/bin/env python
import argparse
import zmq
# import uuid
import os
import sys
import platform
import random
import time
import pickle
import logging
import queue
import threading
import json
from parsl.version import VERSION as PARSL_VERSION
from ipyparallel.serialize import serialize_object
LOOP_SLOWDOWN = 0.0 # in seconds
HEARTBEAT_CODE = (2 ** 32) - 1
PKL_HEARTBEAT_CODE = pickle.dumps((2 ** 32) - 1)
class ShutdownRequest(Exception):
''' Exception raised when any async component receives a ShutdownRequest
'''
def __init__(self):
self.tstamp = time.time()
def __repr__(self):
return "Shutdown request received at {}".format(self.tstamp)
class ManagerLost(Exception):
''' Task lost due to worker loss. Worker is considered lost when multiple heartbeats
have been missed.
'''
def __init__(self, worker_id):
self.worker_id = worker_id
self.tstamp = time.time()
def __repr__(self):
return "Task failure due to loss of worker {}".format(self.worker_id)
class BadRegistration(Exception):
''' A new Manager tried to join the executor with a BadRegistration message
'''
def __init__(self, worker_id, critical=False):
self.worker_id = worker_id
self.tstamp = time.time()
self.handled = "critical" if critical else "suppressed"
def __repr__(self):
return "Manager:{} caused a {} failure".format(self.worker_id,
self.handled)
class Interchange(object):
""" Interchange is a task orchestrator for distributed systems.
1. Asynchronously queue large volume of tasks (>100K)
    2. Allow for workers to join and leave the pool
3. Detect workers that have failed using heartbeats
4. Service single and batch requests from workers
    5. Be aware of each worker's resource capacity,
       e.g. schedule only jobs that fit into its walltime.
TODO: We most likely need a PUB channel to send out global commands, like shutdown
"""
def __init__(self,
client_address="127.0.0.1",
interchange_address="127.0.0.1",
client_ports=(50055, 50056, 50057),
worker_ports=None,
worker_port_range=(54000, 55000),
heartbeat_threshold=60,
logdir=".",
logging_level=logging.INFO,
suppress_failure=False,
):
"""
Parameters
----------
client_address : str
The ip address at which the parsl client can be reached. Default: "127.0.0.1"
interchange_address : str
The ip address at which the workers will be able to reach the Interchange. Default: "127.0.0.1"
client_ports : triple(int, int, int)
The ports at which the client can be reached
worker_ports : tuple(int, int)
The specific two ports at which workers will connect to the Interchange. Default: None
worker_port_range : tuple(int, int)
The interchange picks ports at random from the range which will be used by workers.
             This is overridden when the worker_ports option is set. Default: (54000, 55000)
heartbeat_threshold : int
Number of seconds since the last heartbeat after which worker is considered lost.
logdir : str
Parsl log directory paths. Logs and temp files go here. Default: '.'
logging_level : int
Logging level as defined in the logging module. Default: logging.INFO (20)
suppress_failure : Bool
When set to True, the interchange will attempt to suppress failures. Default: False
"""
self.logdir = logdir
try:
os.makedirs(self.logdir)
except FileExistsError:
pass
start_file_logger("{}/interchange.log".format(self.logdir), level=logging_level)
logger.debug("Initializing Interchange process")
self.client_address = client_address
self.interchange_address = interchange_address
self.suppress_failure = suppress_failure
logger.info("Attempting connection to client at {} on ports: {},{},{}".format(
client_address, client_ports[0], client_ports[1], client_ports[2]))
self.context = zmq.Context()
self.task_incoming = self.context.socket(zmq.DEALER)
self.task_incoming.set_hwm(0)
self.task_incoming.RCVTIMEO = 10 # in milliseconds
self.task_incoming.connect("tcp://{}:{}".format(client_address, client_ports[0]))
self.results_outgoing = self.context.socket(zmq.DEALER)
self.results_outgoing.set_hwm(0)
self.results_outgoing.connect("tcp://{}:{}".format(client_address, client_ports[1]))
self.command_channel = self.context.socket(zmq.REP)
self.command_channel.RCVTIMEO = 1000 # in milliseconds
self.command_channel.connect("tcp://{}:{}".format(client_address, client_ports[2]))
logger.info("Connected to client")
self.pending_task_queue = queue.Queue(maxsize=10 ** 6)
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.task_outgoing = self.context.socket(zmq.ROUTER)
self.task_outgoing.set_hwm(0)
self.results_incoming = self.context.socket(zmq.ROUTER)
self.results_incoming.set_hwm(0)
if self.worker_ports:
self.worker_task_port = self.worker_ports[0]
self.worker_result_port = self.worker_ports[1]
self.task_outgoing.bind("tcp://*:{}".format(self.worker_task_port))
self.results_incoming.bind("tcp://*:{}".format(self.worker_result_port))
else:
self.worker_task_port = self.task_outgoing.bind_to_random_port('tcp://*',
min_port=worker_port_range[0],
max_port=worker_port_range[1], max_tries=100)
self.worker_result_port = self.results_incoming.bind_to_random_port('tcp://*',
min_port=worker_port_range[0],
max_port=worker_port_range[1], max_tries=100)
logger.info("Bound to ports {},{} for incoming worker connections".format(
self.worker_task_port, self.worker_result_port))
self._ready_manager_queue = {}
self.heartbeat_threshold = heartbeat_threshold
self.current_platform = {'parsl_v': PARSL_VERSION,
'python_v': "{}.{}.{}".format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro),
'os': platform.system(),
'hname': platform.node(),
'dir': os.getcwd()}
def get_tasks(self, count):
""" Obtains a batch of tasks from the internal pending_task_queue
Parameters
----------
count: int
Count of tasks to get from the queue
Returns
-------
        List of up to count tasks. May return fewer than count, down to an empty list
eg. [{'task_id':<x>, 'buffer':<buf>} ... ]
"""
tasks = []
for i in range(0, count):
try:
x = self.pending_task_queue.get(block=False)
except queue.Empty:
break
else:
tasks.append(x)
return tasks
def migrate_tasks_to_internal(self, kill_event):
"""Pull tasks from the incoming tasks 0mq pipe onto the internal
pending task queue
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
logger.info("[TASK_PULL_THREAD] Starting")
task_counter = 0
poller = zmq.Poller()
poller.register(self.task_incoming, zmq.POLLIN)
while not kill_event.is_set():
try:
msg = self.task_incoming.recv_pyobj()
except zmq.Again:
# We just timed out while attempting to receive
logger.debug("[TASK_PULL_THREAD] {} tasks in internal queue".format(self.pending_task_queue.qsize()))
continue
if msg == 'STOP':
kill_event.set()
break
else:
self.pending_task_queue.put(msg)
task_counter += 1
logger.debug("[TASK_PULL_THREAD] Fetched task:{}".format(task_counter))
def _command_server(self, kill_event):
""" Command server to run async command to the interchange
"""
logger.debug("[COMMAND] Command Server Starting")
while not kill_event.is_set():
try:
command_req = self.command_channel.recv_pyobj()
logger.debug("[COMMAND] Received command request: {}".format(command_req))
if command_req == "OUTSTANDING_C":
outstanding = self.pending_task_queue.qsize()
for manager in self._ready_manager_queue:
outstanding += len(self._ready_manager_queue[manager]['tasks'])
reply = outstanding
elif command_req == "MANAGERS":
reply = []
for manager in self._ready_manager_queue:
resp = (manager.decode('utf-8'),
len(self._ready_manager_queue[manager]['tasks']),
self._ready_manager_queue[manager]['active'])
reply.append(resp)
elif command_req.startswith("HOLD_WORKER"):
cmd, s_manager = command_req.split(';')
manager = s_manager.encode('utf-8')
logger.info("[CMD] Received HOLD_WORKER for {}".format(manager))
if manager in self._ready_manager_queue:
self._ready_manager_queue[manager]['active'] = False
reply = True
else:
reply = False
elif command_req == "SHUTDOWN":
logger.info("[CMD] Received SHUTDOWN command")
kill_event.set()
reply = True
else:
reply = None
logger.debug("[COMMAND] Reply: {}".format(reply))
self.command_channel.send_pyobj(reply)
except zmq.Again:
logger.debug("[COMMAND] is alive")
continue
def start(self, poll_period=1):
""" Start the NeedNameQeueu
Parameters:
----------
poll_period : int
Poll period in milliseconds
TODO: Move task receiving to a thread
"""
logger.info("Incoming ports bound")
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal,
args=(self._kill_event,))
self._task_puller_thread.start()
self._command_thread = threading.Thread(target=self._command_server,
args=(self._kill_event,))
self._command_thread.start()
poller = zmq.Poller()
# poller.register(self.task_incoming, zmq.POLLIN)
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
# These are managers which we should examine in an iteration
# for scheduling a job (or maybe any other attention?).
# Anything altering the state of the manager should add it
# onto this list.
interesting_managers = set()
while not self._kill_event.is_set():
logger.debug("[MAIN] starting one main loop iteration")
logger.debug("[MAIN] entering poll")
self.socks = dict(poller.poll(timeout=poll_period))
logger.debug("[MAIN] leaving poll")
# Listen for requests for work
if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN:
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode('utf-8'))
reg_flag = True
except Exception:
logger.warning("[MAIN] Got a non-json registration message from manager:{}".format(
manager))
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
# By default we set up to ignore bad nodes/registration messages.
self._ready_manager_queue[manager] = {'last': time.time(),
'free_capacity': 0,
'active': True,
'tasks': []}
if reg_flag is True:
interesting_managers.add(manager)
logger.info("[MAIN] Adding manager: {} to ready queue".format(manager))
self._ready_manager_queue[manager].update(msg)
logger.info("[MAIN] Registration info for manager {}: {}".format(manager, msg))
if (msg['python_v'] != self.current_platform['python_v'] or
msg['parsl_v'] != self.current_platform['parsl_v']):
logger.warn("[MAIN] Manager {} has incompatible version info with the interchange".format(manager))
if self.suppress_failure is False:
logger.debug("Setting kill event")
self._kill_event.set()
e = ManagerLost(manager)
result_package = {'task_id': -1, 'exception': serialize_object(e)}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
else:
logger.debug("[MAIN] Suppressing shutdown due to version incompatibility")
else:
# Registration has failed.
if self.suppress_failure is False:
self._kill_event.set()
e = BadRegistration(manager, critical=True)
result_package = {'task_id': -1, 'exception': serialize_object(e)}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
else:
logger.debug("[MAIN] Suppressing bad registration from manager:{}".format(
manager))
else:
tasks_requested = int.from_bytes(message[1], "little")
logger.debug("[MAIN] Manager {} requested {} tasks".format(manager, tasks_requested))
self._ready_manager_queue[manager]['last'] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sends heartbeat".format(manager))
self.task_outgoing.send_multipart([manager, b'', PKL_HEARTBEAT_CODE])
else:
self._ready_manager_queue[manager]['free_capacity'] = tasks_requested
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
# If we had received any requests, check if there are tasks that could be passed
logger.debug("Managers count (total/interesting): {}/{}".format(len(self._ready_manager_queue),
len(interesting_managers)))
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while shuffled_managers and not self.pending_task_queue.empty(): # cf. the if statement above...
manager = shuffled_managers.pop()
if (self._ready_manager_queue[manager]['free_capacity'] and
self._ready_manager_queue[manager]['active']):
tasks = self.get_tasks(self._ready_manager_queue[manager]['free_capacity'])
if tasks:
self.task_outgoing.send_multipart([manager, b'', pickle.dumps(tasks)])
task_count = len(tasks)
count += task_count
tids = [t['task_id'] for t in tasks]
logger.debug("[MAIN] Sent tasks: {} to {}".format(tids, manager))
self._ready_manager_queue[manager]['free_capacity'] -= task_count
self._ready_manager_queue[manager]['tasks'].extend(tids)
logger.info("[MAIN] Sent tasks: {} to manager {}".format(tids, manager))
if self._ready_manager_queue[manager]['free_capacity'] > 0:
logger.info("[MAIN] Manager {} still has free_capacity {}".format(manager, self._ready_manager_queue[manager]['free_capacity']))
# ... so keep it in the interesting_managers list
else:
logger.info("[MAIN] Manager {} is now saturated".format(manager))
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
# logger.debug("Nothing to send to manager {}".format(manager))
logger.debug("[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(len(interesting_managers)))
else:
logger.debug("[MAIN] either no interesting managers or no tasks, so skipping manager pass")
# Receive any results and forward to client
if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN:
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning("[MAIN] Received a result from a un-registered manager: {}".format(manager))
else:
logger.debug("[MAIN] Got {} result items in batch".format(len(b_messages)))
for b_message in b_messages:
r = pickle.loads(b_message)
# logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
self._ready_manager_queue[manager]['tasks'].remove(r['task_id'])
self.results_outgoing.send_multipart(b_messages)
logger.debug("[MAIN] Current tasks: {}".format(self._ready_manager_queue[manager]['tasks']))
logger.debug("[MAIN] leaving results_incoming section")
logger.debug("[MAIN] entering bad_managers section")
bad_managers = [manager for manager in self._ready_manager_queue if
time.time() - self._ready_manager_queue[manager]['last'] > self.heartbeat_threshold]
for manager in bad_managers:
logger.debug("[MAIN] Last: {} Current: {}".format(self._ready_manager_queue[manager]['last'], time.time()))
logger.warning("[MAIN] Too many heartbeats missed for manager {}".format(manager))
e = ManagerLost(manager)
for tid in self._ready_manager_queue[manager]['tasks']:
result_package = {'task_id': tid, 'exception': serialize_object(e)}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, 'None')
logger.debug("[MAIN] leaving bad_managers section")
logger.debug("[MAIN] ending one main loop iteration")
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting")
def start_file_logger(filename, name='interchange', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Parameters
---------
filename: string
Name of the file to write logs to. Required.
name: string
        Logger name. Default="interchange"
    level: logging.LEVEL
        Set the logging level. Default=logging.DEBUG
    format_string: string
        Format string to use.
Returns
-------
None.
"""
if format_string is None:
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
global logger
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
def starter(comm_q, *args, **kwargs):
"""Start the interchange process
The executor is expected to call this function. The args, kwargs match that of the Interchange.__init__
"""
# logger = multiprocessing.get_logger()
ic = Interchange(*args, **kwargs)
comm_q.put((ic.worker_task_port,
ic.worker_result_port))
ic.start()
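def _example_start_interchange():
    """Illustrative sketch (an assumption, mirroring how an executor is
    expected to use starter()): launch the interchange in a child process and
    read back the two worker-facing ports from the queue. The child keeps
    running until a SHUTDOWN command or a fatal registration failure.
    """
    import multiprocessing
    comm_q = multiprocessing.Queue()
    proc = multiprocessing.Process(target=starter, args=(comm_q,))
    proc.start()
    worker_task_port, worker_result_port = comm_q.get(timeout=120)
    return proc, worker_task_port, worker_result_port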
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--client_address",
help="Client address")
parser.add_argument("-l", "--logdir", default="parsl_worker_logs",
help="Parsl worker log directory")
parser.add_argument("-t", "--task_url",
help="REQUIRED: ZMQ url for receiving tasks")
parser.add_argument("-r", "--result_url",
help="REQUIRED: ZMQ url for posting results")
parser.add_argument("--worker_ports", default=None,
help="OPTIONAL, pair of workers ports to listen on, eg --worker_ports=50001,50005")
parser.add_argument("--suppress_failure", action='store_true',
help="Enables suppression of failures")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
# Setup logging
global logger
format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
logger = logging.getLogger("interchange")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel('DEBUG' if args.debug is True else 'INFO')
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.debug("Starting Interchange")
optionals = {}
optionals['suppress_failure'] = args.suppress_failure
if args.worker_ports:
optionals['worker_ports'] = [int(i) for i in args.worker_ports.split(',')]
ic = Interchange(**optionals)
ic.start()
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import random
import socket
import time
from nose import SkipTest
from xml.dom import minidom
import six
from six.moves import http_client
from six.moves import urllib
from swiftclient import get_auth
from swift.common import constraints
from swift.common.utils import config_true_value
from test import safe_repr
http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT
class AuthenticationFailed(Exception):
pass
class RequestError(Exception):
pass
class ResponseError(Exception):
def __init__(self, response, method=None, path=None):
self.status = response.status
self.reason = response.reason
self.method = method
self.path = path
self.headers = response.getheaders()
for name, value in self.headers:
if name.lower() == 'x-trans-id':
self.txid = value
break
else:
self.txid = None
super(ResponseError, self).__init__()
def __str__(self):
return repr(self)
def __repr__(self):
return '%d: %r (%r %r) txid=%s' % (
self.status, self.reason, self.method, self.path, self.txid)
def listing_empty(method):
for i in range(6):
if len(method()) == 0:
return True
time.sleep(2 ** i)
return False
def listing_items(method):
marker = None
once = True
items = []
while once or items:
for i in items:
yield i
if once or marker:
if marker:
items = method(parms={'marker': marker})
else:
items = method()
if len(items) == 10000:
marker = items[-1]
else:
marker = None
once = False
else:
items = []
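# Illustrative sketch (an assumption, not used by the tests below): how the
# two helpers above are meant to be driven -- listing_items() transparently
# follows the 10000-entry marker pages of a listing method such as
# Account.containers or Container.files.
def _example_count_listing(listing_method):
    return sum(1 for _ in listing_items(listing_method))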
class Connection(object):
def __init__(self, config):
for key in 'auth_host auth_port auth_ssl username password'.split():
if key not in config:
raise SkipTest(
"Missing required configuration parameter: %s" % key)
self.auth_host = config['auth_host']
self.auth_port = int(config['auth_port'])
self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
self.insecure = config_true_value(config.get('insecure', 'false'))
self.auth_prefix = config.get('auth_prefix', '/')
self.auth_version = str(config.get('auth_version', '1'))
self.account = config.get('account')
self.username = config['username']
self.password = config['password']
self.storage_host = None
self.storage_port = None
self.storage_url = None
self.conn_class = None
def get_account(self):
return Account(self, self.account)
def authenticate(self, clone_conn=None):
if clone_conn:
self.conn_class = clone_conn.conn_class
self.storage_host = clone_conn.storage_host
self.storage_url = clone_conn.storage_url
self.storage_port = clone_conn.storage_port
self.storage_token = clone_conn.storage_token
return
if self.auth_version == "1":
auth_path = '%sv1.0' % (self.auth_prefix)
if self.account:
auth_user = '%s:%s' % (self.account, self.username)
else:
auth_user = self.username
else:
auth_user = self.username
auth_path = self.auth_prefix
auth_scheme = 'https://' if self.auth_ssl else 'http://'
auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
auth_url = auth_scheme + auth_netloc + auth_path
authargs = dict(snet=False, tenant_name=self.account,
auth_version=self.auth_version, os_options={},
insecure=self.insecure)
(storage_url, storage_token) = get_auth(
auth_url, auth_user, self.password, **authargs)
if not (storage_url and storage_token):
raise AuthenticationFailed()
x = storage_url.split('/')
if x[0] == 'http:':
self.conn_class = http_client.HTTPConnection
self.storage_port = 80
elif x[0] == 'https:':
self.conn_class = http_client.HTTPSConnection
self.storage_port = 443
else:
raise ValueError('unexpected protocol %s' % (x[0]))
self.storage_host = x[2].split(':')[0]
if ':' in x[2]:
self.storage_port = int(x[2].split(':')[1])
# Make sure storage_url is a string and not unicode, since
# keystoneclient (called by swiftclient) returns them in
# unicode and this would cause troubles when doing
# no_safe_quote query.
self.storage_url = str('/%s/%s' % (x[3], x[4]))
self.account_name = str(x[4])
self.auth_user = auth_user
# With v2 keystone, storage_token is unicode.
# We want it to be string otherwise this would cause
# troubles when doing query with already encoded
# non ascii characters in its headers.
self.storage_token = str(storage_token)
self.user_acl = '%s:%s' % (self.account, self.username)
self.http_connect()
return self.storage_url, self.storage_token
def cluster_info(self):
"""
Retrieve the data in /info, or {} on 404
"""
status = self.make_request('GET', '/info',
cfg={'absolute_path': True})
if status // 100 == 4:
return {}
if not 200 <= status <= 299:
raise ResponseError(self.response, 'GET', '/info')
return json.loads(self.response.read())
def http_connect(self):
self.connection = self.conn_class(self.storage_host,
port=self.storage_port)
# self.connection.set_debuglevel(3)
def make_path(self, path=None, cfg=None):
if path is None:
path = []
if cfg is None:
cfg = {}
if cfg.get('version_only_path'):
return '/' + self.storage_url.split('/')[1]
if path:
quote = urllib.parse.quote
if cfg.get('no_quote') or cfg.get('no_path_quote'):
quote = lambda x: x
return '%s/%s' % (self.storage_url,
'/'.join([quote(i) for i in path]))
else:
return self.storage_url
def make_headers(self, hdrs, cfg=None):
if cfg is None:
cfg = {}
headers = {}
if not cfg.get('no_auth_token'):
headers['X-Auth-Token'] = self.storage_token
if cfg.get('use_token'):
headers['X-Auth-Token'] = cfg.get('use_token')
if isinstance(hdrs, dict):
headers.update(hdrs)
return headers
def make_request(self, method, path=None, data='', hdrs=None, parms=None,
cfg=None):
if path is None:
path = []
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if not cfg.get('absolute_path'):
# Set absolute_path=True to make a request to exactly the given
# path, not storage path + given path. Useful for
# non-account/container/object requests.
path = self.make_path(path, cfg=cfg)
headers = self.make_headers(hdrs, cfg=cfg)
if isinstance(parms, dict) and parms:
quote = urllib.parse.quote
if cfg.get('no_quote') or cfg.get('no_parms_quote'):
quote = lambda x: x
query_args = ['%s=%s' % (quote(x), quote(str(y)))
for (x, y) in parms.items()]
path = '%s?%s' % (path, '&'.join(query_args))
if not cfg.get('no_content_length'):
if cfg.get('set_content_length'):
headers['Content-Length'] = cfg.get('set_content_length')
else:
headers['Content-Length'] = len(data)
def try_request():
self.http_connect()
self.connection.request(method, path, data, headers)
return self.connection.getresponse()
self.response = None
try_count = 0
fail_messages = []
while try_count < 5:
try_count += 1
try:
self.response = try_request()
except http_client.HTTPException as e:
fail_messages.append(safe_repr(e))
continue
if self.response.status == 401:
fail_messages.append("Response 401")
self.authenticate()
continue
elif self.response.status == 503:
fail_messages.append("Response 503")
if try_count != 5:
time.sleep(5)
continue
break
if self.response:
return self.response.status
request = "{method} {path} headers: {headers} data: {data}".format(
method=method, path=path, headers=headers, data=data)
raise RequestError('Unable to complete http request: %s. '
'Attempts: %s, Failures: %s' %
(request, len(fail_messages), fail_messages))
def put_start(self, path, hdrs=None, parms=None, cfg=None, chunked=False):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
self.http_connect()
path = self.make_path(path, cfg)
headers = self.make_headers(hdrs, cfg=cfg)
if chunked:
headers['Transfer-Encoding'] = 'chunked'
headers.pop('Content-Length', None)
if isinstance(parms, dict) and parms:
quote = urllib.parse.quote
if cfg.get('no_quote') or cfg.get('no_parms_quote'):
quote = lambda x: x
query_args = ['%s=%s' % (quote(x), quote(str(y)))
for (x, y) in parms.items()]
path = '%s?%s' % (path, '&'.join(query_args))
self.connection = self.conn_class(self.storage_host,
port=self.storage_port)
# self.connection.set_debuglevel(3)
self.connection.putrequest('PUT', path)
for key, value in headers.items():
self.connection.putheader(key, value)
self.connection.endheaders()
def put_data(self, data, chunked=False):
if chunked:
self.connection.send('%x\r\n%s\r\n' % (len(data), data))
else:
self.connection.send(data)
def put_end(self, chunked=False):
if chunked:
self.connection.send('0\r\n\r\n')
self.response = self.connection.getresponse()
self.connection.close()
return self.response.status
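# Illustrative sketch (an assumption, not part of the test machinery): the
# minimal configuration keys Connection.__init__ requires and the usual
# authenticate-then-use call pattern. The values below are placeholders; real
# ones come from the functional test configuration.
def _example_connection(config=None):
    config = config or {
        'auth_host': '127.0.0.1',   # placeholder host
        'auth_port': '8080',        # placeholder port
        'auth_ssl': 'no',
        'username': 'test:tester',  # placeholder credentials
        'password': 'testing',
    }
    conn = Connection(config)
    conn.authenticate()             # populates storage_url and storage_token
    return conn, conn.get_account()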
class Base(object):
def __str__(self):
return self.name
def header_fields(self, required_fields, optional_fields=None):
if optional_fields is None:
optional_fields = ()
headers = dict(self.conn.response.getheaders())
ret = {}
for field in required_fields:
if field[1] not in headers:
raise ValueError("%s was not found in response header" %
(field[1]))
try:
ret[field[0]] = int(headers[field[1]])
except ValueError:
ret[field[0]] = headers[field[1]]
for field in optional_fields:
if field[1] not in headers:
continue
try:
ret[field[0]] = int(headers[field[1]])
except ValueError:
ret[field[0]] = headers[field[1]]
return ret
class Account(Base):
def __init__(self, conn, name):
self.conn = conn
self.name = str(name)
def update_metadata(self, metadata=None, cfg=None):
if metadata is None:
metadata = {}
if cfg is None:
cfg = {}
headers = dict(("X-Account-Meta-%s" % k, v)
for k, v in metadata.items())
self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg)
if not 200 <= self.conn.response.status <= 299:
raise ResponseError(self.conn.response, 'POST',
self.conn.make_path(self.path))
return True
def container(self, container_name):
return Container(self.conn, self.name, container_name)
def containers(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
format_type = parms.get('format', None)
if format_type not in [None, 'json', 'xml']:
raise RequestError('Invalid format: %s' % format_type)
if format_type is None and 'format' in parms:
del parms['format']
status = self.conn.make_request('GET', self.path, hdrs=hdrs,
parms=parms, cfg=cfg)
if status == 200:
if format_type == 'json':
conts = json.loads(self.conn.response.read())
for cont in conts:
cont['name'] = cont['name'].encode('utf-8')
return conts
elif format_type == 'xml':
conts = []
tree = minidom.parseString(self.conn.response.read())
for x in tree.getElementsByTagName('container'):
cont = {}
for key in ['name', 'count', 'bytes']:
cont[key] = x.getElementsByTagName(key)[0].\
childNodes[0].nodeValue
conts.append(cont)
for cont in conts:
cont['name'] = cont['name'].encode('utf-8')
return conts
else:
lines = self.conn.response.read().split('\n')
if lines and not lines[-1]:
lines = lines[:-1]
return lines
elif status == 204:
return []
raise ResponseError(self.conn.response, 'GET',
self.conn.make_path(self.path))
def delete_containers(self):
for c in listing_items(self.containers):
cont = self.container(c)
if not cont.delete_recursive():
return False
return listing_empty(self.containers)
def info(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) != 204:
raise ResponseError(self.conn.response, 'HEAD',
self.conn.make_path(self.path))
fields = [['object_count', 'x-account-object-count'],
['container_count', 'x-account-container-count'],
['bytes_used', 'x-account-bytes-used']]
return self.header_fields(fields)
@property
def path(self):
return []
class Container(Base):
# policy_specified is set in __init__.py when tests are being set up.
policy_specified = None
def __init__(self, conn, account, name):
self.conn = conn
self.account = str(account)
self.name = str(name)
def create(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if self.policy_specified and 'X-Storage-Policy' not in hdrs:
hdrs['X-Storage-Policy'] = self.policy_specified
return self.conn.make_request('PUT', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) in (201, 202)
def update_metadata(self, hdrs=None, cfg=None):
if hdrs is None:
hdrs = {}
if cfg is None:
cfg = {}
self.conn.make_request('POST', self.path, hdrs=hdrs, cfg=cfg)
if not 200 <= self.conn.response.status <= 299:
raise ResponseError(self.conn.response, 'POST',
self.conn.make_path(self.path))
return True
def delete(self, hdrs=None, parms=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
return self.conn.make_request('DELETE', self.path, hdrs=hdrs,
parms=parms) == 204
def delete_files(self):
for f in listing_items(self.files):
file_item = self.file(f)
if not file_item.delete():
return False
return listing_empty(self.files)
def delete_recursive(self):
return self.delete_files() and self.delete()
def file(self, file_name):
return File(self.conn, self.account, self.name, file_name)
def files(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
format_type = parms.get('format', None)
if format_type not in [None, 'json', 'xml']:
raise RequestError('Invalid format: %s' % format_type)
if format_type is None and 'format' in parms:
del parms['format']
status = self.conn.make_request('GET', self.path, hdrs=hdrs,
parms=parms, cfg=cfg)
if status == 200:
if format_type == 'json':
files = json.loads(self.conn.response.read())
for file_item in files:
file_item['name'] = file_item['name'].encode('utf-8')
file_item['content_type'] = file_item['content_type'].\
encode('utf-8')
return files
elif format_type == 'xml':
files = []
tree = minidom.parseString(self.conn.response.read())
for x in tree.getElementsByTagName('object'):
file_item = {}
for key in ['name', 'hash', 'bytes', 'content_type',
'last_modified']:
file_item[key] = x.getElementsByTagName(key)[0].\
childNodes[0].nodeValue
files.append(file_item)
for file_item in files:
file_item['name'] = file_item['name'].encode('utf-8')
file_item['content_type'] = file_item['content_type'].\
encode('utf-8')
return files
else:
content = self.conn.response.read()
if content:
lines = content.split('\n')
if lines and not lines[-1]:
lines = lines[:-1]
return lines
else:
return []
elif status == 204:
return []
raise ResponseError(self.conn.response, 'GET',
self.conn.make_path(self.path))
def info(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg)
if self.conn.response.status == 204:
required_fields = [['bytes_used', 'x-container-bytes-used'],
['object_count', 'x-container-object-count']]
optional_fields = [
['versions', 'x-versions-location'],
['tempurl_key', 'x-container-meta-temp-url-key'],
['tempurl_key2', 'x-container-meta-temp-url-key-2']]
return self.header_fields(required_fields, optional_fields)
raise ResponseError(self.conn.response, 'HEAD',
self.conn.make_path(self.path))
@property
def path(self):
return [self.name]
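# Illustrative sketch (an assumption): the create / write / tear-down flow the
# Container and File wrappers support, in the order the tests typically use it.
def _example_container_roundtrip(account, container_name):
    cont = account.container(container_name)
    if not cont.create():                           # PUT must return 201/202
        raise ResponseError(cont.conn.response, 'PUT',
                            cont.conn.make_path(cont.path))
    cont.file('example-object').write_random(1024)  # upload 1 KiB of random data
    return cont.delete_recursive()                  # objects first, then container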
class File(Base):
def __init__(self, conn, account, container, name):
self.conn = conn
self.account = str(account)
self.container = str(container)
self.name = str(name)
self.chunked_write_in_progress = False
self.content_type = None
self.content_range = None
self.size = None
self.metadata = {}
def make_headers(self, cfg=None):
if cfg is None:
cfg = {}
headers = {}
if not cfg.get('no_content_length'):
if cfg.get('set_content_length'):
headers['Content-Length'] = cfg.get('set_content_length')
elif self.size:
headers['Content-Length'] = self.size
else:
headers['Content-Length'] = 0
if cfg.get('use_token'):
headers['X-Auth-Token'] = cfg.get('use_token')
if cfg.get('no_content_type'):
pass
elif self.content_type:
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = 'application/octet-stream'
for key in self.metadata:
headers['X-Object-Meta-' + key] = self.metadata[key]
return headers
@classmethod
def compute_md5sum(cls, data):
block_size = 4096
if isinstance(data, str):
data = six.StringIO(data)
checksum = hashlib.md5()
buff = data.read(block_size)
while buff:
checksum.update(buff)
buff = data.read(block_size)
data.seek(0)
return checksum.hexdigest()
def copy(self, dest_cont, dest_file, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if 'destination' in cfg:
headers = {'Destination': cfg['destination']}
elif cfg.get('no_destination'):
headers = {}
else:
headers = {'Destination': '%s/%s' % (dest_cont, dest_file)}
headers.update(hdrs)
if 'Destination' in headers:
headers['Destination'] = urllib.parse.quote(headers['Destination'])
return self.conn.make_request('COPY', self.path, hdrs=headers,
parms=parms) == 201
def copy_account(self, dest_account, dest_cont, dest_file,
hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if 'destination' in cfg:
headers = {'Destination': cfg['destination']}
elif cfg.get('no_destination'):
headers = {}
else:
headers = {'Destination-Account': dest_account,
'Destination': '%s/%s' % (dest_cont, dest_file)}
headers.update(hdrs)
if 'Destination-Account' in headers:
headers['Destination-Account'] = \
urllib.parse.quote(headers['Destination-Account'])
if 'Destination' in headers:
headers['Destination'] = urllib.parse.quote(headers['Destination'])
return self.conn.make_request('COPY', self.path, hdrs=headers,
parms=parms) == 201
def delete(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
cfg=cfg, parms=parms) != 204:
raise ResponseError(self.conn.response, 'DELETE',
self.conn.make_path(self.path))
return True
def info(self, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) != 200:
raise ResponseError(self.conn.response, 'HEAD',
self.conn.make_path(self.path))
fields = [['content_length', 'content-length'],
['content_type', 'content-type'],
['last_modified', 'last-modified'],
['etag', 'etag']]
optional_fields = [['x_object_manifest', 'x-object-manifest']]
header_fields = self.header_fields(fields,
optional_fields=optional_fields)
header_fields['etag'] = header_fields['etag'].strip('"')
return header_fields
def initialize(self, hdrs=None, parms=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if not self.name:
return False
status = self.conn.make_request('HEAD', self.path, hdrs=hdrs,
parms=parms)
if status == 404:
return False
elif (status < 200) or (status > 299):
raise ResponseError(self.conn.response, 'HEAD',
self.conn.make_path(self.path))
for hdr in self.conn.response.getheaders():
if hdr[0].lower() == 'content-type':
self.content_type = hdr[1]
if hdr[0].lower().startswith('x-object-meta-'):
self.metadata[hdr[0][14:]] = hdr[1]
if hdr[0].lower() == 'etag':
self.etag = hdr[1].strip('"')
if hdr[0].lower() == 'content-length':
self.size = int(hdr[1])
if hdr[0].lower() == 'last-modified':
self.last_modified = hdr[1]
return True
def load_from_filename(self, filename, callback=None):
fobj = open(filename, 'rb')
self.write(fobj, callback=callback)
fobj.close()
@property
def path(self):
return [self.container, self.name]
@classmethod
def random_data(cls, size=None):
if size is None:
size = random.randint(1, 32768)
fd = open('/dev/urandom', 'r')
data = fd.read(size)
fd.close()
return data
def read(self, size=-1, offset=0, hdrs=None, buffer=None,
callback=None, cfg=None, parms=None):
if cfg is None:
cfg = {}
if parms is None:
parms = {}
if size > 0:
range_string = 'bytes=%d-%d' % (offset, (offset + size) - 1)
if hdrs:
hdrs['Range'] = range_string
else:
hdrs = {'Range': range_string}
status = self.conn.make_request('GET', self.path, hdrs=hdrs,
cfg=cfg, parms=parms)
if (status < 200) or (status > 299):
raise ResponseError(self.conn.response, 'GET',
self.conn.make_path(self.path))
for hdr in self.conn.response.getheaders():
if hdr[0].lower() == 'content-type':
self.content_type = hdr[1]
if hdr[0].lower() == 'content-range':
self.content_range = hdr[1]
if hasattr(buffer, 'write'):
scratch = self.conn.response.read(8192)
transferred = 0
while len(scratch) > 0:
buffer.write(scratch)
transferred += len(scratch)
if callable(callback):
callback(transferred, self.size)
scratch = self.conn.response.read(8192)
return None
else:
return self.conn.response.read()
def read_md5(self):
status = self.conn.make_request('GET', self.path)
if (status < 200) or (status > 299):
raise ResponseError(self.conn.response, 'GET',
self.conn.make_path(self.path))
checksum = hashlib.md5()
scratch = self.conn.response.read(8192)
while len(scratch) > 0:
checksum.update(scratch)
scratch = self.conn.response.read(8192)
return checksum.hexdigest()
def save_to_filename(self, filename, callback=None):
        fobj = open(filename, 'wb')
        try:
            self.read(buffer=fobj, callback=callback)
        finally:
            fobj.close()
def sync_metadata(self, metadata=None, cfg=None, parms=None):
if metadata is None:
metadata = {}
if cfg is None:
cfg = {}
self.metadata.update(metadata)
if self.metadata:
headers = self.make_headers(cfg=cfg)
if not cfg.get('no_content_length'):
if cfg.get('set_content_length'):
headers['Content-Length'] = \
cfg.get('set_content_length')
else:
headers['Content-Length'] = 0
self.conn.make_request('POST', self.path, hdrs=headers,
parms=parms, cfg=cfg)
if self.conn.response.status not in (201, 202):
raise ResponseError(self.conn.response, 'POST',
self.conn.make_path(self.path))
return True
def chunked_write(self, data=None, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
if data is not None and self.chunked_write_in_progress:
self.conn.put_data(data, True)
elif data is not None:
self.chunked_write_in_progress = True
headers = self.make_headers(cfg=cfg)
headers.update(hdrs)
self.conn.put_start(self.path, hdrs=headers, parms=parms,
cfg=cfg, chunked=True)
self.conn.put_data(data, True)
elif self.chunked_write_in_progress:
self.chunked_write_in_progress = False
return self.conn.put_end(True) == 201
else:
raise RuntimeError
def write(self, data='', hdrs=None, parms=None, callback=None, cfg=None,
return_resp=False):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
block_size = 2 ** 20
if isinstance(data, file):
try:
data.flush()
data.seek(0)
except IOError:
pass
self.size = int(os.fstat(data.fileno())[6])
else:
data = six.StringIO(data)
self.size = data.len
headers = self.make_headers(cfg=cfg)
headers.update(hdrs)
self.conn.put_start(self.path, hdrs=headers, parms=parms, cfg=cfg)
transferred = 0
buff = data.read(block_size)
buff_len = len(buff)
try:
while buff_len > 0:
self.conn.put_data(buff)
transferred += buff_len
if callable(callback):
callback(transferred, self.size)
buff = data.read(block_size)
buff_len = len(buff)
self.conn.put_end()
except socket.timeout as err:
raise err
if (self.conn.response.status < 200) or \
(self.conn.response.status > 299):
raise ResponseError(self.conn.response, 'PUT',
self.conn.make_path(self.path))
try:
data.seek(0)
except IOError:
pass
self.md5 = self.compute_md5sum(data)
if return_resp:
return self.conn.response
return True
def write_random(self, size=None, hdrs=None, parms=None, cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
data = self.random_data(size)
if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg):
raise ResponseError(self.conn.response, 'PUT',
self.conn.make_path(self.path))
self.md5 = self.compute_md5sum(six.StringIO(data))
return data
def write_random_return_resp(self, size=None, hdrs=None, parms=None,
cfg=None):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
data = self.random_data(size)
resp = self.write(data, hdrs=hdrs, parms=parms, cfg=cfg,
return_resp=True)
if not resp:
raise ResponseError(self.conn.response)
self.md5 = self.compute_md5sum(six.StringIO(data))
return resp
def post(self, hdrs=None, parms=None, cfg=None, return_resp=False):
if hdrs is None:
hdrs = {}
if parms is None:
parms = {}
if cfg is None:
cfg = {}
headers = self.make_headers(cfg=cfg)
headers.update(hdrs)
self.conn.make_request('POST', self.path, hdrs=headers,
parms=parms, cfg=cfg)
if self.conn.response.status not in (201, 202):
raise ResponseError(self.conn.response, 'POST',
self.conn.make_path(self.path))
if return_resp:
return self.conn.response
return True
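# Illustrative sketch (an assumption): File.chunked_write() is driven by the
# caller -- once per chunk of data, then one final call with no data to end
# the chunked transfer and collect the PUT status.
def _example_chunked_upload(file_item, chunks):
    for chunk in chunks:
        file_item.chunked_write(chunk)   # first call also opens the connection
    return file_item.chunked_write()     # True when the server answered 201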
|
|
from django.conf import settings
from django.template.base import Context, TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import render, setup, SilentGetItemClass, SilentAttrClass, SomeClass
basic_templates = {
'basic-syntax01': 'something cool',
'basic-syntax02': '{{ headline }}',
'basic-syntax03': '{{ first }} --- {{ second }}',
}
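# Illustrative sketch (an assumption, mirroring the pattern of the tests below
# and relying only on the .utils helpers imported above): register a template
# with @setup, render it with a context, and assert on the output.
class ExampleSyntaxSketch(SimpleTestCase):
    @setup({'basic-syntax-example': '{{ greeting }}, {{ name }}!'})
    def test_basic_syntax_example(self):
        output = render('basic-syntax-example',
                        {'greeting': 'Hello', 'name': 'Django'})
        self.assertEqual(output, 'Hello, Django!')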
class BasicSyntaxTests(SimpleTestCase):
@setup(basic_templates)
def test_basic_syntax01(self):
"""
Plain text should go through the template parser untouched.
"""
output = render('basic-syntax01')
self.assertEqual(output, "something cool")
@setup(basic_templates)
def test_basic_syntax02(self):
"""
Variables should be replaced with their value in the current
context
"""
output = render('basic-syntax02', {'headline': 'Success'})
self.assertEqual(output, 'Success')
@setup(basic_templates)
def test_basic_syntax03(self):
"""
More than one replacement variable is allowed in a template
"""
output = render('basic-syntax03', {"first": 1, "second": 2})
self.assertEqual(output, '1 --- 2')
@setup({'basic-syntax04': 'as{{ missing }}df'})
def test_basic_syntax04(self):
"""
Fail silently when a variable is not found in the current context
"""
output = render('basic-syntax04')
if settings.TEMPLATE_STRING_IF_INVALID:
self.assertEqual(output, 'asINVALIDdf')
else:
self.assertEqual(output, 'asdf')
@setup({'basic-syntax06': '{{ multi word variable }}'})
def test_basic_syntax06(self):
"""
A variable may not contain more than one word
"""
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax06')
@setup({'basic-syntax07': '{{ }}'})
def test_basic_syntax07(self):
"""
Raise TemplateSyntaxError for empty variable tags.
"""
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax07')
@setup({'basic-syntax08': '{{ }}'})
def test_basic_syntax08(self):
"""
Raise TemplateSyntaxError for empty variable tags.
"""
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax08')
@setup({'basic-syntax09': '{{ var.method }}'})
def test_basic_syntax09(self):
"""
Attribute syntax allows a template to call an object's attribute
"""
output = render('basic-syntax09', {'var': SomeClass()})
self.assertEqual(output, 'SomeClass.method')
@setup({'basic-syntax10': '{{ var.otherclass.method }}'})
def test_basic_syntax10(self):
"""
Multiple levels of attribute access are allowed.
"""
output = render('basic-syntax10', {'var': SomeClass()})
self.assertEqual(output, 'OtherClass.method')
@setup({'basic-syntax11': '{{ var.blech }}'})
def test_basic_syntax11(self):
"""
Fail silently when a variable's attribute isn't found.
"""
output = render('basic-syntax11', {'var': SomeClass()})
if settings.TEMPLATE_STRING_IF_INVALID:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax12': '{{ var.__dict__ }}'})
def test_basic_syntax12(self):
"""
Raise TemplateSyntaxError when trying to access a variable
beginning with an underscore.
"""
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax12')
# Raise TemplateSyntaxError when trying to access a variable
# containing an illegal character.
@setup({'basic-syntax13': "{{ va>r }}"})
def test_basic_syntax13(self):
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax13')
@setup({'basic-syntax14': "{{ (var.r) }}"})
def test_basic_syntax14(self):
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax14')
@setup({'basic-syntax15': "{{ sp%am }}"})
def test_basic_syntax15(self):
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax15')
@setup({'basic-syntax16': "{{ eggs! }}"})
def test_basic_syntax16(self):
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax16')
@setup({'basic-syntax17': "{{ moo? }}"})
def test_basic_syntax17(self):
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax17')
@setup({'basic-syntax18': "{{ foo.bar }}"})
def test_basic_syntax18(self):
"""
Attribute syntax allows a template to call a dictionary key's
value.
"""
output = render('basic-syntax18', {"foo": {"bar": "baz"}})
self.assertEqual(output, "baz")
@setup({'basic-syntax19': "{{ foo.spam }}"})
def test_basic_syntax19(self):
"""
Fail silently when a variable's dictionary key isn't found.
"""
output = render('basic-syntax19', {"foo": {"bar": "baz"}})
if settings.TEMPLATE_STRING_IF_INVALID:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax20': "{{ var.method2 }}"})
def test_basic_syntax20(self):
"""
Fail silently when accessing a non-simple method
"""
output = render('basic-syntax20', {'var': SomeClass()})
if settings.TEMPLATE_STRING_IF_INVALID:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax20b': "{{ var.method5 }}"})
def test_basic_syntax20b(self):
"""
Don't silence a TypeError if it was raised inside a callable.
"""
template = get_template('basic-syntax20b')
with self.assertRaises(TypeError):
template.render(Context({'var': SomeClass()}))
# Don't get confused when parsing something that is almost, but not
# quite, a template tag.
@setup({'basic-syntax21': "a {{ moo %} b"})
def test_basic_syntax21(self):
output = render('basic-syntax21')
self.assertEqual(output, "a {{ moo %} b")
@setup({'basic-syntax22': "{{ moo #}"})
def test_basic_syntax22(self):
output = render('basic-syntax22')
self.assertEqual(output, "{{ moo #}")
@setup({'basic-syntax23': "{{ moo #} {{ cow }}"})
def test_basic_syntax23(self):
"""
Treat "moo #} {{ cow" as the variable. Not ideal, but costly to work
around, so this triggers an error.
"""
with self.assertRaises(TemplateSyntaxError):
get_template('basic-syntax23')
@setup({'basic-syntax24': "{{ moo\n }}"})
def test_basic_syntax24(self):
"""
Embedded newlines make it not-a-tag.
"""
output = render('basic-syntax24')
self.assertEqual(output, "{{ moo\n }}")
# Literal strings are permitted inside variables, mostly for i18n
# purposes.
@setup({'basic-syntax25': '{{ "fred" }}'})
def test_basic_syntax25(self):
output = render('basic-syntax25')
self.assertEqual(output, "fred")
@setup({'basic-syntax26': r'{{ "\"fred\"" }}'})
def test_basic_syntax26(self):
output = render('basic-syntax26')
self.assertEqual(output, "\"fred\"")
@setup({'basic-syntax27': r'{{ _("\"fred\"") }}'})
def test_basic_syntax27(self):
output = render('basic-syntax27')
self.assertEqual(output, "\"fred\"")
# #12554 -- Make sure a silent_variable_failure Exception is
# suppressed on dictionary and attribute lookup.
@setup({'basic-syntax28': "{{ a.b }}"})
def test_basic_syntax28(self):
output = render('basic-syntax28', {'a': SilentGetItemClass()})
if settings.TEMPLATE_STRING_IF_INVALID:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'basic-syntax29': "{{ a.b }}"})
def test_basic_syntax29(self):
output = render('basic-syntax29', {'a': SilentAttrClass()})
if settings.TEMPLATE_STRING_IF_INVALID:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
# Something that starts like a number but has an extra lookup works
# as a lookup.
@setup({'basic-syntax30': "{{ 1.2.3 }}"})
def test_basic_syntax30(self):
output = render(
'basic-syntax30',
{"1": {"2": {"3": "d"}}}
)
self.assertEqual(output, 'd')
@setup({'basic-syntax31': "{{ 1.2.3 }}"})
def test_basic_syntax31(self):
output = render(
'basic-syntax31',
{"1": {"2": ("a", "b", "c", "d")}},
)
self.assertEqual(output, 'd')
@setup({'basic-syntax32': "{{ 1.2.3 }}"})
def test_basic_syntax32(self):
output = render(
'basic-syntax32',
{"1": (("x", "x", "x", "x"), ("y", "y", "y", "y"), ("a", "b", "c", "d"))},
)
self.assertEqual(output, 'd')
@setup({'basic-syntax33': "{{ 1.2.3 }}"})
def test_basic_syntax33(self):
output = render(
'basic-syntax33',
{"1": ("xxxx", "yyyy", "abcd")},
)
self.assertEqual(output, 'd')
@setup({'basic-syntax34': "{{ 1.2.3 }}"})
def test_basic_syntax34(self):
output = render(
'basic-syntax34',
{"1": ({"x": "x"}, {"y": "y"}, {"z": "z", "3": "d"})}
)
self.assertEqual(output, 'd')
# Numbers are numbers even if their digits are in the context.
@setup({'basic-syntax35': "{{ 1 }}"})
def test_basic_syntax35(self):
output = render('basic-syntax35', {"1": "abc"})
self.assertEqual(output, '1')
@setup({'basic-syntax36': "{{ 1.2 }}"})
def test_basic_syntax36(self):
output = render('basic-syntax36', {"1": "abc"})
self.assertEqual(output, '1.2')
@setup({'basic-syntax37': '{{ callable }}'})
def test_basic_syntax37(self):
"""
Call methods in the top level of the context.
"""
output = render('basic-syntax37', {"callable": lambda: "foo bar"})
self.assertEqual(output, 'foo bar')
@setup({'basic-syntax38': '{{ var.callable }}'})
def test_basic_syntax38(self):
"""
Call methods returned from dictionary lookups.
"""
output = render('basic-syntax38', {"var": {"callable": lambda: "foo bar"}})
self.assertEqual(output, 'foo bar')
|
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils.text import slugify
from django.utils.html import strip_tags
from protein.models import (Protein, ProteinConformation, ProteinState, ProteinSequenceType, ProteinSegment,
ProteinFusion, ProteinFusionProtein, ProteinSource)
from residue.models import Residue
from construct.models import *
from optparse import make_option
from datetime import datetime
import logging, os
import yaml
class Command(BaseCommand):
help = 'Reads source data and creates protein records for constructs'
def add_arguments(self, parser):
parser.add_argument('--filename', action='append', dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('--purge', action='store_true', dest='purge', default=False,
help='Purge existing construct records')
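    # Typical invocations through a Django manage.py entry point (the command
    # name is illustrative; it would match this module's file name):
    #   python manage.py build_constructs --purge
    #   python manage.py build_constructs --filename constructs_a.yaml --filename constructs_b.yaml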
logger = logging.getLogger(__name__)
# source file directory
construct_data_dir = os.sep.join([settings.DATA_DIR, 'structure_data', 'constructs'])
def handle(self, *args, **options):
# delete any existing construct data
if options['purge']:
try:
self.purge_constructs()
except Exception as msg:
print(msg)
self.logger.error(msg)
# import the structure data
try:
self.create_constructs(options['filename'])
except Exception as msg:
print(msg)
self.logger.error(msg)
def purge_constructs(self):
try:
pst = ProteinSequenceType.objects.get(slug='mod')
Protein.objects.filter(sequence_type=pst).delete()
except ProteinSequenceType.DoesNotExist:
self.logger.warning('ProteinSequenceType mod not found: nothing to delete.')
def create_constructs(self, filenames):
self.logger.info('CREATING CONSTRUCTS')
# what files should be parsed?
if not filenames:
filenames = os.listdir(self.construct_data_dir)
# parse files
for source_file in filenames:
source_file_path = os.sep.join([self.construct_data_dir, source_file])
if os.path.isfile(source_file_path) and source_file[0] != '.':
self.logger.info('Reading file {}'.format(source_file_path))
# read the yaml file
with open(source_file_path, 'r') as f:
sd = yaml.load(f)
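                # Minimal sketch of the YAML layout this loop expects (keys come
                # from the lookups below; values are illustrative):
                #   protein: adrb2_human
                #   name: Example construct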
# is a protein specified?
if 'protein' not in sd:
self.logger.error('Protein not specified for construct, skipping')
continue
# fetch the parent protein
try:
ppc = ProteinConformation.objects.select_related('protein__family', 'protein__species',
'protein__residue_numbering_scheme').get(protein__entry_name=sd['protein'],
state__slug=settings.DEFAULT_PROTEIN_STATE)
except ProteinConformation.DoesNotExist:
# abort if parent protein is not found
self.logger.error('Parent protein {} for construct {} not found, aborting!'.format(
sd['protein'], sd['name']))
continue
if not Protein.objects.filter(name=sd['name']).exists():
# create a protein record
p = Protein()
p.parent = ppc.protein
p.family = ppc.protein.family
p.species = ppc.protein.species
p.residue_numbering_scheme = ppc.protein.residue_numbering_scheme
p.sequence_type, created = ProteinSequenceType.objects.get_or_create(slug='mod',
defaults={'name': 'Modified'})
p.source, created = ProteinSource.objects.get_or_create(name='OTHER')
p.entry_name = slugify(strip_tags(sd['name']))
p.name = sd['name']
p.sequence = ppc.protein.sequence
# save protein (construct)
try:
p.save()
self.logger.info('Created construct {} with parent protein {}'.format(p.name,
ppc.protein.entry_name))
except Exception as e:
print(e)
self.logger.error('Failed creating construct {} with parent protein {}'.format(p.name,
ppc.protein.entry_name))
continue
else:
p = Protein.objects.get(name=sd['name'])
if not ProteinConformation.objects.filter(protein=p).exists():
# create protein conformation record
pc = ProteinConformation()
pc.protein = p
pc.state = ProteinState.objects.get(slug=settings.DEFAULT_PROTEIN_STATE)
try:
pc.save()
self.logger.info('Created conformation {} of protein {}'.format(pc.state.name, p.name))
                    except Exception:
self.logger.error('Failed creating conformation {} of protein {}'.format(pc.state.name,
p.entry_name))
# # create residue records
# deletions = []
# deletions_list = []
# if 'deletions' in sd and sd['deletions']:
# for t in sd['deletions']:
# deletions += list(range(t[0],t[1]+1))
# deletions_list.append(str(t[0])+'-'+str(t[1]))
# s = ","
# deletion_string = s.join(deletions_list)
# mutations = {}
# if 'mutations' in sd and sd['mutations']:
# for m in sd['mutations']:
# res_num = m[1:-1]
# mutations[res_num] = {
# 'wt_res': m[0],
# 'mut_res': m[-1],
# 'full': m,
# }
# # Create construct record
# c = Construct()
# c.protein_conformation = pc
# c.deletions = deletion_string
# c.save()
# Create Auxiliary proteins
# if 'auxiliary_proteins' in sd and sd['auxiliary_proteins']:
# ap = AuxProtein()
# ap.construct = c
# apct = AuxProteinType.objects.create()
# ap.protein_type = apct
# apct.save()
# if 'remarks' in sd['auxiliary_proteins']:
# ap.remarks = sd['auxiliary_proteins']['remarks']
# ap.save()
# for step in sd['auxiliary_proteins']:
            # if 'type' in step and 'name' in step and 'sequence' in step:
# ap.protein_type = apct
# ap.protein_type, created = AuxProteinType.objects.get_or_create()
# ap.name = sd['auxiliary_proteins']['name']
# ap.uniprot_id = sd['auxiliary_proteins']['uniprot_id']
# ap.sequence = sd['auxiliary_proteins']['sequence']
#mutations if any to be included from mutation model along with reason of mutation
# ap.position = sd['auxiliary_proteins']['position']
# ap.deletions = sd['auxiliary_proteins']['deletions']
# else:
# self.logger.error('Auxiliary protein step incorrectly defined for {}'.format(p))
# # create expression records
# if 'expression_sys' in sd and sd['expression_sys']:
# ce = ConstructExpression()
# ce.construct = c
# ce.expression_system, created = ConstructExpressionSystem.objects.get_or_create(expression_method = sd['expression_sys']['expression_method'], host_cell_type = sd['expression_sys']['host_cell_type'], host_cell = sd['expression_sys']['host_cell'])
# if 'remarks' in sd:
# ce.remarks = sd['expression_sys']['remarks']
# ce.save()
# # create solubilization records
# if 'solubilization' in sd and sd['solubilization'] and 'steps' in sd['solubilization'] and sd['solubilization']['steps']:
# so = ConstructSolubilization()
# so.construct = c
# cl = ChemicalList.objects.create()
# so.chemical_list = cl
# for step in sd['solubilization']['steps']:
                # if 'type' in step and 'item' in step and 'concentration' in step:
# chem = Chemical()
# chem.chemical_type, created = ChemicalType.objects.get_or_create(name = step['type'])
# chem.name = step['item']
# chem.save()
# cc = ChemicalConc()
# cc.concentration = step['concentration']
# cc.chemical = chem # since ChemicalConc has a ForeignKey to Chemical
# cc.save()
# cl.chemicals.add(cc)
# else:
# self.logger.error('Solubilization step incorrectly defined for {}'.format(p))
# if 'remarks' in sd['solubilization']:
# so.remarks = sd['solubilization']['remarks']
# so.save()
# # create purification records
# if 'purification' in sd and sd['purification'] and sd['purification']['steps']:
# pu = ConstructPurification()
# pu.construct = c
# if 'remarks' in sd['purification']:
# pu.remarks = sd['purification']['remarks']
# pu.save()
# for step in sd['purification']['steps']:
# if 'type' in step and 'description' in step:
# pust = PurificationStep()
# pust.description = step['description']
# pust.purification = pu
# pust.purification_type, created = PurificationStepType.objects.get_or_create(name = step['type'] ) # 2 values returned by get_or_create
# if created:
# self.logger.info('Created purification step type {}'.format(pust.purification_type))
# pust.save()
# else:
# self.logger.error('Purification step incorrectly defined for {}'.format(p))
# # create crystallization records
# if 'crystallization' in sd and sd['crystallization']:
# cy = ConstructCrystallization()
# cy.construct = c
# cyt = CrystallizationMethodTypes.objects.create()
# cy.crystal_type = cyt
# cy.method = sd['crystallization']['method']
# cy.settings = sd['crystallization']['settings']
# cy.protein_conc = sd['crystallization']['protein_conc']
# cl = ChemicalList.objects.create()
# cy.chemical_list = cl
# for step in sd['crystallization']['chemicallist']:
                # if 'type' in step and 'item' in step and 'concentration' in step:
# chem = Chemical()
# chem.chemical_type, created = ChemicalType.objects.get_or_create(name = step['type'])
# chem.name = step['item']
# chem.save()
# cc = ChemicalConc()
# cc.concentration = step['concentration']
# cc.chemical = chem # since ChemicalConc has a ForeignKey to Chemical
# cc.save()
# cl.chemicals.add(cc)
# else:
# self.logger.error('Crystallization step incorrectly defined for {}'.format(p))
# cy.aqueous_solution_lipid_ratio = sd['crystallization']['aqueous_solution_lipid_ratio_LCP']
# cy.lcp_bolus_volume = sd['crystallization']['LCP_bolus_volume']
# cy.precipitant_solution_volume = sd['crystallization']['precipitant_solution_volume']
# cy.temp = sd['crystallization']['temperature']
# cy.ph = sd['crystallization']['ph']
# if 'remarks' in sd['crystallization']:
# cy.remarks = sd['crystallization']['remarks']
# cy.save()
# # fusion proteins
# split_segments = {}
# if 'fusion_proteins' in sd and sd['fusion_proteins']:
# for fp in sd['fusion_proteins']:
# fp_start = Residue.objects.get(protein_conformation=ppc,
# sequence_number=fp['positions'][0])
# fp_end = Residue.objects.get(protein_conformation=ppc, sequence_number=fp['positions'][1])
# # if the fusion protein is inserted within only one segment (the usual case), split that
# # segment into two segments
# if fp_start and fp_start.protein_segment == fp_end.protein_segment:
# # get/create split protein segments
# segment_before, created = ProteinSegment.objects.get_or_create(
# slug=fp_start.protein_segment.slug+"_1", defaults={
# 'name': fp_start.protein_segment.name,
# 'category': fp_start.protein_segment.category,
# 'partial': True})
# segment_after, created = ProteinSegment.objects.get_or_create(
# slug=fp_start.protein_segment.slug+"_2", defaults={
# 'name': fp_start.protein_segment.name,
# 'category': fp_start.protein_segment.category,
# 'partial': True})
# # keep track of information about split segments
# split_segments[fp_start.protein_segment.slug] = {
# 'start': {
# 'sequence_number': fp['positions'][0],
# 'segment': segment_before,
# },
# 'end': {
# 'sequence_number': fp['positions'][1],
# 'segment': segment_after,
# },
# }
# # get/insert fusion protein
# fusion, create = ProteinFusion.objects.get_or_create(name=fp['name'], defaults={
# 'sequence': fp['sequence']})
# # create relationship with protein
# ProteinFusionProtein.objects.create(protein=p, protein_fusion=fusion,
# segment_before=segment_before, segment_after=segment_after)
# prs = Residue.objects.filter(protein_conformation=ppc).prefetch_related(
# 'protein_conformation__protein', 'protein_segment', 'generic_number',
# 'display_generic_number__scheme', 'alternative_generic_numbers__scheme')
# updated_sequence = ''
# for pr in prs:
# if pr.sequence_number not in deletions:
# r = Residue()
# r.protein_conformation = pc
# r.generic_number = pr.generic_number
# r.display_generic_number = pr.display_generic_number
# r.sequence_number = pr.sequence_number
# # check for split segments
# if pr.protein_segment.slug in split_segments:
# rsns = split_segments[pr.protein_segment.slug]['start']['sequence_number']
# rsne = split_segments[pr.protein_segment.slug]['end']['sequence_number']
# if r.sequence_number <= rsns:
# r.protein_segment = split_segments[pr.protein_segment.slug]['start']['segment']
# elif r.sequence_number >= rsne:
# r.protein_segment = split_segments[pr.protein_segment.slug]['end']['segment']
# else:
# r.protein_segment = pr.protein_segment
# # amino acid, check for mutations
# if r.sequence_number in mutations:
# if mutations[r.sequence_number]['wt_res'] == pr.amino_acid:
# r.amino_acid = mutations[r.sequence_number]['mut_res']
# else:
# self.logger.error('Mutation {} in construct {} does not match wild-type sequence' \
# + ' of {}'.format(mutations[r.sequence_number]['full'], pc.protein.name,
# ppc.protein.entry_name))
# else:
# r.amino_acid = pr.amino_acid
# # save amino acid to updated sequence
# updated_sequence += r.amino_acid
# # save residue before populating M2M relations
# r.save()
# # alternative generic numbers
# agns = pr.alternative_generic_numbers.all()
# for agn in agns:
# r.alternative_generic_numbers.add(agn)
# # update sequence
# p.sequence = updated_sequence
# p.save()
self.logger.info('COMPLETED CREATING CONSTRUCTS')
|
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Cross-platform module to emulate mouse events like a real user"""
import sys
import time
if sys.platform == 'win32':
from . import win32functions
from . import win32defines
from .timings import Timings
import win32api
import win32gui
from . import keyboard
else:
from Xlib.display import Display
from Xlib import X
from Xlib.ext.xtest import fake_input
BUTTON_MAPPING = {'left': 0, 'middle': 1, 'right': 2, 'up_scroll': 3,
'down_scroll': 4, 'left_scroll': 5, 'right_scroll': 6}
if sys.platform == 'win32':
def _perform_click_input(
button="left",
coords=(None, None),
double=False,
button_down=True,
button_up=True,
wheel_dist=0,
pressed="",
key_down=True,
key_up=True,
):
"""Perform a click action using SendInput
All the *click_input() and *mouse_input() methods use this function.
Thanks to a bug report from Tomas Walch (twalch) on sourceforge and code
seen at http://msdn.microsoft.com/en-us/magazine/cc164126.aspx this
function now always works the same way whether the mouse buttons are
swapped or not.
For example if you send a right click to Notepad.Edit - it will always
bring up a popup menu rather than 'clicking' it.
"""
# Handle if the mouse buttons are swapped
if win32functions.GetSystemMetrics(win32defines.SM_SWAPBUTTON):
if button.lower() == 'left':
button = 'right'
elif button.lower() == 'right':
button = 'left'
events = []
if button.lower() == 'left':
events.append(win32defines.MOUSEEVENTF_MOVE)
if button_down:
events.append(win32defines.MOUSEEVENTF_LEFTDOWN)
if button_up:
events.append(win32defines.MOUSEEVENTF_LEFTUP)
elif button.lower() == 'right':
if button_down:
events.append(win32defines.MOUSEEVENTF_RIGHTDOWN)
if button_up:
events.append(win32defines.MOUSEEVENTF_RIGHTUP)
elif button.lower() == 'middle':
if button_down:
events.append(win32defines.MOUSEEVENTF_MIDDLEDOWN)
if button_up:
events.append(win32defines.MOUSEEVENTF_MIDDLEUP)
elif button.lower() == 'move':
events.append(win32defines.MOUSEEVENTF_MOVE)
events.append(win32defines.MOUSEEVENTF_ABSOLUTE)
elif button.lower() == 'x':
if button_down:
events.append(win32defines.MOUSEEVENTF_XDOWN)
if button_up:
events.append(win32defines.MOUSEEVENTF_XUP)
if button.lower() == 'wheel':
events.append(win32defines.MOUSEEVENTF_WHEEL)
# if we were asked to double click (and we are doing a full click
# not just up or down.
if double and button_down and button_up:
events *= 2
if button_down and (button.lower() not in ['move', 'wheel']):
# wait while previous click is not affecting our current click
while 0 < win32api.GetTickCount() - win32api.GetLastInputInfo() < win32gui.GetDoubleClickTime():
time.sleep(Timings.after_clickinput_wait)
# set the cursor position
win32api.SetCursorPos((coords[0], coords[1]))
time.sleep(Timings.after_setcursorpos_wait)
if win32api.GetCursorPos() != (coords[0], coords[1]):
win32api.SetCursorPos((coords[0], coords[1]))
time.sleep(Timings.after_setcursorpos_wait)
keyboard_keys = pressed.lower().split()
if ('control' in keyboard_keys) and key_down:
keyboard.VirtualKeyAction(keyboard.VK_CONTROL, up=False).run()
if ('shift' in keyboard_keys) and key_down:
keyboard.VirtualKeyAction(keyboard.VK_SHIFT, up=False).run()
if ('alt' in keyboard_keys) and key_down:
keyboard.VirtualKeyAction(keyboard.VK_MENU, up=False).run()
dw_flags = 0
for event in events:
dw_flags |= event
dw_data = 0
if button.lower() == 'wheel':
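            # One wheel "notch" is WHEEL_DELTA (120) on Windows, so wheel_dist
            # is given in notches and scaled here to the units mouse_event expects.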
wheel_dist = wheel_dist * 120
dw_data = wheel_dist
if button.lower() == 'move':
x_res = win32functions.GetSystemMetrics(win32defines.SM_CXSCREEN)
y_res = win32functions.GetSystemMetrics(win32defines.SM_CYSCREEN)
x_coord = int(float(coords[0]) * (65535. / float(x_res - 1)))
y_coord = int(float(coords[1]) * (65535. / float(y_res - 1)))
win32api.mouse_event(dw_flags, x_coord, y_coord, dw_data)
else:
for event in events:
if event == win32defines.MOUSEEVENTF_MOVE:
x_res = win32functions.GetSystemMetrics(win32defines.SM_CXSCREEN)
y_res = win32functions.GetSystemMetrics(win32defines.SM_CYSCREEN)
x_coord = int(float(coords[0]) * (65535. / float(x_res - 1)))
y_coord = int(float(coords[1]) * (65535. / float(y_res - 1)))
win32api.mouse_event(
win32defines.MOUSEEVENTF_MOVE | win32defines.MOUSEEVENTF_ABSOLUTE,
x_coord, y_coord, dw_data)
else:
win32api.mouse_event(
event | win32defines.MOUSEEVENTF_ABSOLUTE,
coords[0], coords[1], dw_data)
time.sleep(Timings.after_clickinput_wait)
if ('control' in keyboard_keys) and key_up:
keyboard.VirtualKeyAction(keyboard.VK_CONTROL, down=False).run()
if ('shift' in keyboard_keys) and key_up:
keyboard.VirtualKeyAction(keyboard.VK_SHIFT, down=False).run()
if ('alt' in keyboard_keys) and key_up:
keyboard.VirtualKeyAction(keyboard.VK_MENU, down=False).run()
else:
_display = Display()
def _perform_click_input(button='left', coords=(0, 0),
button_down=True, button_up=True, double=False,
wheel_dist=0, pressed="", key_down=True, key_up=True):
"""Perform a click action using Python-xlib"""
#Move mouse
x = int(coords[0])
y = int(coords[1])
fake_input(_display, X.MotionNotify, x=x, y=y)
_display.sync()
if button == 'wheel':
if wheel_dist == 0:
return
if wheel_dist > 0:
button = 'up_scroll'
if wheel_dist < 0:
button = 'down_scroll'
for _ in range(abs(wheel_dist)):
_perform_click_input(button, coords)
else:
pointer_map = _display.get_pointer_mapping()
button = pointer_map[BUTTON_MAPPING[button]]
repeat = 1
if double:
repeat = 2
for _ in range(repeat):
if button_down:
fake_input(_display, X.ButtonPress, button)
_display.sync()
if button_up:
fake_input(_display, X.ButtonRelease, button)
_display.sync()
def click(button='left', coords=(0, 0)):
"""Click at the specified coordinates"""
_perform_click_input(button=button, coords=coords)
def double_click(button='left', coords=(0, 0)):
"""Double click at the specified coordinates"""
_perform_click_input(button=button, coords=coords, double=True)
def right_click(coords=(0, 0)):
"""Right click at the specified coords"""
_perform_click_input(button='right', coords=coords)
def move(coords=(0, 0)):
"""Move the mouse"""
    _perform_click_input(button='move', coords=coords, button_down=False, button_up=False)
def press(button='left', coords=(0, 0)):
"""Press the mouse button"""
_perform_click_input(button=button, coords=coords, button_down=True, button_up=False)
def release(button='left', coords=(0, 0)):
"""Release the mouse button"""
_perform_click_input(button=button, coords=coords, button_down=False, button_up=True)
def scroll(coords=(0, 0), wheel_dist=1):
"""Do mouse wheel"""
if wheel_dist:
_perform_click_input(button='wheel', wheel_dist=wheel_dist, coords=coords)
def wheel_click(coords=(0, 0)):
"""Middle mouse button click at the specified coords"""
_perform_click_input(button='middle', coords=coords)
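# A minimal usage sketch (commented out so importing has no side effects;
# coordinates are illustrative and assume this file is importable as
# pywinauto.mouse):
#   from pywinauto import mouse
#   mouse.move(coords=(100, 100))
#   mouse.click(button='left', coords=(100, 100))
#   mouse.scroll(coords=(100, 100), wheel_dist=-2)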
|
|
'''
.. module:: strategies
This module contains the backtesting strategies
.. moduleauthor:: Christopher Phillippi <c_phillippi@mfe.berkeley.edu>
'''
from pandas.stats.api import ols
import datetime
import normalize
import numpy as np
import numpy.linalg as nplinalg
import pandas as pd
import scipy.optimize as optimize
def accumulate( f, iterable, f0 ):
prev = f0
for item in iterable:
prev = f( prev, item )
yield prev
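# Example of accumulate(): with f as addition and f0 = 0, the running values
# over [1, 2, 3] are 1, 3, 6 (each yielded value is f(previous, item)):
#   list( accumulate( lambda a, b: a + b, [ 1, 2, 3 ], 0 ) )  # -> [1, 3, 6]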
class Backtest( object ):
class Results( object ):
_daysPerYear = 252
_tol = 0.000000000001
def __init__( self,
returns,
tradeDates,
rho = None,
name = 'Returns',
riskModelName = 'NA',
budget = 1,
stocks = None,
weights = None,
riskFree = 0.001,
benchmarkPrices = None ):
def extractDate( date ):
try:
return date.date()
except AttributeError:
try:
return datetime.datetime.strptime( date, '%Y-%m-%d %H:%M:%S' )
except TypeError:
return date
self.name = name
self.riskModelName = riskModelName
self.rho = rho
self.budget = budget
try:
self.weights = pd.DataFrame( np.hstack( weights ).T,
index = tradeDates,
columns = stocks )
except TypeError:
pass
self.returns = pd.DataFrame( np.array( returns ),
index = tradeDates,
columns = [ name ] )
self.begin = extractDate( tradeDates[ 0 ] )
self.end = extractDate( tradeDates[ -1 ] )
self.riskFree = riskFree
self.benchmarkPrices = benchmarkPrices
def timeSpan( self, freq = 'years' ):
if freq == 'seconds':
return float( ( self.end - self.begin ).seconds )
if freq == 'days':
days = ( self.end - self.begin ).days
if days == 0:
return self.timeSpan( freq = 'seconds' ) / ( 60.0 * 60.0 * 8 )
return ( self.end - self.begin ).days
if freq == 'years':
return float( self.timeSpan( freq = 'days' ) ) / 365.0
def cumulativeReturns( self ):
return ( self.returns + 1 ).cumprod() - 1
def portfolioValues( self ):
return ( 1 + self.cumulativeReturns() ) * self.budget
def totalReturn( self ):
return float( self.cumulativeReturns().as_matrix()[-1] )
def annualizedReturn( self ):
return self.totalReturn() / self.timeSpan()
def annualizedRisk( self ):
return float( np.std( self.returns.as_matrix() ) * np.sqrt( self._daysPerYear ) )
def sharpeRatio( self ):
return ( self.annualizedReturn() - self.riskFree ) / self.annualizedRisk()
def informationRatio( self ):
            if self.benchmarkPrices is None:
raise Exception( 'Benchmark not provided for Information Ratio.' )
returns = self.returns[1:] # ignore first day tcosts
factorReturns = self.benchmarkPrices.pct_change().ix[ returns.index ]
factors = factorReturns.columns
beta = ols( y = returns[ self.name ], x = factorReturns ).beta[ factors ]
benchmarkReturns = ( beta * factorReturns ).sum( axis = 1 )
benchmarkTest = Backtest.Results( returns - benchmarkReturns, returns.index )
activeReturn = benchmarkTest.annualizedReturn()
activeRisk = benchmarkTest.annualizedRisk()
if np.abs( activeReturn ) < self._tol:
return 0
return activeReturn / activeRisk
def winningRatio( self ):
return float((self.returns>0).sum() / self.returns.shape[ 0 ])
def losingRatio( self ):
return float((self.returns<0).sum() / self.returns.shape[ 0 ])
def profitPerWinningTrade( self ):
return float(self.returns[self.returns > 0].mean())
def lossPerLosingTrade( self ):
return float( self.returns[self.returns < 0].mean() )
def toDataFrame( self ):
columns = [ 'Strategy',
'Risk Model',
'Rho',
'Budget',
'Begin',
'End',
'Days',
'Years',
'Total Return',
'Annualized Return',
'Annualized Risk',
'Sharpe Ratio',
'Winning Ratio',
'Losing Ratio',
'Profit per Winning Trade',
'Loss per Losing Trade' ]
values = [ self.name,
self.riskModelName,
self.rho,
self.budget,
self.begin,
self.end,
self.timeSpan( 'days' ),
self.timeSpan( 'years' ),
self.totalReturn(),
self.annualizedReturn(),
self.annualizedRisk(),
self.sharpeRatio(),
self.winningRatio(),
self.losingRatio(),
self.profitPerWinningTrade(),
self.lossPerLosingTrade() ]
try:
values.append( self.informationRatio() )
columns.append( 'Information Ratio' )
            except Exception:
pass
values = np.array( [values] )
return pd.DataFrame( values, columns = columns )
def __init__( self,
prices,
riskModel,
strategy,
begin,
end,
budget = 1,
rho = 1,
riskFree = 0.01,
tCosts = 0.0005 ):
self.dates = prices.index
self.prices = prices
self.riskModel = riskModel
self.returns = prices.pct_change()
self.strategy = strategy
self.budget = budget
self.trainDates = self.prices.ix[:begin].index
self.tradeDates = self.prices.ix[begin:end].index
self.rho = rho
self.riskFree = riskFree
self.benchmarkPrices = self.strategy.getBenchmark()
self.tCosts = tCosts
def run( self ):
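        # delever() rescales the raw strategy weights to unit gross exposure
        # (the absolute weights sum to one) so returns and transaction costs
        # are comparable across strategies.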
def delever( weights ):
return weights / np.sum( np.abs( weights ) )
yesterday = self.trainDates[ -1 ]
ptfReturns = dict()
weights = { yesterday : np.zeros( ( self.prices.shape[1], 1 ) ) }
yesterdaysValue = self.budget
for date in self.tradeDates:
cov = self.riskModel.getCovariance( yesterday, rho = self.rho )
weights[ date ] = delever( self.strategy.getWeights( cov ) )
weightChange = weights[ date ] - weights[ yesterday ]
transCharge = self.tCosts * yesterdaysValue * np.sum( abs( weightChange ) )
returns = float( np.dot( self.returns.ix[ date.date() ], weights[ date ] ) )
todaysValue = ( 1 + returns ) * yesterdaysValue - transCharge
ptfReturns[ date ] = ( todaysValue - yesterdaysValue ) / yesterdaysValue
yesterday = date
allWeights, returns = zip( *( ( weights[ date ], ptfReturns[ date ] )
for date in self.tradeDates ) )
return ( Backtest.Results( returns,
self.tradeDates,
rho = self.rho,
name = self.strategy.getName(),
riskModelName = self.riskModel.getName(),
budget = self.budget,
weights = allWeights,
stocks = self.prices.columns,
riskFree = self.riskFree,
benchmarkPrices = self.benchmarkPrices ) )
class PairsBacktest( Backtest ):
def __init__( self,
prices,
divergence,
threshold,
endOfDay,
periodsToExit = 1,
budget = 1,
riskFree = 0.01,
tCosts = 0.0003 ):
n = prices.shape[ 0 ]
self.dates = prices.index
self.prices = prices
self.divergence = divergence
self.threshold = threshold
self.endOfDay = endOfDay
self.returns = prices.pct_change().fillna( 0 )
self.budget = budget
self.riskFree = riskFree / ( 252 * n )
self.tCosts = tCosts
self.periodsToExit = periodsToExit
self.first, self.second = prices.columns
self.name = self.first + '|' + self.second
def run( self ):
def strategy( args, date ):
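            # State is (daysActive, weights, cash). The rules below: exit when
            # the holding period reaches periodsToExit, at end of day, or once
            # |divergence| falls below a quarter of the threshold; enter a
            # dollar-neutral pair when flat and |divergence| exceeds the
            # threshold; otherwise hold and accrue the risk-free rate on cash.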
daysActive, weights, cash = args
divergence = float( self.divergence.ix[ date ] )
absDiv = np.abs( divergence )
threshold = float( self.threshold.ix[ date ] )
isEndOfDay = int( self.endOfDay.ix[ date ] ) == 1
if daysActive >= self.periodsToExit or isEndOfDay or absDiv < threshold / 4:
dCash = float( np.dot( weights.T, self.prices.ix[ date ] ) )
dAbs = float( np.dot( abs( weights.T ), self.prices.ix[ date ] ) )
tCosts = dAbs * self.tCosts
return( 0,
np.zeros( weights.shape ),
cash * ( 1 + self.riskFree ) + dCash - tCosts )
if daysActive == 0 and np.abs( divergence ) > threshold:
priceA = float( self.prices.ix[ date, self.first ] )
w = -float( priceA / self.prices.ix[ date, self.second ] )
rawWeights = -np.sign( divergence ) * np.array( [ [ 1 ], [ w ] ] )
newWeights = ( cash / priceA ) * rawWeights
dAbs = float( np.dot( abs( newWeights.T ), self.prices.ix[ date ] ) )
tCosts = dAbs * self.tCosts
return ( 1,
newWeights,
cash * ( 1 + self.riskFree ) - tCosts )
return( daysActive + 1,
weights,
cash * ( 1 + self.riskFree ) )
initial = ( 0, np.zeros( ( 2, 1 ) ), self.budget )
_, weights, cash = zip( *list( accumulate( strategy, self.dates, initial ) ) )
cashSeries = pd.DataFrame( { ( '|'.join( self.prices.columns ) ) : cash }, index = self.dates )
return Backtest.Results( cashSeries.pct_change().fillna( 0 ).as_matrix(),
self.dates,
name = self.name,
budget = self.budget,
weights = weights,
stocks = self.prices.columns )
class RiskModel( object ):
def dyad( self, x ):
return np.outer( x, x )
def getCovariance( self, date ):
        raise NotImplementedError
    def getVolDyad( self, date ):
        raise NotImplementedError
def getName( self ):
return self.name
def getDates( self ):
return self.dates
class EmpiricalCovariance( RiskModel ):
def __init__( self,
prices,
volFunc = lambda returns: pd.ewmstd( returns, span = 252 ),
name = 'Empirical' ):
self.name = name
self.dates = prices.index
self.returns = prices.pct_change()
self.vol = volFunc( self.returns )
self.volDyad = dict( ( today, self.dyad( self.vol.ix[ today.date() ] ) ) for today in self.dates )
self.empCov = dict( ( today, self.returns[ 1:today.date() ].corr().as_matrix() * self.volDyad[ today ] )
for today in self.dates[1:] )
def getCovariance( self, date, **kwargs ):
return self.empCov[ date ]
def getVolDyad( self, date ):
return self.volDyad[ date ]
class ShrunkCovarianceModel( RiskModel ):
def __init__( self, baseModel, name = 'Shrinkage (Constant Corr)' ):
self.baseModel = baseModel
self.name = name
self.dates = baseModel.getDates()
def getCovariance( self, date, rho ):
cov = self.baseModel.getCovariance( date, rho = rho )
constantCorr = rho*np.ones( cov.shape )
np.fill_diagonal( constantCorr, 1 )
vol = np.sqrt( np.diag( cov ) )
shrinkCov = np.outer( vol, vol ) * constantCorr
return ( 1 - rho ) * cov + shrinkCov
class NewsCovarianceModel( RiskModel ):
def __init__( self,
baseModel,
counts,
name = 'NewsCovariance' ):
def tfIdfCov( today ):
return normalize.TfIdf()( counts.ix[ :today.date() ] ).corr().as_matrix() * self.baseModel.getVolDyad( today )
self.name = name
counts = counts.to_dense()
self.baseModel = baseModel
self.dates = self.baseModel.getDates()
self.newsCov = dict( ( today, tfIdfCov( today ) )
for today in self.dates[1:] )
for date in self.dates[1:]:
newsCov = self.newsCov[ date ]
empCov = self.baseModel.getCovariance( date )
nans = newsCov != newsCov
self.newsCov[ date ][ nans ] = empCov[ nans ]
def getCovariance( self, date, rho = 0 ):
return rho * self.newsCov[ date ] + ( 1 - rho ) * self.baseModel.getCovariance( date )
def getVolDyad( self, date ):
return self.baseModel.getVolDyad( date )
def getName( self ):
return self.name
def getDates( self ):
return self.dates
class RiskOnlyStrategy( object ):
_min = 0
_max = None
def getWeights( self, cov ):
        raise NotImplementedError
def getName( self ):
return self.name
def getBenchmark( self ):
return self.benchmark
class MinimumVariance( RiskOnlyStrategy ):
_constraints = ( {'type': 'eq', 'fun': lambda x: np.sum( x ) - 1 } )
def __init__( self, benchmark = None, constrained = False ):
self.name = 'Minimum Variance'
self.benchmark = benchmark
self.constrained = constrained
def getWeights( self, cov ):
def constrained():
def obj( x ):
return np.dot( x, np.dot( cov, x ) )
n = float( cov.shape[ 0 ] )
x0 = np.ones( ( n, 1 ) ) / n
solution = optimize.minimize( obj, x0, method = 'SLSQP',
bounds = tuple( ( self._min, self._max ) for _ in range( 0, int( n ) ) ),
constraints = self._constraints )
weights = solution.x
weights.shape = ( n, 1 )
return weights
def analytical():
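            # Closed-form unconstrained minimum-variance weights,
            # w = cov^{-1} 1 / (1' cov^{-1} 1), computed via a linear solve
            # instead of an explicit matrix inverse.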
ones = np.ones( ( cov.shape[ 0 ], 1 ) )
invCovOnes = nplinalg.solve( cov, ones )
return invCovOnes / np.sum( invCovOnes )
if self.constrained:
return constrained()
return analytical()
class MaximumDiversification( RiskOnlyStrategy ):
def __init__( self, benchmark = None, constrained = False ):
self.name = 'Maximum Diversification'
self.benchmark = benchmark
self.constrained = constrained
def getWeights( self, cov ):
vol = np.sqrt( np.diag( cov ) )
def constrained():
n = float( cov.shape[ 0 ] )
constraints = ( {'type': 'eq', 'fun': lambda x: np.dot( x, vol ) - 1 } )
bounds = tuple( ( self._min , self._max ) for _ in range( 0, int( n ) ) )
def obj( x ):
return np.dot( x, np.dot( cov, x ) )
x0 = np.ones( ( n, 1 ) ) / n
solution = optimize.minimize( obj, x0, method = 'SLSQP', bounds = bounds, constraints = constraints )
weights = solution.x
weights.shape = ( n, 1 )
return weights / np.sum( weights )
def analytical():
invCovVol = nplinalg.solve( cov, vol )
weights = invCovVol / np.sum( invCovVol )
weights.shape = ( weights.shape[ 0 ], 1 )
return weights
if self.constrained:
return constrained()
return analytical()
class RiskParity( RiskOnlyStrategy ):
_constraints = ( { 'type': 'eq', 'fun': lambda x: np.sum( x[:-1] ) - 1 } )
_avgVol = 0.10 / 65.0
def __init__( self, benchmark = None ):
self.name = 'Risk Parity'
self.benchmark = benchmark
def getWeights( self, cov ):
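        # Risk parity: choose x so each asset's contribution to portfolio
        # variance, x_i * (cov x)_i, equals a common level phi. phi is carried
        # as the last element of the decision vector and the squared deviations
        # from it are minimized, subject to the weights summing to one.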
def obj( xi ):
phi = xi[-1]
x = xi[:-1]
diff = x * np.dot( cov, x ) - phi
return np.dot( diff, diff )
n = float( cov.shape[ 0 ] )
bounds = tuple( ( self._min , self._max ) for _ in range( 0, int( n ) + 1 ) )
x0 = np.concatenate( ( np.ones( ( n, 1 ) ), [[ self._avgVol ]] ) )
solution = optimize.minimize( obj, x0, method = 'SLSQP', bounds = bounds, constraints = self._constraints )
weights = solution.x[:-1]
weights.shape = ( n, 1 )
return weights
def testStrategies( empiricalDf, riskModels, strategies, dateRanges, rhos, budget = 1 ):
def getPortfolio( result ):
def valueWithStrategy( result ):
values = result.portfolioValues()
values.rename( columns = { result.name : 'Returns' }, inplace = True )
values.insert( 0, 'Strategy', result.name )
return values
values = valueWithStrategy( result )
df = pd.merge( values, result.toDataFrame(), on = 'Strategy' )
df.index = values.index
return df
results = [ Backtest( empiricalDf,
riskModel,
strategy,
begin,
end,
budget = budget,
rho = rho ).run()
for strategy in strategies
for begin, end in dateRanges
for rho in rhos
for riskModel in riskModels ]
portfolios = [ getPortfolio( result ) for result in results ]
return ( results, portfolios )
if __name__ == '__main__':
pass
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import json
import logging
import time
import luigi
from luigi import postgres
from luigi.contrib import rdbms
from luigi.s3 import S3PathTask, S3Target
logger = logging.getLogger('luigi-interface')
try:
import psycopg2
import psycopg2.errorcodes
except ImportError:
logger.warning("Loading postgres module without psycopg2 installed. "
"Will crash at runtime if postgres functionality is used.")
class RedshiftTarget(postgres.PostgresTarget):
"""
Target for a resource in Redshift.
Redshift is similar to postgres with a few adjustments
required by redshift.
"""
marker_table = luigi.configuration.get_config().get(
'redshift',
'marker-table',
'table_updates')
use_db_timestamps = False
class S3CopyToTable(rdbms.CopyToTable):
"""
Template task for inserting a data set into Redshift from s3.
Usage:
* Subclass and override the required attributes:
* `host`,
* `database`,
* `user`,
* `password`,
* `table`,
* `columns`,
* `aws_access_key_id`,
* `aws_secret_access_key`,
* `s3_load_path`.
"""
@abc.abstractproperty
def s3_load_path(self):
"""
Override to return the load path.
"""
return None
@abc.abstractproperty
def aws_access_key_id(self):
"""
Override to return the key id.
"""
return None
@abc.abstractproperty
def aws_secret_access_key(self):
"""
Override to return the secret access key.
"""
return None
@abc.abstractproperty
def copy_options(self):
"""
Add extra copy options, for example:
* TIMEFORMAT 'auto'
* IGNOREHEADER 1
* TRUNCATECOLUMNS
* IGNOREBLANKLINES
* DELIMITER '\t'
"""
return ''
@property
def prune_table(self):
"""
Override to set equal to the name of the table which is to be pruned.
Intended to be used in conjunction with prune_column and prune_date
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table
"""
return None
@property
def prune_column(self):
"""
Override to set equal to the column of the prune_table which is to be compared
Intended to be used in conjunction with prune_table and prune_date
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table
"""
return None
@property
def prune_date(self):
"""
Override to set equal to the date by which prune_column is to be compared
Intended to be used in conjunction with prune_table and prune_column
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table
"""
return None
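    # Sketch of the prune overrides described above (illustrative values; note
    # that prune() below interpolates these directly into SQL, so a date
    # literal needs its own quotes):
    #   prune_table = 'events'
    #   prune_column = 'created_at'
    #   prune_date = "'2015-01-01'"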
@property
def table_attributes(self):
"""
Add extra table attributes, for example:
DISTSTYLE KEY
DISTKEY (MY_FIELD)
SORTKEY (MY_FIELD_2, MY_FIELD_3)
"""
return ''
def do_truncate_table(self):
"""
Return True if table should be truncated before copying new data in.
"""
return False
def do_prune(self):
"""
Return True if prune_table, prune_column, and prune_date are implemented.
        If only a subset of prune variables are overridden, an exception is raised to remind the user to implement all or none.
Prune (data newer than prune_date deleted) before copying new data in.
"""
if self.prune_table and self.prune_column and self.prune_date:
return True
elif self.prune_table or self.prune_column or self.prune_date:
raise Exception('override zero or all prune variables')
else:
return False
@property
def table_type(self):
"""
Return table type (i.e. 'temp').
"""
return ''
def queries(self):
"""
Override to return a list of queries to be executed in order.
"""
return []
def truncate_table(self, connection):
query = "truncate %s" % self.table
cursor = connection.cursor()
try:
cursor.execute(query)
finally:
cursor.close()
def prune(self, connection):
query = "delete from %s where %s >= %s" % (self.prune_table, self.prune_column, self.prune_date)
cursor = connection.cursor()
try:
cursor.execute(query)
finally:
cursor.close()
def create_table(self, connection):
"""
Override to provide code for creating the target table.
By default it will be created using types (optionally)
specified in columns.
If overridden, use the provided connection object for
setting up the table in order to create the table and
insert data using the same transaction.
"""
if len(self.columns[0]) == 1:
# only names of columns specified, no types
raise NotImplementedError("create_table() not implemented "
"for %r and columns types not "
"specified" % self.table)
elif len(self.columns[0]) == 2:
# if columns is specified as (name, type) tuples
coldefs = ','.join(
'{name} {type}'.format(
name=name,
type=type) for name, type in self.columns
)
query = ("CREATE {type} TABLE "
"{table} ({coldefs}) "
"{table_attributes}").format(
                type=self.table_type,
table=self.table,
coldefs=coldefs,
                table_attributes=self.table_attributes)
connection.cursor().execute(query)
def run(self):
"""
If the target table doesn't exist, self.create_table
will be called to attempt to create the table.
"""
        if not self.table:
            raise Exception("table needs to be specified")
path = self.s3_load_path()
connection = self.output().connect()
cursor = connection.cursor()
self.init_copy(connection)
self.copy(cursor, path)
self.post_copy(cursor)
# update marker table
self.output().touch(connection)
connection.commit()
# commit and clean up
connection.close()
def copy(self, cursor, f):
"""
Defines copying from s3 into redshift.
"""
logger.info("Inserting file: %s", f)
cursor.execute("""
COPY %s from '%s'
CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s'
%s
;""" % (self.table, f, self.aws_access_key_id,
self.aws_secret_access_key, self.copy_options))
def output(self):
"""
Returns a RedshiftTarget representing the inserted dataset.
Normally you don't override this.
"""
return RedshiftTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id())
def does_table_exist(self, connection):
"""
Determine whether the table already exists.
"""
if '.' in self.table:
query = ("select 1 as table_exists "
"from information_schema.tables "
"where table_schema = %s and table_name = %s limit 1")
else:
query = ("select 1 as table_exists "
"from pg_table_def "
"where tablename = %s limit 1")
cursor = connection.cursor()
try:
cursor.execute(query, tuple(self.table.split('.')))
result = cursor.fetchone()
return bool(result)
finally:
cursor.close()
def init_copy(self, connection):
"""
Perform pre-copy sql - such as creating table, truncating, or removing data older than x.
"""
if not self.does_table_exist(connection):
logger.info("Creating table %s", self.table)
connection.reset()
self.create_table(connection)
elif self.do_truncate_table():
logger.info("Truncating table %s", self.table)
self.truncate_table(connection)
elif self.do_prune():
logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table)
self.prune(connection)
def post_copy(self, cursor):
"""
Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc.
"""
logger.info('Executing post copy queries')
for query in self.queries():
cursor.execute(query)
class S3CopyJSONToTable(S3CopyToTable):
"""
Template task for inserting a JSON data set into Redshift from s3.
Usage:
* Subclass and override the required attributes:
* `host`,
* `database`,
* `user`,
* `password`,
* `table`,
* `columns`,
* `aws_access_key_id`,
* `aws_secret_access_key`,
* `s3_load_path`,
* `jsonpath`,
* `copy_json_options`.
"""
@abc.abstractproperty
def jsonpath(self):
"""
Override the jsonpath schema location for the table.
"""
return ''
@abc.abstractproperty
def copy_json_options(self):
"""
Add extra copy options, for example:
* GZIP
* LZOP
"""
return ''
def copy(self, cursor, f):
"""
Defines copying JSON from s3 into redshift.
"""
cursor.execute("""
COPY %s from '%s'
CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s'
JSON AS '%s' %s
%s
;""" % (self.table, f, self.aws_access_key_id,
self.aws_secret_access_key, self.jsonpath,
self.copy_json_options, self.copy_options))
class RedshiftManifestTask(S3PathTask):
"""
Generic task to generate a manifest file that can be used
in S3CopyToTable in order to copy multiple files from your
s3 folder into a redshift table at once.
For full description on how to use the manifest file see
http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html
Usage:
* requires parameters
* path - s3 path to the generated manifest file, including the
name of the generated file
to be copied into a redshift table
* folder_paths - s3 paths to the folders containing files you wish to be copied
Output:
* generated manifest file
"""
    # should be overridden to point to a variety
    # of folders you wish to copy from
folder_paths = luigi.Parameter()
text_target = True
def run(self):
entries = []
for folder_path in self.folder_paths:
s3 = S3Target(folder_path)
client = s3.fs
for file_name in client.list(s3.path):
entries.append({
'url': '%s/%s' % (folder_path, file_name),
'mandatory': True
})
manifest = {'entries': entries}
target = self.output().open('w')
dump = json.dumps(manifest)
if not self.text_target:
dump = dump.encode('utf8')
target.write(dump)
target.close()
class KillOpenRedshiftSessions(luigi.Task):
"""
    A task for killing any open Redshift sessions
in a given database. This is necessary to prevent open user sessions
with transactions against the table from blocking drop or truncate
table commands.
Usage:
Subclass and override the required `host`, `database`,
`user`, and `password` attributes.
"""
# time in seconds to wait before
# reconnecting to Redshift if our session is killed too.
# 30 seconds is usually fine; 60 is conservative
connection_reset_wait_seconds = luigi.IntParameter(default=60)
@abc.abstractproperty
def host(self):
return None
@abc.abstractproperty
def database(self):
return None
@abc.abstractproperty
def user(self):
return None
@abc.abstractproperty
def password(self):
return None
def update_id(self):
"""
This update id will be a unique identifier
for this insert on this table.
"""
return self.task_id
def output(self):
"""
Returns a RedshiftTarget representing the inserted dataset.
Normally you don't override this.
"""
# uses class name as a meta-table
return RedshiftTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.__class__.__name__,
update_id=self.update_id())
def run(self):
"""
Kill any open Redshift sessions for the given database.
"""
connection = self.output().connect()
# kill any sessions other than ours and
# internal Redshift sessions (rdsdb)
query = ("select pg_terminate_backend(process) "
"from STV_SESSIONS "
"where db_name=%s "
"and user_name != 'rdsdb' "
"and process != pg_backend_pid()")
cursor = connection.cursor()
logger.info('Killing all open Redshift sessions for database: %s', self.database)
try:
cursor.execute(query, (self.database,))
cursor.close()
connection.commit()
except psycopg2.DatabaseError as e:
if e.message and 'EOF' in e.message:
# sometimes this operation kills the current session.
# rebuild the connection. Need to pause for 30-60 seconds
# before Redshift will allow us back in.
connection.close()
logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds)
time.sleep(self.connection_reset_wait_seconds)
logger.info('Reconnecting to Redshift')
connection = self.output().connect()
else:
raise
try:
self.output().touch(connection)
connection.commit()
finally:
connection.close()
logger.info('Done killing all open Redshift sessions for database: %s', self.database)
|
|
# tests for the config reader module
from tardis.io import config_reader
from astropy import units as u
import os
import pytest
import yaml
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from tardis.util import parse_quantity
def data_path(filename):
data_dir = os.path.dirname(__file__)
return os.path.join(data_dir, 'data', filename)
def test_config_namespace_attribute_test():
namespace = config_reader.ConfigurationNameSpace({'param1':1})
assert namespace.param1 == 1
def test_config_namespace_attribute_test_missing_attribute():
namespace = config_reader.ConfigurationNameSpace({'param1':1})
with pytest.raises(AttributeError):
assert namespace.param2 == 1
def test_quantity_linspace():
quantity_linspace_dict = dict(start='1.1e4 km/s', stop='2e4 cm/h', num=1000)
quantity_linspace = config_reader.parse_quantity_linspace(quantity_linspace_dict)
assert_almost_equal(quantity_linspace[0].value, 1.1e4)
assert_almost_equal(quantity_linspace[-1].to('cm/h').value, 2e4)
assert len(quantity_linspace) == 1001
def test_spectrum_list2_dict():
spectrum_dict = config_reader.parse_spectrum_list2dict(
[200*u.angstrom, 10000 * u.angstrom, 100])
assert_almost_equal(spectrum_dict['start'].to(u.angstrom).value, 200)
assert_almost_equal(spectrum_dict['end'].to(u.angstrom).value, 10000)
assert_almost_equal(spectrum_dict['bins'], 100)
def test_convergence_section_parser():
test_convergence_section = {'type': 'damped',
'lock_t_inner_cyles': 1,
't_inner_update_exponent': -0.5,
'global_convergence_parameters' : {
'damping_constant': 0.5},
't_rad': {'damping_constant':1.0}}
parsed_convergence_section = config_reader.parse_convergence_section(
test_convergence_section)
assert_almost_equal(parsed_convergence_section['t_rad']['damping_constant'],
1.0)
assert_almost_equal(parsed_convergence_section['w']['damping_constant'],
0.5)
def test_parse_density_section():
density_dict = {'type': 'branch85_w7', 'w7_time_0': 0.000231481 * u.day,
'w7_rho_0': 3e29 * u.Unit('g/cm^3'),
'w7_v_0': 1 * u.Unit('km/s')}
velocities = np.arange(10000, 20000, 1000) * u.Unit('km/s')
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = config_reader.parse_density_section(density_dict,
v_inner, v_outer,
10 * u.day)
desired_mean_densities_0 = 2.58940268372887e-13 * u.Unit('g/cm^3')
assert_almost_equal(mean_densities[0].cgs.value,
desired_mean_densities_0.cgs.value)
class TestParsePaper1Config:
def setup(self):
#general parsing of the paper config
self.config = config_reader.Configuration.from_yaml(data_path('paper1_tardis_configv1.yml'),
test_parser=True)
self.yaml_data = yaml.load(open(data_path('paper1_tardis_configv1.yml')))
def test_abundances(self):
oxygen_abundance = self.yaml_data['model']['abundances']['O']
assert_array_almost_equal(oxygen_abundance, self.config.abundances.ix[8].values)
def test_velocities(self):
assert_almost_equal(parse_quantity(self.yaml_data['model']['structure']['velocity']['start']).cgs.value,
self.config.structure.v_inner[0].cgs.value)
assert_almost_equal(parse_quantity(self.yaml_data['model']['structure']['velocity']['stop']).cgs.value,
self.config.structure.v_outer[-1].cgs.value)
assert len(self.config.structure.v_outer) == (self.yaml_data['model']['structure']['velocity']['num'])
def test_densities(self):
assert_almost_equal(self.config['structure']['mean_densities'][0].cgs.value,
(7.542803599143591e-14 * u.Unit('g/cm^3')).value)
assert_almost_equal(self.config['structure']['mean_densities'][-1].cgs.value,
(1.432259798833509e-15 * u.Unit('g/cm^3')).value)
def test_t_inner(self):
assert_almost_equal(self.config['plasma']['t_inner'].value,
9974.969233778693)
def test_montecarlo_black_body_sampling(self):
black_body_sampling = self.config['montecarlo']['black_body_sampling']
assert_almost_equal(black_body_sampling['start'].to(u.angstrom).value, 50)
assert_almost_equal(black_body_sampling['end'].to(u.angstrom).value, 200000)
assert_almost_equal(black_body_sampling['samples'], int(1e6))
def test_number_of_packets(self):
assert_almost_equal(self.config['montecarlo']['no_of_packets'], 200000)
def test_spectrum_section(self):
assert_almost_equal(self.config['spectrum']['start'].value,
parse_quantity(self.yaml_data['spectrum']['start']).value)
assert_almost_equal(self.config['spectrum']['end'].value,
parse_quantity(self.yaml_data['spectrum']['stop']).value)
assert self.config['spectrum']['bins'] == self.yaml_data['spectrum']['num']
def test_time_explosion(self):
assert_almost_equal(self.config['supernova']['time_explosion'].to(
u.day).value, 13.0)
def test_last_no_of_packets():
yaml_data = yaml.load(open(data_path('paper1_tardis_configv1.yml')))
del yaml_data['montecarlo']['last_no_of_packets']
config = config_reader.Configuration.from_config_dict(yaml_data,
test_parser=True)
assert (config.montecarlo.last_no_of_packets ==
config.montecarlo.no_of_packets)
class TestParseConfigV1ASCIIDensity:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_ascii_density.yml'
self.config = config_reader.Configuration.from_yaml(data_path(filename),
test_parser=True)
self.yaml_data = yaml.load(open(data_path(filename)))
def test_velocities(self):
assert self.config.structure.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.config.structure.v_inner[0].value, 1e4 * 1e5)
def test_abundances(self):
oxygen_abundance = self.yaml_data['model']['abundances']['O']
assert_array_almost_equal(oxygen_abundance, self.config.abundances.ix[8].values)
class TestParseConfigV1ArtisDensity:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_artis_density.yml'
self.config = config_reader.Configuration.from_yaml(data_path(filename),
test_parser=True)
self.yaml_data = yaml.load(open(data_path(filename)))
def test_velocities(self):
assert self.config.structure.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.config.structure.v_inner[0].value, 1.259375e+03 * 1e5)
def test_abundances(self):
oxygen_abundance = self.yaml_data['model']['abundances']['O']
assert_array_almost_equal(oxygen_abundance, self.config.abundances.ix[8].values)
class TestParseConfigV1ArtisDensityAbundances:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_artis_density.yml'
self.yaml_data = yaml.load(open(data_path(filename)))
self.yaml_data['model']['abundances'] = {'type': 'file',
'filename': 'tardis/io/tests/data/artis_abundances.dat',
'filetype': 'artis'}
self.config = config_reader.Configuration.from_config_dict(self.yaml_data,
test_parser=True)
def test_velocities(self):
assert self.config.structure.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.config.structure.v_inner[0].value, 1.259375e+03 * 1e5)
def test_abundances(self):
assert_almost_equal(self.config.abundances.ix[14, 54], 0.21864420000000001)
class TestParseConfigV1ArtisDensityAbundancesVSlice:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_artis_density_v_slice.yml'
self.yaml_data = yaml.load(open(data_path(filename)))
self.yaml_data['model']['abundances'] = {'type': 'file',
'filename': 'tardis/io/tests/data/artis_abundances.dat',
'filetype': 'artis'}
self.config = config_reader.Configuration.from_config_dict(self.yaml_data,
test_parser=True)
def test_velocities(self):
assert self.config.structure.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.config.structure.v_inner[0].to(
u.km / u.s).value, 9000)
def test_abundances(self):
assert_almost_equal(self.config.abundances.ix[14, 31], 2.156751e-01)
class TestParseConfigV1UniformDensity:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_uniform_density.yml'
self.yaml_data = yaml.load(open(data_path(filename)))
self.config = config_reader.Configuration.from_config_dict(self.yaml_data,
test_parser=True)
def test_density(self):
assert_array_almost_equal(self.config.structure.mean_densities.to(u.Unit('g / cm3')).value,
1.e-14)
class TestParseConfigTinner:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_uniform_density.yml'
self.yaml_data = yaml.load(open(data_path(filename)))
self.yaml_data['plasma']['initial_t_inner'] = "2508 K"
self.config = config_reader.Configuration.from_config_dict(self.yaml_data,
test_parser=True)
def test_initial_temperature(self):
assert_almost_equal(self.config.plasma.t_inner.value, 2508)
class TestParseConfigV1ArtisDensityAbundancesAllAscii:
def setup(self):
#general parsing of the paper config
filename = 'tardis_configv1_ascii_density_abund.yml'
self.yaml_data = yaml.load(open(data_path(filename)))
self.yaml_data['model']['structure']['filename'] = 'tardis/io/tests/data/density.dat'
self.yaml_data['model']['abundances']['filename'] = 'tardis/io/tests/data/abund.dat'
self.config = config_reader.Configuration.from_config_dict(self.yaml_data,
test_parser=True)
def test_velocities(self):
assert self.config.structure.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.config.structure.v_inner[0].to(
u.km / u.s).value, 11000)
def test_abundances(self):
assert_almost_equal(self.config.abundances.ix[14, 0], 0.1)
assert_almost_equal(self.config.abundances.ix[14, 1], 0.2)
assert_almost_equal(self.config.abundances.ix[14, 2], 0.2)
assert_almost_equal(self.config.abundances.ix[14, 3], 0.2)
assert_almost_equal(self.config.abundances.ix[14, 4], 0.2)
assert_almost_equal(self.config.abundances.ix[14, 5], 0.2)
assert_almost_equal(self.config.abundances.ix[14, 6], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 0], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 1], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 2], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 3], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 4], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 5], 0.0)
assert_almost_equal(self.config.abundances.ix[6, 6], 0.5)
def test_densities(self):
assert_almost_equal(self.config.structure.mean_densities[0].to(u.Unit('g/cm3')).value, 9.7656229e-11 / 13.0**3 )
assert_almost_equal(self.config.structure.mean_densities[1].to(u.Unit('g/cm3')).value, 4.8170911e-11/ 13.0**3 )
assert_almost_equal(self.config.structure.mean_densities[2].to(u.Unit('g/cm3')).value, 2.5600000e-11/ 13.0**3 )
assert_almost_equal(self.config.structure.mean_densities[3].to(u.Unit('g/cm3')).value, 1.4450533e-11/ 13.0**3 )
assert_almost_equal(self.config.structure.mean_densities[4].to(u.Unit('g/cm3')).value, 8.5733893e-11/ 13.0**3 )
assert_almost_equal(self.config.structure.mean_densities[5].to(u.Unit('g/cm3')).value, 5.3037103e-11/ 13.0**3 )
assert_almost_equal(self.config.structure.mean_densities[6].to(u.Unit('g/cm3')).value, 3.3999447e-11/ 13.0**3 )
def test_ascii_reader_power_law():
with open(data_path('tardis_configv1_density_power_law_test.yml')) as f:
yaml_data = yaml.load(f)
#for later use
density_data = yaml_data['model']['structure']['density']
t_explosion = density_data['time_0']
rho_0 = density_data['rho_0']
exponent = density_data['exponent']
v_inner = yaml_data['model']['structure']['velocity']['start']
v_outer = yaml_data['model']['structure']['velocity']['stop']
my_conf = config_reader.Configuration.from_yaml(data_path('tardis_configv1_density_power_law_test.yml'),test_parser=True)
structure = my_conf['structure']
    expected_densities = [3.29072513e-14, 2.70357804e-14, 2.23776573e-14,
                          1.86501954e-14, 1.56435277e-14, 1.32001689e-14, 1.12007560e-14,
                          9.55397475e-15, 8.18935779e-15, 7.05208050e-15, 6.09916083e-15,
                          5.29665772e-15, 4.61758699e-15, 4.04035750e-15, 3.54758837e-15,
                          3.12520752e-15, 2.76175961e-15, 2.44787115e-15, 2.17583442e-15,
                          1.93928168e-15]
    assert structure['no_of_shells'] == 20
    for i, mdens in enumerate(expected_densities):
        assert_almost_equal(structure['mean_densities'][i].to(
            u.Unit('g / (cm3)')).value, mdens)
def test_ascii_reader_exponential_law():
with open(data_path('tardis_configv1_density_exponential_test.yml')) as f:
yaml_data = yaml.load(f)
#for later use
density_data = yaml_data['model']['structure']['density']
t_explosion = density_data['time_0']
rho_0 = density_data['rho_0']
v0 = density_data['v_0']
v_inner = yaml_data['model']['structure']['velocity']['start']
v_outer = yaml_data['model']['structure']['velocity']['stop']
my_conf = config_reader.Configuration.from_yaml(data_path('tardis_configv1_density_exponential_test.yml'),test_parser=True)
structure = my_conf['structure']
    expected_densities = [5.18114795e-14, 4.45945537e-14, 3.83828881e-14,
                          3.30364579e-14, 2.84347428e-14, 2.44740100e-14,
                          2.10649756e-14, 1.81307925e-14, 1.56053177e-14,
                          1.34316215e-14, 1.15607037e-14, 9.95038990e-15,
                          8.56437996e-15, 7.37143014e-15, 6.34464872e-15,
                          5.46088976e-15, 4.70023138e-15, 4.04552664e-15,
                          3.48201705e-15, 2.99699985e-15]
    expected_unit = 'g / (cm3)'
    assert structure['no_of_shells'] == 20
    for i, mdens in enumerate(expected_densities):
assert_almost_equal(structure['mean_densities'][i].value,mdens)
assert structure['mean_densities'][i].unit == u.Unit(expected_unit)
    # TODO: write tests for inner and outer boundary indices
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
import array
RESULT_UNIQUE_NAME = 0
RESULT_REGISTERED = 4
RESULT_DEREGISTERED = 5
RESULT_DUPLICATE = 6
RESULT_DUPLICATE_DEREG = 7
RESULT_GROUP_NAME = 128
RESULT_NRC_GOODRET = 0
RESULT_NRC_BUFLEN = 1
RESULT_NRC_ILLCMD = 3
RESULT_NRC_CMDTMO = 5
RESULT_NRC_INCOMP = 6
RESULT_NRC_BADDR = 7
RESULT_NRC_SNUMOUT = 8
RESULT_NRC_NORES = 9
RESULT_NRC_SCLOSED = 10
RESULT_NRC_CMDCAN = 11
RESULT_NRC_DUPNAME = 13
RESULT_NRC_NAMTFUL = 14
RESULT_NRC_ACTSES = 15
RESULT_NRC_LOCTFUL = 17
RESULT_NRC_REMTFUL = 18
RESULT_NRC_ILLNN = 19
RESULT_NRC_NOCALL = 20
RESULT_NRC_NOWILD = 21
RESULT_NRC_INUSE = 22
RESULT_NRC_NAMERR = 23
RESULT_NRC_SABORT = 24
RESULT_NRC_NAMCONF = 25
RESULT_NRC_IFBUSY = 33
RESULT_NRC_TOOMANY = 34
RESULT_NRC_BRIDGE = 35
RESULT_NRC_CANOCCR = 36
RESULT_NRC_CANCEL = 38
RESULT_NRC_DUPENV = 48
RESULT_NRC_ENVNOTDEF = 52
RESULT_NRC_OSRESNOTAV = 53
RESULT_NRC_MAXAPPS = 54
RESULT_NRC_NOSAPS = 55
RESULT_NRC_NORESOURCES = 56
RESULT_NRC_INVADDRESS = 57
RESULT_NRC_INVDDID = 59
RESULT_NRC_LOCKFAIL = 60
RESULT_NRC_OPENERR = 63
RESULT_NRC_SYSTEM = 64
RESULT_NRC_PENDING = 255
class ResultNCB:
def __init__(self):
self.__dict__['ncb_command'] = 0
self.__dict__['ncb_retcode'] = 0
self.__dict__['ncb_lsn'] = 0
self.__dict__['ncb_num'] = 0
self.__dict__['ncb_rto'] = 0
self.__dict__['ncb_sto'] = 0
self.__dict__['ncb_lana_num'] = 0
self.__dict__['ncb_cmd_cplt'] = 0
self.__dict__['ncb_callname'] = ''
self.__dict__['ncb_name'] = ''
def __getattr__(self, name):
if name == 'ncb_command':
return self.__dict__['ncb_command']
if name == 'ncb_retcode':
return self.__dict__['ncb_retcode']
if name == 'ncb_lsn':
return self.__dict__['ncb_lsn']
if name == 'ncb_num':
return self.__dict__['ncb_num']
if name == 'ncb_rto':
return self.__dict__['ncb_rto']
if name == 'ncb_sto':
return self.__dict__['ncb_sto']
if name == 'ncb_lana_num':
return self.__dict__['ncb_lana_num']
if name == 'ncb_cmd_cplt':
return self.__dict__['ncb_cmd_cplt']
if name == 'ncb_callname':
return self.__dict__['ncb_callname']
if name == 'ncb_name':
return self.__dict__['ncb_name']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'ncb_command':
self.__dict__['ncb_command'] = value
elif name == 'ncb_retcode':
self.__dict__['ncb_retcode'] = value
elif name == 'ncb_lsn':
self.__dict__['ncb_lsn'] = value
elif name == 'ncb_num':
self.__dict__['ncb_num'] = value
elif name == 'ncb_rto':
self.__dict__['ncb_rto'] = value
elif name == 'ncb_sto':
self.__dict__['ncb_sto'] = value
elif name == 'ncb_lana_num':
self.__dict__['ncb_lana_num'] = value
elif name == 'ncb_cmd_cplt':
self.__dict__['ncb_cmd_cplt'] = value
elif name == 'ncb_callname':
self.__dict__['ncb_callname'] = value
elif name == 'ncb_name':
self.__dict__['ncb_name'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU8(MSG_KEY_RESULT_NCB_COMMAND, self.__dict__['ncb_command'])
submsg.AddU8(MSG_KEY_RESULT_NCB_RETCODE, self.__dict__['ncb_retcode'])
submsg.AddU8(MSG_KEY_RESULT_NCB_LSN, self.__dict__['ncb_lsn'])
submsg.AddU8(MSG_KEY_RESULT_NCB_NUM, self.__dict__['ncb_num'])
submsg.AddU8(MSG_KEY_RESULT_NCB_RTO, self.__dict__['ncb_rto'])
submsg.AddU8(MSG_KEY_RESULT_NCB_STO, self.__dict__['ncb_sto'])
submsg.AddU8(MSG_KEY_RESULT_NCB_LANA_NUM, self.__dict__['ncb_lana_num'])
submsg.AddU8(MSG_KEY_RESULT_NCB_CMD_CPLT, self.__dict__['ncb_cmd_cplt'])
submsg.AddStringUtf8(MSG_KEY_RESULT_NCB_CALLNAME, self.__dict__['ncb_callname'])
submsg.AddStringUtf8(MSG_KEY_RESULT_NCB_NAME, self.__dict__['ncb_name'])
mmsg.AddMessage(MSG_KEY_RESULT_NCB, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_NCB, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['ncb_command'] = submsg.FindU8(MSG_KEY_RESULT_NCB_COMMAND)
self.__dict__['ncb_retcode'] = submsg.FindU8(MSG_KEY_RESULT_NCB_RETCODE)
self.__dict__['ncb_lsn'] = submsg.FindU8(MSG_KEY_RESULT_NCB_LSN)
self.__dict__['ncb_num'] = submsg.FindU8(MSG_KEY_RESULT_NCB_NUM)
self.__dict__['ncb_rto'] = submsg.FindU8(MSG_KEY_RESULT_NCB_RTO)
self.__dict__['ncb_sto'] = submsg.FindU8(MSG_KEY_RESULT_NCB_STO)
self.__dict__['ncb_lana_num'] = submsg.FindU8(MSG_KEY_RESULT_NCB_LANA_NUM)
self.__dict__['ncb_cmd_cplt'] = submsg.FindU8(MSG_KEY_RESULT_NCB_CMD_CPLT)
self.__dict__['ncb_callname'] = submsg.FindString(MSG_KEY_RESULT_NCB_CALLNAME)
self.__dict__['ncb_name'] = submsg.FindString(MSG_KEY_RESULT_NCB_NAME)
class ResultAdapter:
def __init__(self):
self.__dict__['adapter_address'] = array.array('B')
i = 0
while i < 6:
self.__dict__['adapter_address'].append(0)
i = i + 1
self.__dict__['adapter_type'] = 0
self.__dict__['rev_major'] = 0
self.__dict__['rev_minor'] = 0
self.__dict__['duration'] = 0
self.__dict__['name_count'] = 0
self.__dict__['frmr_recv'] = 0
self.__dict__['frmr_xmit'] = 0
self.__dict__['iframe_recv_err'] = 0
self.__dict__['xmit_aborts'] = 0
self.__dict__['xmit_success'] = 0
self.__dict__['recv_success'] = 0
self.__dict__['iframe_xmit_err'] = 0
self.__dict__['recv_buff_unavail'] = 0
self.__dict__['t1_timeouts'] = 0
self.__dict__['ti_timeouts'] = 0
self.__dict__['free_ncbs'] = 0
self.__dict__['max_dgram_size'] = 0
self.__dict__['max_sess_pkt_size'] = 0
self.__dict__['pending_sess'] = 0
self.__dict__['max_cfg_sess'] = 0
self.__dict__['max_cfg_ncbs'] = 0
self.__dict__['max_ncbs'] = 0
self.__dict__['xmit_buf_unavail'] = 0
self.__dict__['max_sess'] = 0
def __getattr__(self, name):
if name == 'adapter_address':
return self.__dict__['adapter_address']
if name == 'adapter_type':
return self.__dict__['adapter_type']
if name == 'rev_major':
return self.__dict__['rev_major']
if name == 'rev_minor':
return self.__dict__['rev_minor']
if name == 'duration':
return self.__dict__['duration']
if name == 'name_count':
return self.__dict__['name_count']
if name == 'frmr_recv':
return self.__dict__['frmr_recv']
if name == 'frmr_xmit':
return self.__dict__['frmr_xmit']
if name == 'iframe_recv_err':
return self.__dict__['iframe_recv_err']
if name == 'xmit_aborts':
return self.__dict__['xmit_aborts']
if name == 'xmit_success':
return self.__dict__['xmit_success']
if name == 'recv_success':
return self.__dict__['recv_success']
if name == 'iframe_xmit_err':
return self.__dict__['iframe_xmit_err']
if name == 'recv_buff_unavail':
return self.__dict__['recv_buff_unavail']
if name == 't1_timeouts':
return self.__dict__['t1_timeouts']
if name == 'ti_timeouts':
return self.__dict__['ti_timeouts']
if name == 'free_ncbs':
return self.__dict__['free_ncbs']
if name == 'max_dgram_size':
return self.__dict__['max_dgram_size']
if name == 'max_sess_pkt_size':
return self.__dict__['max_sess_pkt_size']
if name == 'pending_sess':
return self.__dict__['pending_sess']
if name == 'max_cfg_sess':
return self.__dict__['max_cfg_sess']
if name == 'max_cfg_ncbs':
return self.__dict__['max_cfg_ncbs']
if name == 'max_ncbs':
return self.__dict__['max_ncbs']
if name == 'xmit_buf_unavail':
return self.__dict__['xmit_buf_unavail']
if name == 'max_sess':
return self.__dict__['max_sess']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'adapter_address':
self.__dict__['adapter_address'] = value
elif name == 'adapter_type':
self.__dict__['adapter_type'] = value
elif name == 'rev_major':
self.__dict__['rev_major'] = value
elif name == 'rev_minor':
self.__dict__['rev_minor'] = value
elif name == 'duration':
self.__dict__['duration'] = value
elif name == 'name_count':
self.__dict__['name_count'] = value
elif name == 'frmr_recv':
self.__dict__['frmr_recv'] = value
elif name == 'frmr_xmit':
self.__dict__['frmr_xmit'] = value
elif name == 'iframe_recv_err':
self.__dict__['iframe_recv_err'] = value
elif name == 'xmit_aborts':
self.__dict__['xmit_aborts'] = value
elif name == 'xmit_success':
self.__dict__['xmit_success'] = value
elif name == 'recv_success':
self.__dict__['recv_success'] = value
elif name == 'iframe_xmit_err':
self.__dict__['iframe_xmit_err'] = value
elif name == 'recv_buff_unavail':
self.__dict__['recv_buff_unavail'] = value
elif name == 't1_timeouts':
self.__dict__['t1_timeouts'] = value
elif name == 'ti_timeouts':
self.__dict__['ti_timeouts'] = value
elif name == 'free_ncbs':
self.__dict__['free_ncbs'] = value
elif name == 'max_dgram_size':
self.__dict__['max_dgram_size'] = value
elif name == 'max_sess_pkt_size':
self.__dict__['max_sess_pkt_size'] = value
elif name == 'pending_sess':
self.__dict__['pending_sess'] = value
elif name == 'max_cfg_sess':
self.__dict__['max_cfg_sess'] = value
elif name == 'max_cfg_ncbs':
self.__dict__['max_cfg_ncbs'] = value
elif name == 'max_ncbs':
self.__dict__['max_ncbs'] = value
elif name == 'xmit_buf_unavail':
self.__dict__['xmit_buf_unavail'] = value
elif name == 'max_sess':
self.__dict__['max_sess'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddData(MSG_KEY_RESULT_ADAPTER_ADDRESS, self.__dict__['adapter_address'])
submsg.AddU8(MSG_KEY_RESULT_ADAPTER_TYPE, self.__dict__['adapter_type'])
submsg.AddU8(MSG_KEY_RESULT_ADAPTER_REV_MAJOR, self.__dict__['rev_major'])
submsg.AddU8(MSG_KEY_RESULT_ADAPTER_REV_MINOR, self.__dict__['rev_minor'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_DURATION, self.__dict__['duration'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_NAME_COUNT, self.__dict__['name_count'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_FRMR_RECV, self.__dict__['frmr_recv'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_FRMR_XMIT, self.__dict__['frmr_xmit'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_IFRAME_RECV_ERR, self.__dict__['iframe_recv_err'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_XMIT_ABORTS, self.__dict__['xmit_aborts'])
submsg.AddU32(MSG_KEY_RESULT_ADAPTER_XMIT_SUCCESS, self.__dict__['xmit_success'])
submsg.AddU32(MSG_KEY_RESULT_ADAPTER_RECV_SUCCESS, self.__dict__['recv_success'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_IFRAME_XMIT_ERR, self.__dict__['iframe_xmit_err'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_RECV_BUFF_UNAVAIL, self.__dict__['recv_buff_unavail'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_T1_TIMEOUTS, self.__dict__['t1_timeouts'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_TI_TIMEOUTS, self.__dict__['ti_timeouts'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_FREE_NCBS, self.__dict__['free_ncbs'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_MAX_DGRAM_SIZE, self.__dict__['max_dgram_size'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_MAX_SESS_PKT_SIZE, self.__dict__['max_sess_pkt_size'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_PENDING_SESS, self.__dict__['pending_sess'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_MAX_CFG_SESS, self.__dict__['max_cfg_sess'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_MAX_CFG_NCBS, self.__dict__['max_cfg_ncbs'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_MAX_NCBS, self.__dict__['max_ncbs'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_XMIT_BUF_UNAVAIL, self.__dict__['xmit_buf_unavail'])
submsg.AddU16(MSG_KEY_RESULT_ADAPTER_MAX_SESS, self.__dict__['max_sess'])
mmsg.AddMessage(MSG_KEY_RESULT_ADAPTER, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_ADAPTER, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['adapter_address'] = submsg.FindData(MSG_KEY_RESULT_ADAPTER_ADDRESS)
self.__dict__['adapter_type'] = submsg.FindU8(MSG_KEY_RESULT_ADAPTER_TYPE)
self.__dict__['rev_major'] = submsg.FindU8(MSG_KEY_RESULT_ADAPTER_REV_MAJOR)
self.__dict__['rev_minor'] = submsg.FindU8(MSG_KEY_RESULT_ADAPTER_REV_MINOR)
self.__dict__['duration'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_DURATION)
self.__dict__['name_count'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_NAME_COUNT)
self.__dict__['frmr_recv'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_FRMR_RECV)
self.__dict__['frmr_xmit'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_FRMR_XMIT)
self.__dict__['iframe_recv_err'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_IFRAME_RECV_ERR)
self.__dict__['xmit_aborts'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_XMIT_ABORTS)
self.__dict__['xmit_success'] = submsg.FindU32(MSG_KEY_RESULT_ADAPTER_XMIT_SUCCESS)
self.__dict__['recv_success'] = submsg.FindU32(MSG_KEY_RESULT_ADAPTER_RECV_SUCCESS)
self.__dict__['iframe_xmit_err'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_IFRAME_XMIT_ERR)
self.__dict__['recv_buff_unavail'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_RECV_BUFF_UNAVAIL)
self.__dict__['t1_timeouts'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_T1_TIMEOUTS)
self.__dict__['ti_timeouts'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_TI_TIMEOUTS)
self.__dict__['free_ncbs'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_FREE_NCBS)
self.__dict__['max_dgram_size'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_MAX_DGRAM_SIZE)
self.__dict__['max_sess_pkt_size'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_MAX_SESS_PKT_SIZE)
self.__dict__['pending_sess'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_PENDING_SESS)
self.__dict__['max_cfg_sess'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_MAX_CFG_SESS)
self.__dict__['max_cfg_ncbs'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_MAX_CFG_NCBS)
self.__dict__['max_ncbs'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_MAX_NCBS)
self.__dict__['xmit_buf_unavail'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_XMIT_BUF_UNAVAIL)
self.__dict__['max_sess'] = submsg.FindU16(MSG_KEY_RESULT_ADAPTER_MAX_SESS)
class ResultStatus:
def __init__(self):
self.__dict__['status'] = 0
self.__dict__['errType'] = 0
self.__dict__['rtnCode'] = 0
def __getattr__(self, name):
if name == 'status':
return self.__dict__['status']
if name == 'errType':
return self.__dict__['errType']
if name == 'rtnCode':
return self.__dict__['rtnCode']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'status':
self.__dict__['status'] = value
elif name == 'errType':
self.__dict__['errType'] = value
elif name == 'rtnCode':
self.__dict__['rtnCode'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_STATUS_STATUS, self.__dict__['status'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_ERR_TYPE, self.__dict__['errType'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_RTN_CODE, self.__dict__['rtnCode'])
mmsg.AddMessage(MSG_KEY_RESULT_STATUS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_STATUS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['status'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_STATUS)
self.__dict__['errType'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_ERR_TYPE)
self.__dict__['rtnCode'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_RTN_CODE)
class ResultName:
def __init__(self):
self.__dict__['type'] = 0
self.__dict__['nameFlags'] = 0
self.__dict__['networkName'] = ''
def __getattr__(self, name):
if name == 'type':
return self.__dict__['type']
if name == 'nameFlags':
return self.__dict__['nameFlags']
if name == 'networkName':
return self.__dict__['networkName']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'type':
self.__dict__['type'] = value
elif name == 'nameFlags':
self.__dict__['nameFlags'] = value
elif name == 'networkName':
self.__dict__['networkName'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU8(MSG_KEY_RESULT_NAME_TYPE, self.__dict__['type'])
submsg.AddU8(MSG_KEY_RESULT_NAME_FLAGS, self.__dict__['nameFlags'])
submsg.AddStringUtf8(MSG_KEY_RESULT_NAME_NETWORK_NAME, self.__dict__['networkName'])
mmsg.AddMessage(MSG_KEY_RESULT_NAME, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_NAME, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['type'] = submsg.FindU8(MSG_KEY_RESULT_NAME_TYPE)
self.__dict__['nameFlags'] = submsg.FindU8(MSG_KEY_RESULT_NAME_FLAGS)
self.__dict__['networkName'] = submsg.FindString(MSG_KEY_RESULT_NAME_NETWORK_NAME)
|
|
from datetime import datetime, timedelta
from operator import attrgetter
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from django.test import TestCase, skipUnlessDBFeature
from ..models import Document, Poll
User = get_user_model()
class AsOfTest(TestCase):
model = Document
def setUp(self):
user = User.objects.create_user("tester", "tester@example.com")
self.now = datetime.now()
self.yesterday = self.now - timedelta(days=1)
self.obj = self.model.objects.create()
self.obj.changed_by = user
self.obj.save()
self.model.objects.all().delete() # allows us to leave PK on instance
(
self.delete_history,
self.change_history,
self.create_history,
) = self.model.history.all()
self.create_history.history_date = self.now - timedelta(days=2)
self.create_history.save()
self.change_history.history_date = self.now - timedelta(days=1)
self.change_history.save()
self.delete_history.history_date = self.now
self.delete_history.save()
def test_created_after(self):
"""An object created after the 'as of' date should not be
included.
"""
as_of_list = list(self.model.history.as_of(self.now - timedelta(days=5)))
self.assertFalse(as_of_list)
def test_deleted_before(self):
"""An object deleted before the 'as of' date should not be
included.
"""
as_of_list = list(self.model.history.as_of(self.now + timedelta(days=1)))
self.assertFalse(as_of_list)
def test_deleted_after(self):
"""An object created before, but deleted after the 'as of'
date should be included.
"""
as_of_list = list(self.model.history.as_of(self.now - timedelta(days=1)))
self.assertEqual(len(as_of_list), 1)
self.assertEqual(as_of_list[0].pk, self.obj.pk)
def test_modified(self):
"""An object modified before the 'as of' date should reflect
the last version.
"""
as_of_list = list(self.model.history.as_of(self.now - timedelta(days=1)))
self.assertEqual(as_of_list[0].changed_by, self.obj.changed_by)
class AsOfAdditionalTestCase(TestCase):
def test_create_and_delete(self):
now = datetime.now()
document = Document.objects.create()
document.delete()
for doc_change in Document.history.all():
doc_change.history_date = now
doc_change.save()
docs_as_of_tmw = Document.history.as_of(now + timedelta(days=1))
self.assertFalse(list(docs_as_of_tmw))
def test_multiple(self):
document1 = Document.objects.create()
document2 = Document.objects.create()
historical = Document.history.as_of(datetime.now() + timedelta(days=1))
self.assertEqual(list(historical), [document1, document2])
class BulkHistoryCreateTestCase(TestCase):
def setUp(self):
self.data = [
Poll(id=1, question="Question 1", pub_date=datetime.now()),
Poll(id=2, question="Question 2", pub_date=datetime.now()),
Poll(id=3, question="Question 3", pub_date=datetime.now()),
Poll(id=4, question="Question 4", pub_date=datetime.now()),
]
def test_simple_bulk_history_create(self):
created = Poll.history.bulk_history_create(self.data)
self.assertEqual(len(created), 4)
self.assertQuerysetEqual(
Poll.history.order_by("question"),
["Question 1", "Question 2", "Question 3", "Question 4"],
attrgetter("question"),
)
self.assertTrue(
all([history.history_type == "+" for history in Poll.history.all()])
)
created = Poll.history.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Poll.history.count(), 4)
def test_bulk_history_create_with_change_reason(self):
for poll in self.data:
poll._change_reason = "reason"
Poll.history.bulk_history_create(self.data)
self.assertTrue(
all(
[
history.history_change_reason == "reason"
for history in Poll.history.all()
]
)
)
def test_bulk_history_create_with_default_user(self):
user = User.objects.create_user("tester", "tester@example.com")
Poll.history.bulk_history_create(self.data, default_user=user)
self.assertTrue(
all([history.history_user == user for history in Poll.history.all()])
)
def test_bulk_history_create_with_default_change_reason(self):
Poll.history.bulk_history_create(self.data, default_change_reason="test")
self.assertTrue(
all(
[
history.history_change_reason == "test"
for history in Poll.history.all()
]
)
)
def test_bulk_history_create_history_user_overrides_default(self):
user1 = User.objects.create_user("tester1", "tester1@example.com")
user2 = User.objects.create_user("tester2", "tester2@example.com")
for data in self.data:
data._history_user = user1
Poll.history.bulk_history_create(self.data, default_user=user2)
self.assertTrue(
all([history.history_user == user1 for history in Poll.history.all()])
)
def test_bulk_history_create_change_reason_overrides_default(self):
for data in self.data:
data._change_reason = "my_reason"
Poll.history.bulk_history_create(self.data, default_change_reason="test")
self.assertTrue(
all(
[
history.history_change_reason == "my_reason"
for history in Poll.history.all()
]
)
)
def test_bulk_history_create_on_objs_without_ids(self):
self.data = [
Poll(question="Question 1", pub_date=datetime.now()),
Poll(question="Question 2", pub_date=datetime.now()),
Poll(question="Question 3", pub_date=datetime.now()),
Poll(question="Question 4", pub_date=datetime.now()),
]
with self.assertRaises(IntegrityError):
Poll.history.bulk_history_create(self.data)
def test_set_custom_history_date_on_first_obj(self):
self.data[0]._history_date = datetime(2000, 1, 1)
Poll.history.bulk_history_create(self.data)
self.assertEqual(
Poll.history.order_by("question")[0].history_date, datetime(2000, 1, 1)
)
def test_set_custom_history_user_on_first_obj(self):
user = User.objects.create_user("tester", "tester@example.com")
self.data[0]._history_user = user
Poll.history.bulk_history_create(self.data)
self.assertEqual(Poll.history.order_by("question")[0].history_user, user)
@skipUnlessDBFeature("has_bulk_insert")
def test_efficiency(self):
with self.assertNumQueries(1):
Poll.history.bulk_history_create(self.data)
class BulkHistoryUpdateTestCase(TestCase):
def setUp(self):
self.data = [
Poll(id=1, question="Question 1", pub_date=datetime.now()),
Poll(id=2, question="Question 2", pub_date=datetime.now()),
Poll(id=3, question="Question 3", pub_date=datetime.now()),
Poll(id=4, question="Question 4", pub_date=datetime.now()),
]
def test_simple_bulk_history_create(self):
created = Poll.history.bulk_history_create(self.data, update=True)
self.assertEqual(len(created), 4)
self.assertQuerysetEqual(
Poll.history.order_by("question"),
["Question 1", "Question 2", "Question 3", "Question 4"],
attrgetter("question"),
)
self.assertTrue(
all([history.history_type == "~" for history in Poll.history.all()])
)
created = Poll.history.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Poll.history.count(), 4)
def test_bulk_history_create_with_change_reason(self):
for poll in self.data:
poll._change_reason = "reason"
Poll.history.bulk_history_create(self.data)
self.assertTrue(
all(
[
history.history_change_reason == "reason"
for history in Poll.history.all()
]
)
)
|
|
"""
Earley Parser.
@author: Hardik
"""
import argparse
import sys
import string
from collections import defaultdict
from nltk.tree import Tree
class Rule(object):
"""
Represents a CFG rule.
"""
def __init__(self, lhs, rhs):
# Represents the rule 'lhs -> rhs', where lhs is a non-terminal and
# rhs is a list of non-terminals and terminals.
self.lhs, self.rhs = lhs, rhs
def __contains__(self, sym):
return sym in self.rhs
def __eq__(self, other):
if type(other) is Rule:
return self.lhs == other.lhs and self.rhs == other.rhs
return False
def __getitem__(self, i):
return self.rhs[i]
def __len__(self):
return len(self.rhs)
def __repr__(self):
return self.__str__()
def __str__(self):
return self.lhs + ' -> ' + ' '.join(self.rhs)
class Grammar(object):
"""
Represents a CFG.
"""
def __init__(self):
		# The rules are stored as a dictionary mapping each L.H.S. to its list of rules.
self.rules = defaultdict(list)
def add(self, rule):
"""
Adds the given rule to the grammar.
"""
self.rules[rule.lhs].append(rule)
@staticmethod
def load_grammar(fpath):
"""
		Loads the grammar from the given file path.
"""
grammar = Grammar()
with open(fpath) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
entries = line.split('->')
lhs = entries[0].strip()
for rhs in entries[1].split('|'):
grammar.add(Rule(lhs, rhs.strip().split()))
return grammar
def __repr__(self):
return self.__str__()
def __str__(self):
s = [str(r) for r in self.rules['S']]
		for nt, rule_list in self.rules.items():
if nt == 'S':
continue
s += [str(r) for r in rule_list]
return '\n'.join(s)
# Returns the rules for a given Non-terminal.
def __getitem__(self, nt):
return self.rules[nt]
def is_terminal(self, sym):
"""
		Checks if the given symbol is terminal.
"""
return len(self.rules[sym]) == 0
def is_tag(self, sym):
"""
		Checks whether the given symbol is a tag, i.e. a non-terminal whose rules
		expand solely to terminals.
"""
if not self.is_terminal(sym):
return all(self.is_terminal(s) for r in self.rules[sym] for s in
r.rhs)
return False
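# Minimal usage sketch for Grammar (the rules below are illustrative only).
# A grammar file holds one "LHS -> RHS" rule per line, with '|' separating
# alternative right-hand sides, e.g. "NP -> Det N | N"; load_grammar builds
# the same structure that this helper builds by hand.
def _example_toy_grammar():
	grammar = Grammar()
	grammar.add(Rule('NP', ['Det', 'N']))
	grammar.add(Rule('NP', ['N']))
	grammar.add(Rule('Det', ['the']))
	grammar.add(Rule('N', ['dog']))
	# 'Det' and 'N' are tags (they expand only to terminals); 'NP' is not.
	return grammar.is_tag('Det'), grammar.is_tag('NP')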
class EarleyState(object):
"""
Represents a state in the Earley algorithm.
"""
GAM = '<GAM>'
	def __init__(self, rule, dot=0, sent_pos=0, chart_pos=0, back_pointers=None):
		# CFG Rule.
		self.rule = rule
		# Dot position in the rule.
		self.dot = dot
		# Sentence position.
		self.sent_pos = sent_pos
		# Chart index.
		self.chart_pos = chart_pos
		# Pointers to child states (if the given state was generated using
		# Completer). A None sentinel avoids a shared mutable default argument.
		self.back_pointers = back_pointers if back_pointers is not None else []
def __eq__(self, other):
if type(other) is EarleyState:
return self.rule == other.rule and self.dot == other.dot and \
self.sent_pos == other.sent_pos
return False
def __len__(self):
return len(self.rule)
def __repr__(self):
return self.__str__()
def __str__(self):
def str_helper(state):
return ('(' + state.rule.lhs + ' -> ' +
' '.join(state.rule.rhs[:state.dot] + ['*'] +
state.rule.rhs[state.dot:]) +
(', [%d, %d])' % (state.sent_pos, state.chart_pos)))
return (str_helper(self) +
' (' + ', '.join(str_helper(s) for s in self.back_pointers) + ')')
def next(self):
"""
Return next symbol to parse, i.e. the one after the dot
"""
if self.dot < len(self):
return self.rule[self.dot]
def is_complete(self):
"""
Checks whether the given state is complete.
"""
return len(self) == self.dot
@staticmethod
def init():
"""
Returns the state used to initialize the chart in the Earley algorithm.
"""
return EarleyState(Rule(EarleyState.GAM, ['S']))
class ChartEntry(object):
"""
Represents an entry in the chart used by the Earley algorithm.
"""
def __init__(self, states):
# List of Earley states.
self.states = states
def __iter__(self):
return iter(self.states)
def __len__(self):
return len(self.states)
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n'.join(str(s) for s in self.states)
def add(self, state):
"""
Add the given state (if it hasn't already been added).
"""
if state not in self.states:
self.states.append(state)
class Chart(object):
"""
Represents the chart used in the Earley algorithm.
"""
def __init__(self, entries):
# List of chart entries.
self.entries = entries
def __getitem__(self, i):
return self.entries[i]
def __len__(self):
return len(self.entries)
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n\n'.join([("Chart[%d]:\n" % i) + str(entry) for i, entry in
enumerate(self.entries)])
@staticmethod
def init(l):
"""
Initializes a chart with l entries (Including the dummy start state).
"""
return Chart([(ChartEntry([]) if i > 0 else
ChartEntry([EarleyState.init()])) for i in range(l)])
class EarleyParse(object):
"""
Represents the Earley-generated parse for a given sentence according to a
given grammar.
"""
def __init__(self, sentence, grammar):
self.words = sentence.split()
self.grammar = grammar
self.chart = Chart.init(len(self.words) + 1)
def predictor(self, state, pos):
"""
Earley Predictor.
"""
for rule in self.grammar[state.next()]:
self.chart[pos].add(EarleyState(rule, dot=0,
sent_pos=state.chart_pos, chart_pos=state.chart_pos))
def scanner(self, state, pos):
"""
Earley Scanner.
"""
if state.chart_pos < len(self.words):
word = self.words[state.chart_pos]
if any((word in r) for r in self.grammar[state.next()]):
self.chart[pos + 1].add(EarleyState(Rule(state.next(), [word]),
dot=1, sent_pos=state.chart_pos,
chart_pos=(state.chart_pos + 1)))
def completer(self, state, pos):
"""
Earley Completer.
"""
for prev_state in self.chart[state.sent_pos]:
if prev_state.next() == state.rule.lhs:
self.chart[pos].add(EarleyState(prev_state.rule,
dot=(prev_state.dot + 1), sent_pos=prev_state.sent_pos,
chart_pos=pos,
back_pointers=(prev_state.back_pointers + [state])))
def parse(self):
"""
Parses the sentence by running the Earley algorithm and filling out the
chart.
"""
# Checks whether the next symbol for the given state is a tag.
def is_tag(state):
return self.grammar.is_tag(state.next())
for i in range(len(self.chart)):
for state in self.chart[i]:
if not state.is_complete():
if is_tag(state):
self.scanner(state, i)
else:
self.predictor(state, i)
else:
self.completer(state, i)
def has_parse(self):
"""
Checks whether the sentence has a parse.
"""
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == 'S' and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
return True
return False
def get(self):
"""
Returns the parse if it exists, otherwise returns None.
"""
def get_helper(state):
if self.grammar.is_tag(state.rule.lhs):
return Tree(state.rule.lhs, [state.rule.rhs[0]])
return Tree(state.rule.lhs,
[get_helper(s) for s in state.back_pointers])
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == 'S' and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
return get_helper(state)
return None
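# Minimal end-to-end sketch (toy grammar and sentence, illustrative only):
# build a grammar, run the Earley algorithm, and fetch the resulting tree.
def _example_parse_sentence():
	grammar = Grammar()
	toy_rules = [('S', 'NP VP'), ('NP', 'Det N'), ('VP', 'V NP'),
		('Det', 'the'), ('N', 'dog'), ('N', 'cat'), ('V', 'saw')]
	for lhs, rhs in toy_rules:
		grammar.add(Rule(lhs, rhs.split()))
	parse = EarleyParse('the dog saw the cat', grammar)
	parse.parse()
	# get() returns an nltk.tree.Tree rooted at 'S', or None if no parse exists.
	return parse.get()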
def main():
"""
Main.
"""
parser_description = ("Runs the Earley parser according to a given "
"grammar.")
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument('draw', nargs='?', default=False)
	parser.add_argument('grammar_file', help="Filepath to grammar file")
args = parser.parse_args()
grammar = Grammar.load_grammar(args.grammar_file)
def run_parse(sentence):
parse = EarleyParse(sentence, grammar)
parse.parse()
return parse.get()
while True:
try:
			sentence = input()
			# Strip the sentence of any punctuation.
			stripped_sentence = sentence
			for p in string.punctuation:
				stripped_sentence = stripped_sentence.replace(p, '')
			parse = run_parse(stripped_sentence)
			if parse is None:
				print(sentence + '\n')
else:
if args.draw:
parse.draw()
else:
parse.pretty_print()
except EOFError:
sys.exit()
if args.draw:
sys.exit()
if __name__ == '__main__':
main()
|
|
import re
import numpy as np
def get_atoms_adapter(monomer, arg):
return monomer.get_atoms(arg)
def get_atomset_adapter(monomer, arg):
return monomer.get_atomset(arg)
def get_not_atomset_adapter(monomer, arg):
return monomer.get_not_atomset(arg)
def regex_get_other_atoms_adapter(monomer, arg):
return monomer.regex_get_other_atoms(arg)
def regex_get_atoms_adapter(monomer, arg):
return monomer.regex_get_atoms(arg)
def filter_atomset(s):
s = set(s)
def filter_condition(atom_name):
return atom_name in s
return filter_condition
def filter_not_atomset(s):
s = set(s)
def filter_condition(atom_name):
return atom_name not in s
return filter_condition
def filter_regex(regex):
def filter_condition(atom_name):
return re.match(regex, atom_name)
return filter_condition
def filter_not_regex(regex):
def filter_condition(atom_name):
return not re.match(regex, atom_name)
return filter_condition
def change_by_atom_ndof(offsets_by_atom, new_ndof):
"""Adapt offsets_by_atom vector to one with the new ndof."""
old_ndof = offsets_by_atom.shape[1]
    num_atoms = offsets_by_atom.shape[0]
    # Recover each atom's index from its first offset, then rebuild the offsets
    # as index * new_ndof + (0 .. new_ndof - 1) for the requested ndof.
    indices = offsets_by_atom[:, 0] // old_ndof
    indices_by_atom = np.tile(indices.reshape(-1, 1), (1, new_ndof))
    dof_by_atom = np.tile(np.arange(new_ndof), (num_atoms, 1))
    new_offsets_by_atom = indices_by_atom * new_ndof + dof_by_atom
    return new_offsets_by_atom
class TopologyError(Exception):
pass
class Topology(object):
def __init__(self, name, atom_offsets=None, start=0, shape=None, target_offsets=None, ndof=3):
self.name = name
self.ndof = ndof
# try:
# atoms = self.atoms
# except AttributeError:
# pass
# try:
# monomers = self.monomers
# except AttributeError:
# pass
if atom_offsets is None:
atom_offsets = np.array(range(start, start + ndof * self.num_atoms))
elif len(atom_offsets) % ndof != 0:
raise TopologyError("Bad offset length: len(%s) = %s with ndof %s" % (atom_offsets, len(atom_offsets), ndof))
self.atom_offsets = np.array(atom_offsets)
if shape is None:
self.shape = self.atom_offsets.shape
else:
self.shape = shape
if target_offsets is None:
self.target_offsets = atom_offsets
else:
self.target_offsets = target_offsets
if self.target_offsets.shape != self.atom_offsets.shape:
raise TopologyError("atom and target offsets do not have the same shape")
def indent_str(self, level):
name = self.name
myindent = ' ' * level
try:
mols = self.monomers
mytype = '<< %s Polymer' % name
mysubstrs = [mol.indent_str(level+1) for mol in mols]
subindent = '\n'
mysubstr = "\n%s\n%s >>" % (subindent.join(mysubstrs), myindent)
except AttributeError:
atoms = self.atoms
mytype = '<< %s Molecule' % name
mysubstr = ' %s >>' % (', '.join(atoms))
return '%s%s%s' % (myindent, mytype, mysubstr)
def __str__(self):
return self.indent_str(0)
@property
def offsets_by_atom(self):
try:
return self.atom_offsets.reshape(-1, self.ndof)
except ValueError:
raise Exception("can't resize %s" % self.atom_offsets)
@property
def targets_by_atom(self):
try:
return self.target_offsets.reshape(-1, self.ndof)
except ValueError:
raise Exception("can't resize %s" % self.target_offsets)
def get_coords(self, x):
try:
return x[self.atom_offsets]
except TypeError:
x = np.array(x)
return x[self.atom_offsets]
def set_coords(self, x, new_x):
x[self.atom_offsets] = new_x
def transfer_coords(self, y, x):
y[self.target_offsets] = x[self.atom_offsets]
return y
def lift_coords(self, x):
y = np.zeros(self.shape)
self.transfer_coords(y, x)
return y
class Molecule(Topology):
def __init__(self, name, atoms, atom_offsets=None, start=0, shape=None, target_offsets=None, ndof=3):
self.name = name
self.atoms = atoms
Topology.__init__(self, name, atom_offsets=atom_offsets, start=start, shape=shape, target_offsets=target_offsets,
ndof=ndof)
def copy(self):
return Molecule(self.name, self.atoms[:],
atom_offsets=self.atom_offsets.copy(),
shape=self.shape,
target_offsets=self.target_offsets.copy(),
ndof=self.ndof)
@property
def num_atoms(self):
return len(self.atoms)
@property
def atom_names(self):
return self.atoms
def __len__(self):
return len(self.atoms)
def _get(self, filter_condition):
"""Generic function to return a sub topology."""
idx = -1
atoms_in_query = []
offset_indices = []
for idx, atom_name in enumerate(self.atoms):
if filter_condition(atom_name):
offset_indices.append(idx)
atoms_in_query.append(atom_name)
atom_offsets = self.offsets_by_atom[offset_indices].reshape((-1,))
target_offsets = self.targets_by_atom[offset_indices].reshape((-1,))
return Molecule(self.name,
atoms=atoms_in_query,
atom_offsets=atom_offsets,
shape=self.shape,
target_offsets=target_offsets,
ndof=self.ndof)
def get_atoms(self, query):
return self._get(filter_atomset([query]))
def get_atomset(self, query):
return self._get(filter_atomset(query))
def get_not_atomset(self, query):
return self._get(filter_not_atomset(query))
def regex_get_atoms(self, query):
return self._get(filter_regex(query))
def regex_get_other_atoms(self, query):
return self._get(filter_not_regex(query))
def lift_topology(self, other_topology, namemap=None,
reorder=False):
if self.ndof != other_topology.ndof:
raise TopologyError("Can not raise a topology with %s dof to %s dof." % (other_topology.ndof, self.ndof))
if namemap:
other_atom_names = [namemap(atom_name) for atom_name in other_topology.atoms]
else:
other_atom_names = other_topology.atoms
if len(set(self.atoms)) != len(self.atoms):
raise TopologyError("Can not reorder atoms in a molecule with redundant atom names.")
if ((reorder and (set(self.atoms) != set(other_atom_names)))
or len(set(other_atom_names) - set(self.atoms)) > 0):
first = '\n'.join(set(self.atoms) - set(other_atom_names))
second = '\n'.join(set(other_atom_names) - set(self.atoms))
raise TopologyError("Can not reorder topology %s to %s because the set of atom names do not match.\nIn first but not second: %s\nsecond but not first: %s" % (self.name, other_topology.name, first, second))
offsets_by_atom = self.offsets_by_atom
other_offsets_by_atom = other_topology.offsets_by_atom
atom_offsets = []
target_offsets = []
for jdx, atom_name in enumerate(other_atom_names):
idx = self.atoms.index(atom_name)
atom_offsets.extend(list(other_offsets_by_atom[jdx]))
target_offsets.extend(list(offsets_by_atom[idx]))
atom_offsets = np.array(atom_offsets)
target_offsets = np.array(target_offsets)
        return Molecule(self.name,
                        other_atom_names,
                        atom_offsets=atom_offsets,
                        shape=self.shape,
                        target_offsets=target_offsets,
                        ndof=self.ndof)
def change_ndof(self, new_ndof):
new_offsets_by_atom = change_by_atom_ndof(self.offsets_by_atom, new_ndof)
new_targets_by_atom = change_by_atom_ndof(self.targets_by_atom, new_ndof)
assert len(self.shape) == 1, str(self.shape)
        new_shape = (self.shape[0] // self.ndof * new_ndof, )
return Molecule(self.name,
atoms=self.atoms,
atom_offsets=new_offsets_by_atom.reshape((-1,)),
shape=new_shape,
target_offsets=new_targets_by_atom.reshape((-1,)),
ndof=new_ndof)
def get_contiguous_topology(self, start=0):
return self.__class__(atoms=self.atoms, start=start, ndof=self.ndof, name=self.name)
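# Minimal usage sketch for Molecule (the atom names here are illustrative only):
def _example_molecule_selection():
    mol = Molecule('HOH', ['O', 'H1', 'H2'])   # default ndof=3 -> offsets 0..8
    hydrogens = mol.regex_get_atoms(r'^H')     # sub-topology holding H1 and H2
    coords = np.arange(9.0)                    # one flat coordinate vector
    return hydrogens.get_coords(coords)        # -> array([3., 4., 5., 6., 7., 8.])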
def monomers_offset_vector(monomers):
atom_offsets = []
for monomer in monomers:
atom_offsets.extend(list(monomer.atom_offsets))
return np.array(atom_offsets)
def monomers_target_vector(monomers):
target_offsets = []
for monomer in monomers:
target_offsets.extend(list(monomer.target_offsets))
return np.array(target_offsets)
class Polymer(Topology):
def __init__(self, name, monomers, fixed_monomers=False, atom_offsets=None, start=0, shape=None, target_offsets=None, ndof=3):
# If the offsets of the monomers are predetermined,
# fixed_monomers should be True, otherwise we will adjust them
# to be contiguous.
self.name = name
if fixed_monomers:
concrete_monomers = monomers
else:
start = 0
concrete_monomers = []
for monomer in monomers:
monomer = monomer.get_contiguous_topology(start)
concrete_monomers.append(monomer)
start = monomer.atom_offsets[-1] + 1
self.monomers = concrete_monomers
if atom_offsets is None:
atom_offsets = monomers_offset_vector(self.monomers)
if target_offsets is None:
target_offsets = monomers_target_vector(self.monomers)
if shape is None:
shape = atom_offsets.shape
for monomer in self.monomers:
monomer.shape = shape
Topology.__init__(self, name, atom_offsets=atom_offsets, start=start, shape=shape, target_offsets=target_offsets,
ndof=ndof)
def flatten(self):
"""Flatten a Polymer of Polymers into a single chain."""
if not isinstance(self.monomers[0], Polymer):
raise Exception("Only polymers of polymers can be flattened.")
new_monomers = []
for monomer in self.monomers:
new_monomers.extend(monomer.monomers)
return Polymer(self.name, new_monomers, fixed_monomers=True, ndof=self.ndof)
def monomers_slice(self, i, j):
        the_slice = self.monomers[i:j]
if len(the_slice) == 0:
raise Exception("Empty monomers slice range %d %d for %d monomers" % (i, j, len(self.monomers)))
return Polymer(self.name, the_slice, fixed_monomers=True, ndof=self.ndof)
def get_monomer_by_index(self, idx):
return self.monomers[idx]
@property
def sequence(self):
return [monomer.name for monomer in self.monomers]
@property
def atoms(self):
atom_names = []
for monomer in self.monomers:
atom_names.extend(monomer.atom_names)
return atom_names
@property
def num_monomers(self):
return len(self.monomers)
@property
def num_atoms(self):
return sum(monomer.num_atoms for monomer in self.monomers)
def __len__(self):
return sum(len(monomer) for monomer in self.monomers)
def get_monomer(self, query):
concrete_monomers = [monomer for monomer in self.monomers if monomer.name == query]
return Polymer(self.name, concrete_monomers, fixed_monomers=True, ndof=self.ndof)
def _get(self, get_submonomer, query):
"""Generic function to return a sub topologies of the comprised molecules."""
concrete_monomers = []
for monomer in self.monomers:
sub_monomer = get_submonomer(monomer, query)
concrete_monomers.append(sub_monomer)
return Polymer(self.name, concrete_monomers, fixed_monomers=True, ndof=self.ndof)
def get_atoms(self, query):
return self._get(get_atoms_adapter, query)
def get_atomset(self, query):
return self._get(get_atomset_adapter, query)
def get_not_atomset(self, query):
return self._get(get_not_atomset_adapter, query)
def regex_get_atoms(self, query):
return self._get(regex_get_atoms_adapter, query)
def regex_get_other_atoms(self, query):
return self._get(regex_get_other_atoms_adapter, query)
def lift_topology(self, other_topology, namemap=None,
reorder=False):
if self.ndof != other_topology.ndof:
raise TopologyError("Can not raise a topology with %s dof to %s dof." % (other_topology.ndof, self.ndof))
if self.num_monomers != other_topology.num_monomers:
raise TopologyError("Topologies have incompatible numbers of monomers: %d != %d" % (self.num_monomers, other_topology.num_monomers))
concrete_monomers = []
for monomer, other_monomer in zip(self.monomers, other_topology.monomers):
concrete_monomers.append(monomer.lift_topology(other_monomer, namemap=namemap,
reorder=reorder))
return Polymer(self.name, concrete_monomers, fixed_monomers=True, shape=self.shape, ndof=self.ndof)
def change_ndof(self, new_ndof):
concrete_monomers = []
for monomer in self.monomers:
concrete_monomers.append(monomer.change_ndof(new_ndof))
assert len(self.shape) == 1, str(self.shape)
        new_shape = (self.shape[0] // self.ndof * new_ndof, )
        return Polymer(self.name, concrete_monomers, fixed_monomers=True, shape=new_shape, ndof=new_ndof)
def get_contiguous_topology(self, start=0):
concrete_monomers = []
for monomer in self.monomers:
concrete_monomer = monomer.get_contiguous_topology(start)
concrete_monomers.append(concrete_monomer)
if concrete_monomer.num_atoms > 0:
start = concrete_monomer.atom_offsets[-1] + 1
return Polymer(self.name, concrete_monomers, fixed_monomers=True, ndof=self.ndof)
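# Minimal usage sketch for Polymer layout (residue and atom names are illustrative):
def _example_polymer_offsets():
    ala = Molecule('ALA', ['N', 'CA', 'C'])
    gly = Molecule('GLY', ['N', 'CA', 'C'])
    chain = Polymer('chain', [ala, gly])
    # Monomers are laid out contiguously: ALA occupies offsets 0..8 and GLY 9..17,
    # so selecting the CA atoms keeps offsets 3..5 and 12..14.
    return chain.get_atoms('CA').atom_offsets  # -> array([ 3,  4,  5, 12, 13, 14])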
def namedict(d):
"""reorder namemap using the dict."""
def namemap(atom_name):
try:
return d[atom_name]
except KeyError:
return atom_name
return namemap
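# Minimal sketch of lifting one topology onto another through a name map
# (atom names and the start offset below are illustrative only):
def _example_lift_with_namemap():
    ref = Molecule('HOH', ['O', 'H1', 'H2'])              # offsets 0..8
    other = Molecule('HOH', ['O', 'HA', 'HB'], start=30)  # offsets 30..38
    lifted = ref.lift_topology(other, namemap=namedict({'HA': 'H1', 'HB': 'H2'}))
    # lift_coords copies the other topology's coordinates into ref's layout.
    return lifted.lift_coords(np.arange(39.0))            # -> array([30., 31., ..., 38.])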
class Monomers(object):
"""Store a dictionary of monomers for transformation into a sequence."""
def __init__(self, monomers):
self.monomer_dict = dict((monomer.name, monomer) for monomer in monomers)
def sequence(self, resseq):
monomer_dict = self.monomer_dict
return [monomer_dict[resname].copy() for resname in resseq]
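# Minimal sketch of building a chain from a template library (names illustrative):
def _example_monomer_library():
    library = Monomers([Molecule('ALA', ['N', 'CA', 'C']),
                        Molecule('GLY', ['N', 'CA', 'C'])])
    residues = library.sequence(['ALA', 'GLY', 'ALA'])  # fresh copies per residue
    return Polymer('peptide', residues)                 # 27 contiguous offsets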
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
python %prog study.file population.file gene-association.file
This program returns P-values for functional enrichment in a cluster of
study genes using Fisher's exact test, and corrected for multiple testing
(including Bonferroni, Holm, Sidak, and false discovery rate)
"""
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2010-2017, H Tang et al., All rights reserved."
__author__ = "various"
import sys
import collections as cx
import datetime
from .multiple_testing import Methods, Bonferroni, Sidak, HolmBonferroni, FDR, calc_qval
from .ratio import get_terms, count_terms, is_ratio_different
from . import wr_tbl as RPT
from .pvalcalc import FisherFactory
class GOEnrichmentRecord(object):
"""Represents one result (from a single GOTerm) in the GOEnrichmentStudy
"""
namespace2NS = cx.OrderedDict([
('biological_process', 'BP'),
('molecular_function', 'MF'),
('cellular_component', 'CC')])
# Fields seen in every enrichment result
_fldsdefprt = [
"GO",
"NS",
"enrichment",
"name",
"ratio_in_study",
"ratio_in_pop",
"p_uncorrected",
"depth",
"study_count",
"study_items"]
_fldsdeffmt = ["%s"]*3 + ["%-30s"] + ["%d/%d"] * 2 + ["%.3g"] + ["%d"] * 2 + ["%15s"]
_flds = set(_fldsdefprt).intersection(
set(['study_items', 'study_count', 'study_n', 'pop_items', 'pop_count', 'pop_n']))
def __init__(self, **kwargs):
# Methods seen in current enrichment result
self._methods = []
for k, v in kwargs.items():
setattr(self, k, v)
if k == 'ratio_in_study':
setattr(self, 'study_count', v[0])
setattr(self, 'study_n', v[1])
if k == 'ratio_in_pop':
setattr(self, 'pop_count', v[0])
setattr(self, 'pop_n', v[1])
self._init_enrichment()
self.goterm = None # the reference to the GOTerm
def get_method_name(self):
"""Return name of first method in the _methods list."""
return self._methods[0].fieldname
def get_pvalue(self):
"""Returns pval for 1st method, if it exists. Else returns uncorrected pval."""
if self._methods:
return getattr(self, "p_{m}".format(m=self.get_method_name()))
return getattr(self, "p_uncorrected")
def set_corrected_pval(self, nt_method, pvalue):
"""Add object attribute based on method name."""
self._methods.append(nt_method)
fieldname = "".join(["p_", nt_method.fieldname])
setattr(self, fieldname, pvalue)
def __str__(self, indent=False):
field_data = [getattr(self, f, "n.a.") for f in self._fldsdefprt[:-1]] + \
[getattr(self, "p_{}".format(m.fieldname)) for m in self._methods] + \
[", ".join(sorted(getattr(self, self._fldsdefprt[-1], set())))]
fldsdeffmt = self._fldsdeffmt
field_formatter = fldsdeffmt[:-1] + ["%.3g"]*len(self._methods) + [fldsdeffmt[-1]]
self._chk_fields(field_data, field_formatter)
# default formatting only works for non-"n.a" data
for i, f in enumerate(field_data):
if f == "n.a.":
field_formatter[i] = "%s"
# print dots to show the level of the term
dots = self.get_indent_dots() if indent else ""
prtdata = "\t".join(a % b for (a, b) in zip(field_formatter, field_data))
return "".join([dots, prtdata])
def get_indent_dots(self):
"""Get a string of dots ("....") representing the level of the GO term."""
return "." * self.goterm.level if self.goterm is not None else ""
@staticmethod
def _chk_fields(field_data, field_formatter):
"""Check that expected fields are present."""
if len(field_data) == len(field_formatter):
return
len_dat = len(field_data)
len_fmt = len(field_formatter)
msg = [
"FIELD DATA({d}) != FORMATTER({f})".format(d=len_dat, f=len_fmt),
"DAT({N}): {D}".format(N=len_dat, D=field_data),
"FMT({N}): {F}".format(N=len_fmt, F=field_formatter)]
raise Exception("\n".join(msg))
def __repr__(self):
return "GOEnrichmentRecord({GO})".format(GO=self.GO)
def set_goterm(self, goid):
"""Set goterm and copy GOTerm's name and namespace."""
self.goterm = goid.get(self.GO, None)
present = self.goterm is not None
self.name = self.goterm.name if present else "n.a."
self.NS = self.namespace2NS[self.goterm.namespace] if present else "XX"
def _init_enrichment(self):
"""Mark as 'enriched' or 'purified'."""
self.enrichment = 'e' if ((1.0 * self.study_count / self.study_n) >
(1.0 * self.pop_count / self.pop_n)) else 'p'
def update_remaining_fldsdefprt(self, min_ratio=None):
self.is_ratio_different = is_ratio_different(min_ratio, self.study_count,
self.study_n, self.pop_count, self.pop_n)
# -------------------------------------------------------------------------------------
# Methods for getting flat namedtuple values from GOEnrichmentRecord object
def get_prtflds_default(self):
"""Get default fields."""
return self._fldsdefprt[:-1] + \
["p_{M}".format(M=m.fieldname) for m in self._methods] + \
[self._fldsdefprt[-1]]
def get_prtflds_all(self):
"""When converting to a namedtuple, get all possible fields in their original order."""
flds = []
dont_add = set(['_parents', '_methods'])
# Fields: GO NS enrichment name ratio_in_study ratio_in_pop p_uncorrected
# depth study_count p_sm_bonferroni p_fdr_bh study_items
self._flds_append(flds, self.get_prtflds_default(), dont_add)
# Fields: GO NS goterm
# ratio_in_pop pop_n pop_count pop_items name
# ratio_in_study study_n study_count study_items
# _methods enrichment p_uncorrected p_sm_bonferroni p_fdr_bh
self._flds_append(flds, vars(self).keys(), dont_add)
# Fields: name level is_obsolete namespace id depth parents children _parents alt_ids
self._flds_append(flds, vars(self.goterm).keys(), dont_add)
return flds
@staticmethod
def _flds_append(flds, addthese, dont_add):
"""Retain order of fields as we add them once to the list."""
for fld in addthese:
if fld not in flds and fld not in dont_add:
flds.append(fld)
def get_field_values(self, fldnames, rpt_fmt=True):
"""Get flat namedtuple fields for one GOEnrichmentRecord."""
row = []
# Loop through each user field desired
for fld in fldnames:
# 1. Check the GOEnrichmentRecord's attributes
val = getattr(self, fld, None)
if val is not None:
if rpt_fmt:
val = self._get_rpt_fmt(fld, val)
row.append(val)
else:
# 2. Check the GO object for the field
val = getattr(self.goterm, fld, None)
if rpt_fmt:
val = self._get_rpt_fmt(fld, val)
if val is not None:
row.append(val)
else:
# 3. Field not found, raise Exception
self._err_fld(fld, fldnames)
if rpt_fmt:
assert not isinstance(val, list), \
"UNEXPECTED LIST: FIELD({F}) VALUE({V})".format(F=fld, V=val)
return row
@staticmethod
def _get_rpt_fmt(fld, val):
"""Return values in a format amenable to printing in a table."""
if fld.startswith("ratio_"):
return "{N}/{TOT}".format(N=val[0], TOT=val[1])
elif fld in set(['study_items', 'pop_items', 'alt_ids']):
return ", ".join([str(v) for v in sorted(val)])
return val
def _err_fld(self, fld, fldnames):
"""Unrecognized field. Print detailed Failure message."""
msg = ['ERROR. UNRECOGNIZED FIELD({F})'.format(F=fld)]
actual_flds = set(self.get_prtflds_default() + list(self.goterm.__dict__.keys()))
bad_flds = set(fldnames).difference(set(actual_flds))
if bad_flds:
msg.append("\nGOEA RESULT FIELDS: {}".format(" ".join(self._fldsdefprt)))
msg.append("GO FIELDS: {}".format(" ".join(self.goterm.__dict__.keys())))
msg.append("\nFATAL: {N} UNEXPECTED FIELDS({F})\n".format(
N=len(bad_flds), F=" ".join(bad_flds)))
msg.append(" {N} User-provided fields:".format(N=len(fldnames)))
for idx, fld in enumerate(fldnames, 1):
mrk = "ERROR -->" if fld in bad_flds else ""
msg.append(" {M:>9} {I:>2}) {F}".format(M=mrk, I=idx, F=fld))
raise Exception("\n".join(msg))
class GOEnrichmentStudy(object):
"""Runs Fisher's exact test, as well as multiple corrections
"""
# Default Excel table column widths for GOEA results
default_fld2col_widths = {
'NS' : 3,
'GO' : 12,
'level' : 3,
'enrichment': 1,
'name' : 60,
'ratio_in_study': 8,
'ratio_in_pop' : 12,
'study_items' : 15,
}
def __init__(self, pop, assoc, obo_dag, propagate_counts=True, alpha=.05, methods=None, **kws):
self.log = kws['log'] if 'log' in kws else sys.stdout
self._run_multitest = {
'local':lambda iargs: self._run_multitest_local(iargs),
'statsmodels':lambda iargs: self._run_multitest_statsmodels(iargs)}
self.pop = pop
self.pop_n = len(pop)
self.assoc = assoc
self.obo_dag = obo_dag
self.alpha = alpha
if methods is None:
methods = ["bonferroni", "sidak", "holm"]
self.methods = Methods(methods)
self.pval_obj = FisherFactory(**kws).pval_obj
if propagate_counts:
sys.stderr.write("Propagating term counts to parents ...\n")
obo_dag.update_association(assoc)
self.go2popitems = get_terms("population", pop, assoc, obo_dag, self.log)
def run_study(self, study, **kws):
"""Run Gene Ontology Enrichment Study (GOEA) on study ids."""
# Key-word arguments:
methods = Methods(kws['methods']) if 'methods' in kws else self.methods
alpha = kws['alpha'] if 'alpha' in kws else self.alpha
log = kws['log'] if 'log' in kws else self.log
# Calculate uncorrected pvalues
results = self._get_pval_uncorr(study)
if not results:
return []
# Do multipletest corrections on uncorrected pvalues and update results
self._run_multitest_corr(results, methods, alpha, study)
for rec in results:
# get go term for name and level
rec.set_goterm(self.obo_dag)
# 'keep_if' can be used to keep only significant GO terms. Example:
# >>> keep_if = lambda nt: nt.p_fdr_bh < 0.05 # if results are significant
# >>> goea_results = goeaobj.run_study(geneids_study, keep_if=keep_if)
if 'keep_if' in kws:
keep_if = kws['keep_if']
results = [r for r in results if keep_if(r)]
# Default sort order: First, sort by BP, MF, CC. Second, sort by pval
results.sort(key=lambda r: [r.NS, r.p_uncorrected])
if log is not None:
log.write(" {MSG}\n".format(MSG="\n ".join(self.get_results_msg(results, study))))
return results # list of GOEnrichmentRecord objects
def run_study_nts(self, study, **kws):
"""Run GOEA on study ids. Return results as a list of namedtuples."""
goea_results = self.run_study(study, **kws)
return get_goea_nts_all(goea_results)
def get_results_msg(self, results, study):
"""Return summary for GOEA results."""
# To convert msg list to string: "\n".join(msg)
msg = []
if results:
stu_items, num_gos_stu = self.get_item_cnt(results, "study_items")
pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items")
msg.append("{M:,} GO terms are associated with {N:,} of {NT:,} study items".format(
N=len(stu_items), NT=len(set(study)), M=num_gos_stu))
msg.append("{M:,} GO terms are associated with {N:,} of {NT:,} population items".format(
N=len(pop_items), NT=self.pop_n, M=num_gos_pop))
return msg
def _get_pval_uncorr(self, study, log=sys.stdout):
"""Calculate the uncorrected pvalues for study items."""
log.write("Calculating uncorrected p-values using {PFNC}\n".format(PFNC=self.pval_obj.name))
results = []
go2studyitems = get_terms("study", study, self.assoc, self.obo_dag, log)
pop_n, study_n = self.pop_n, len(study)
allterms = set(go2studyitems.keys()).union(
set(self.go2popitems.keys()))
calc_pvalue = self.pval_obj.calc_pvalue
for term in allterms:
study_items = go2studyitems.get(term, set())
study_count = len(study_items)
pop_items = self.go2popitems.get(term, set())
pop_count = len(pop_items)
one_record = GOEnrichmentRecord(
GO=term,
p_uncorrected=calc_pvalue(study_count, study_n, pop_count, pop_n),
study_items=study_items,
pop_items=pop_items,
ratio_in_study=(study_count, study_n),
ratio_in_pop=(pop_count, pop_n))
results.append(one_record)
return results
def _run_multitest_corr(self, results, usr_methods, alpha, study):
"""Do multiple-test corrections on uncorrected pvalues."""
assert 0 < alpha < 1, "Test-wise alpha must fall within the open interval (0, 1)"
pvals = [r.p_uncorrected for r in results]
NtMt = cx.namedtuple("NtMt", "results pvals alpha nt_method study")
for nt_method in usr_methods:
ntmt = NtMt(results, pvals, alpha, nt_method, study)
sys.stdout.write("Running multitest correction: {MSRC} {METHOD}\n".format(
MSRC=ntmt.nt_method.source, METHOD=ntmt.nt_method.method))
self._run_multitest[nt_method.source](ntmt)
def _run_multitest_statsmodels(self, ntmt):
"""Use multitest mthods that have been implemented in statsmodels."""
# Only load statsmodels if it is used
multipletests = self.methods.get_statsmodels_multipletests()
results = multipletests(ntmt.pvals, ntmt.alpha, ntmt.nt_method.method)
reject_lst, pvals_corrected, alphacSidak, alphacBonf = results
self._update_pvalcorr(ntmt, pvals_corrected)
def _run_multitest_local(self, ntmt):
"""Use multitest mthods that have been implemented locally."""
corrected_pvals = None
method = ntmt.nt_method.method
if method == "bonferroni":
corrected_pvals = Bonferroni(ntmt.pvals, ntmt.alpha).corrected_pvals
elif method == "sidak":
corrected_pvals = Sidak(ntmt.pvals, ntmt.alpha).corrected_pvals
elif method == "holm":
corrected_pvals = HolmBonferroni(ntmt.pvals, ntmt.alpha).corrected_pvals
elif method == "fdr":
# get the empirical p-value distributions for FDR
term_pop = getattr(self, 'term_pop', None)
if term_pop is None:
term_pop = count_terms(self.pop, self.assoc, self.obo_dag)
p_val_distribution = calc_qval(len(ntmt.study),
self.pop_n,
self.pop, self.assoc,
term_pop, self.obo_dag)
corrected_pvals = FDR(p_val_distribution,
ntmt.results, ntmt.alpha).corrected_pvals
self._update_pvalcorr(ntmt, corrected_pvals)
@staticmethod
def _update_pvalcorr(ntmt, corrected_pvals):
"""Add data members to store multiple test corrections."""
if corrected_pvals is None:
return
for rec, val in zip(ntmt.results, corrected_pvals):
rec.set_corrected_pval(ntmt.nt_method, val)
# Methods for writing results into tables: text, tab-separated, Excel spreadsheets
def wr_txt(self, fout_txt, goea_results, prtfmt=None, **kws):
"""Print GOEA results to text file."""
with open(fout_txt, 'w') as prt:
data_nts = self.prt_txt(prt, goea_results, prtfmt, **kws)
self.log.write(" {N:>5} items WROTE: {F}\n".format(
N=len(data_nts), F=fout_txt))
def prt_txt(self, prt, goea_results, prtfmt=None, **kws):
"""Print GOEA results in text format."""
if prtfmt is None:
prtfmt = "{GO} {NS} {p_uncorrected:5.2e} {study_count:>5} {name}\n"
prtfmt = self.adjust_prtfmt(prtfmt)
prt_flds = RPT.get_fmtflds(prtfmt)
data_nts = get_goea_nts_prt(goea_results, prt_flds, **kws)
RPT.prt_txt(prt, data_nts, prtfmt, prt_flds, **kws)
return data_nts
def wr_xlsx(self, fout_xlsx, goea_results, **kws):
"""Write a xlsx file."""
# kws: prt_if indent
prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
xlsx_data = get_goea_nts_prt(goea_results, prt_flds, **kws)
if 'fld2col_widths' not in kws:
kws['fld2col_widths'] = {f:self.default_fld2col_widths.get(f, 8) for f in prt_flds}
RPT.wr_xlsx(fout_xlsx, xlsx_data, **kws)
def wr_tsv(self, fout_tsv, goea_results, **kws):
"""Write tab-separated table data to file"""
prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
tsv_data = get_goea_nts_prt(goea_results, prt_flds, **kws)
RPT.wr_tsv(fout_tsv, tsv_data, **kws)
def prt_tsv(self, prt, goea_results, **kws):
"""Write tab-separated table data"""
prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
tsv_data = get_goea_nts_prt(goea_results, prt_flds, **kws)
RPT.prt_tsv(prt, tsv_data, prt_flds, **kws)
@staticmethod
def adjust_prtfmt(prtfmt):
"""Adjust format_strings for legal values."""
prtfmt = prtfmt.replace("{p_holm-sidak", "{p_holm_sidak")
prtfmt = prtfmt.replace("{p_simes-hochberg", "{p_simes_hochberg")
return prtfmt
@staticmethod
def get_NS2nts(results, fldnames=None, **kws):
"""Get namedtuples of GOEA results, split into BP, MF, CC."""
NS2nts = cx.defaultdict(list)
nts = get_goea_nts_all(results, fldnames, **kws)
for nt in nts:
NS2nts[nt.NS].append(nt)
return NS2nts
@staticmethod
def get_item_cnt(results, attrname="study_items"):
"""Get all study or population items (e.g., geneids)."""
items = set()
go_cnt = 0
for rec in results:
if hasattr(rec, attrname):
items_cur = getattr(rec, attrname)
# Only count GO term if there are items in the set.
if len(items_cur) != 0:
items |= items_cur
go_cnt += 1
return items, go_cnt
@staticmethod
def get_prtflds_default(results):
"""Get default fields names. Used in printing GOEA results.
Researchers can control which fields they want to print in the GOEA results
or they can use the default fields.
"""
if results:
return results[0].get_prtflds_default()
return []
@staticmethod
def print_summary(results, min_ratio=None, indent=False, pval=0.05):
from .version import __version__ as version
# Header contains provenance and parameters
print("# Generated by GOATOOLS v{0} ({1})".format(version, datetime.date.today()))
print("# min_ratio={0} pval={1}".format(min_ratio, pval))
# field names for output
if results:
print("\t".join(GOEnrichmentStudy.get_prtflds_default(results)))
for rec in results:
# calculate some additional statistics
# (over_under, is_ratio_different)
rec.update_remaining_fldsdefprt(min_ratio=min_ratio)
if pval is not None and rec.p_uncorrected >= pval:
continue
if rec.is_ratio_different:
print(rec.__str__(indent=indent))
def wr_py_goea_results(self, fout_py, goea_results, **kws):
"""Save GOEA results into Python package containing list of namedtuples."""
var_name = kws.get("var_name", "goea_results")
docstring = kws.get("docstring", "")
sortby = kws.get("sortby", None)
if goea_results:
from goatools.nt_utils import wr_py_nts
nts_goea = goea_results
# If list has GOEnrichmentRecords or verbose namedtuples, exclude some fields.
if hasattr(goea_results[0], "_fldsdefprt") or hasattr(goea_results[0], 'goterm'):
# Exclude some attributes from the namedtuple when saving results
# to a Python file because the information is redundant or verbose.
nts_goea = get_goea_nts_prt(goea_results)
docstring = "\n".join([docstring, "# {OBO_VER}\n\n".format(OBO_VER=self.obo_dag.version)])
assert hasattr(nts_goea[0], '_fields')
if sortby is None:
sortby = lambda nt: getattr(nt, 'p_uncorrected')
nts_goea = sorted(nts_goea, key=sortby)
wr_py_nts(fout_py, nts_goea, docstring, var_name)
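# ----------------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the original module). A minimal GOEA
# workflow using the class above. `obodag`, `geneid2gos`, `population_geneids`, and
# `study_geneids` are hypothetical placeholders loaded elsewhere (e.g., a GO DAG built
# from go-basic.obo and an association mapping geneid -> set of GO ids).
#
#   goeaobj = GOEnrichmentStudy(
#       pop=population_geneids,   # background gene ids (assumed to exist)
#       assoc=geneid2gos,         # {geneid: set(GO ids)} association (assumed)
#       obo_dag=obodag,           # GO DAG loaded elsewhere (assumed)
#       propagate_counts=True,
#       alpha=0.05)               # default local methods: bonferroni, sidak, holm
#   results = goeaobj.run_study(study_geneids)
#   goeaobj.wr_xlsx("goea_results.xlsx", results)
#   goeaobj.wr_tsv("goea_results.tsv", results)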
def get_study_items(goea_results):
"""Get all study items (e.g., geneids)."""
study_items = set()
for rec in goea_results:
study_items |= rec.study_items
return study_items
def get_goea_nts_prt(goea_results, fldnames=None, **usr_kws):
"""Return list of namedtuples removing fields which are redundant or verbose."""
kws = usr_kws.copy()
if 'not_fldnames' not in kws:
kws['not_fldnames'] = ['goterm', 'parents', 'children', 'id']
if 'rpt_fmt' not in kws:
kws['rpt_fmt'] = True
return get_goea_nts_all(goea_results, fldnames, **kws)
def get_goea_nts_all(goea_results, fldnames=None, **kws):
"""Get namedtuples containing user-specified (or default) data from GOEA results.
Reformats data from GOEnrichmentRecord objects into lists of
namedtuples so the generic table writers may be used.
"""
data_nts = [] # A list of namedtuples containing GOEA results
if not goea_results:
return data_nts
keep_if = kws.get('keep_if', None)
rpt_fmt = kws.get('rpt_fmt', False)
indent = kws.get('indent', False)
# I. FIELD (column) NAMES
not_fldnames = kws.get('not_fldnames', None)
if fldnames is None:
fldnames = get_fieldnames(goea_results[0])
# Ia. Explicitly exclude specific fields from named tuple
if not_fldnames is not None:
fldnames = [f for f in fldnames if f not in not_fldnames]
nttyp = cx.namedtuple("NtGoeaResults", " ".join(fldnames))
goid_idx = fldnames.index("GO") if 'GO' in fldnames else None
# II. Loop through GOEA results stored in a GOEnrichmentRecord object
for goerec in goea_results:
vals = get_field_values(goerec, fldnames, rpt_fmt)
if indent:
vals[goid_idx] = "".join([goerec.get_indent_dots(), vals[goid_idx]])
ntobj = nttyp._make(vals)
if keep_if is None or keep_if(ntobj):
data_nts.append(ntobj)
return data_nts
def get_field_values(item, fldnames, rpt_fmt=None):
"""Return fieldnames and values of either a namedtuple or GOEnrichmentRecord."""
if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord
return item.get_field_values(fldnames, rpt_fmt)
if hasattr(item, "_fields"): # Is a namedtuple
return [getattr(item, f) for f in fldnames]
def get_fieldnames(item):
"""Return fieldnames of either a namedtuple or GOEnrichmentRecord."""
if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord
return item.get_prtflds_all()
if hasattr(item, "_fields"): # Is a namedtuple
return item._fields
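# ----------------------------------------------------------------------------------
# Illustrative sketch (assumption; not original code): flattening GOEA records into
# namedtuples for custom reporting, keeping only enriched, significant terms.
# `results` is assumed to be the list returned by GOEnrichmentStudy.run_study().
#
#   nts = get_goea_nts_prt(
#       results,
#       keep_if=lambda nt: nt.enrichment == 'e' and nt.p_uncorrected < 0.05,
#       indent=True)   # prefix GO ids with dots showing the GO term's level
#   for nt in nts:
#       print(nt.GO, nt.NS, nt.p_uncorrected, nt.name)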
# Copyright (C) 2010-2017, H Tang et al., All rights reserved.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
# pylint: disable=not-callable
class SimpleTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, x):
x = self.dense1(x)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
class SimpleConvTestModel(keras.Model):
def __init__(self, num_classes=10):
super(SimpleConvTestModel, self).__init__(name='test_model')
self.num_classes = num_classes
self.conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu')
self.flatten = keras.layers.Flatten()
self.dense1 = keras.layers.Dense(num_classes, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
return self.dense1(x)
class MultiIOTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
super(MultiIOTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
if use_dp:
self.dp = keras.layers.Dropout(0.5)
if use_bn:
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x1, x2 = inputs
x1 = self.dense1(x1)
x2 = self.dense1(x2)
if self.use_dp:
x1 = self.dp(x1)
if self.use_bn:
x2 = self.bn(x2)
return [self.dense2(x1), self.dense3(x2)]
class NestedTestModel1(keras.Model):
"""A model subclass nested inside a model subclass.
"""
def __init__(self, num_classes=2):
super(NestedTestModel1, self).__init__(name='nested_model_1')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.test_net = SimpleTestModel(num_classes=4,
use_bn=True,
use_dp=True)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_functional_graph_model(input_dim, num_classes):
# A simple functional-API model (a.k.a. graph network)
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs)
class NestedTestModel2(keras.Model):
"""A model subclass with a functional-API graph network inside.
"""
def __init__(self, num_classes=2):
super(NestedTestModel2, self).__init__(name='nested_model_2')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.test_net = get_functional_graph_model(32, 4)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
# A functional-API model with a subclassed model inside.
# NOTE: this requires the inner subclass to implement `compute_output_shape`
# (a hedged sketch of such an override follows this function).
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(5, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
test_model = Inner()
x = test_model(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs, name='nested_model_3')
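# The NOTE in `get_nested_model_3` refers to `compute_output_shape` on the inner
# subclassed model. A hedged sketch of what such an override could look like
# (assumption for illustration; the tests below use the code exactly as written above):
#
#   class InnerWithShape(keras.Model):
#
#     def __init__(self):
#       super(InnerWithShape, self).__init__()
#       self.dense1 = keras.layers.Dense(32, activation='relu')
#       self.dense2 = keras.layers.Dense(5, activation='relu')
#       self.bn = keras.layers.BatchNormalization()
#
#     def call(self, inputs):
#       return self.bn(self.dense2(self.dense1(inputs)))
#
#     def compute_output_shape(self, input_shape):
#       # Dense(5) fixes the last dimension; BatchNormalization preserves it.
#       dims = tensor_shape.TensorShape(input_shape).as_list()
#       return tensor_shape.TensorShape(dims[:-1] + [5])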
@test_util.run_all_in_graph_and_eager_modes
class ModelSubclassingTest(test.TestCase):
def test_custom_build(self):
class DummyModel(keras.Model):
def __init__(self):
super(DummyModel, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.uses_custom_build = False
def call(self, inputs):
return self.dense1(inputs)
def build(self, input_shape):
self.uses_custom_build = True
test_model = DummyModel()
dummy_data = array_ops.ones((32, 50))
test_model(dummy_data)
self.assertTrue(test_model.uses_custom_build, 'Model should use user '
'defined build when called.')
def test_invalid_input_shape_build(self):
num_classes = 2
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'input shape is not one of the valid types'):
model.build(input_shape=tensor_shape.Dimension(input_dim))
def test_embed_dtype_with_subclass_build(self):
class Embedding(keras.layers.Layer):
"""An Embedding layer."""
def __init__(self, vocab_size, embedding_dim, **kwargs):
super(Embedding, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
def build(self, _):
self.embedding = self.add_variable(
'embedding_kernel',
shape=[self.vocab_size, self.embedding_dim],
dtype=np.float32,
initializer=init_ops.random_uniform_initializer(-0.1, 0.1),
trainable=True)
def call(self, x):
return embedding_ops.embedding_lookup(self.embedding, x)
class EmbedModel(keras.Model):
def __init__(self, vocab_size, embed_size):
super(EmbedModel, self).__init__()
self.embed1 = Embedding(vocab_size, embed_size)
def call(self, inputs):
return self.embed1(inputs)
model = EmbedModel(100, 20)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'if your layers do not support float type inputs'):
model.build(input_shape=(35, 20))
def test_single_time_step_rnn_build(self):
dim = 4
timesteps = 1
batch_input_shape = (None, timesteps, dim)
units = 3
class SimpleRNNModel(keras.Model):
def __init__(self):
super(SimpleRNNModel, self).__init__()
self.lstm = keras.layers.LSTM(units)
def call(self, inputs):
return self.lstm(inputs)
model = SimpleRNNModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, timesteps, dim)))
def test_single_io_subclass_build(self):
num_classes = 2
input_dim = 50
batch_size = None
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_single_io_dimension_subclass_build(self):
num_classes = 2
input_dim = tensor_shape.Dimension(50)
batch_size = tensor_shape.Dimension(None)
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_multidim_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = 32
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = (batch_size,) + input_shape
model.build(input_shape=batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones(batch_input_shape))
def test_tensorshape_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32,) + input_shape))
def test_subclass_save_model(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
weights = model.get_weights()
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = SimpleConvTestModel(num_classes)
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
if h5py is not None:
model.load_weights(hdf5_format_name)
self.assertAllClose(weights, model.get_weights())
model.load_weights(tf_format_name)
self.assertAllClose(weights, model.get_weights())
def test_multi_io_subclass_build(self):
batch_size = None
num_samples = 1000
input_dim = 50
model = MultiIOTestModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = tensor_shape.TensorShape((batch_size, input_dim))
model.build(
input_shape=[batch_input_shape, batch_input_shape])
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
model([x1, x2])
def test_summary(self):
class ToString(object):
def __init__(self):
self.contents = ''
def __call__(self, msg):
self.contents += msg + '\n'
# Single-io
model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
model._set_inputs(np.ones((3, 4))) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 356' in print_fn.contents)
# Multi-io
model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True)
model._set_inputs([np.ones((3, 4)),
np.ones((3, 4))]) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
def test_no_dependency(self):
class Foo(keras.Model):
def __init__(self):
super(Foo, self).__init__()
self.isdep = keras.layers.Dense(1)
self.notdep = data_structures.NoDependency(keras.layers.Dense(2))
self.notdep_var = data_structures.NoDependency(
resource_variable_ops.ResourceVariable(1., name='notdep_var'))
m = Foo()
self.assertEqual([m.isdep, m.notdep], m.layers)
self.assertEqual(1, len(m._checkpoint_dependencies))
self.assertIs(m.isdep, m._checkpoint_dependencies[0].ref)
self.assertEqual('notdep_var:0', m.notdep_var.name)
def test_extra_variable(self):
class ExtraVar(keras.Model):
def __init__(self):
super(ExtraVar, self).__init__()
self.dense = keras.layers.Dense(1)
self.var = resource_variable_ops.ResourceVariable(1.)
self.not_trainable_var = resource_variable_ops.ResourceVariable(
2., trainable=False)
def call(self, inputs):
return self.dense(inputs + self.var)
m = ExtraVar()
self.assertTrue(m.trainable)
self.assertEqual([m.dense], m.layers)
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
m.trainable = False
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([], m.trainable_variables)
self.assertEqual([m.var, m.not_trainable_var], m.non_trainable_variables)
m.trainable = True
m(array_ops.ones([1, 1]))
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.variables)
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.weights)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var],
m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
m.dense.trainable = False
self.assertEqual(
[m.var, m.dense.kernel, m.dense.bias, m.not_trainable_var],
m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.not_trainable_var],
m.non_trainable_variables)
def test_add_weight_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
class MyModelCustomBuild(keras.Model):
def build(self, input_shape):
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModelCustomBuild()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
def test_add_update_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,))
def call(self, inputs):
# Unconditional
self.add_update(self.b.assign(self.b * 2))
# Conditional
self.add_update(self.c.assign(inputs[1, :]), inputs)
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
if context.executing_eagerly():
self.assertEqual(0, len(model.updates))
else:
self.assertEqual(2, len(model.updates))
self.assertEqual(1, len(model.get_updates_for(None)))
self.assertEqual(1, len(model.get_updates_for(x)))
@keras_parameterized.run_all_keras_modes
class ModelSubclassCompiledTest(keras_parameterized.TestCase):
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc', keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_dataset_iterators(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim), dtype=np.float32)
y = np.zeros((num_samples, num_classes), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(iterator, steps=10, verbose=0)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.inputs), 2)
self.assertEqual(len(model.outputs), 2)
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
model = BNNet()
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
def test_saving(self):
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
if h5py is not None:
with self.assertRaises(ValueError):
model.load_weights(hdf5_format_name)
model.load_weights(tf_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
if h5py is not None:
model.load_weights(hdf5_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel1(num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel2(num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
def test_subclass_nested_in_sequential(self):
num_classes = 2
num_samples = 100
input_dim = 50
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
model = keras.Sequential([Inner()])
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.trainable_weights), 6)
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
# case it defaults to the value corresponding to the Model method being
# used (fit -> True, predict -> False, etc.).
# If the user writes their model `call` method to take
# an explicit `training` argument, we must check that the correct value
# is being passed to the model for each method call.
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
class GraphSpecificModelSubclassingTests(test.TestCase):
@test_util.run_deprecated_v1
def test_single_io_workflow_with_tensors(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_tensors(self):
num_classes = (2, 3)
num_samples = 10
input_dim = 50
with self.cached_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
y1 = array_ops.zeros((num_samples, num_classes[0]))
y2 = array_ops.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_updates_and_losses_for_nested_models_in_subclassed_model(self):
# Case 1: deferred-build sequential nested in subclass.
class TestModel1(keras.Model):
def __init__(self):
super(TestModel1, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential([keras.layers.BatchNormalization(axis=1)])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel1()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 2: placeholder-sequential nested in subclass.
class TestModel2(keras.Model):
def __init__(self):
super(TestModel2, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential(
[keras.layers.BatchNormalization(axis=1, input_shape=(10,))])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel2()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 3: functional-API model nested in subclass.
inputs = keras.Input((10,))
outputs = keras.layers.BatchNormalization(axis=1)(inputs)
bn = keras.Model(inputs, outputs)
class TestModel3(keras.Model):
def __init__(self):
super(TestModel3, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = bn
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel3()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.cached_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
x2_placeholder = array_ops.placeholder(
dtype='float32', shape=(None, input_dim))
model._set_inputs([x1, x2_placeholder])
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
class CustomCallModel(keras.Model):
def __init__(self):
super(CustomCallModel, self).__init__()
self.dense1 = keras.layers.Dense(1, activation='relu')
self.dense2 = keras.layers.Dense(1, activation='softmax')
def call(self, first, second, fiddle_with_output='no', training=True):
combined = self.dense1(first) + self.dense2(second)
if fiddle_with_output == 'yes':
return 10. * combined
else:
return combined
class TrainingNoDefaultModel(keras.Model):
def __init__(self):
super(TrainingNoDefaultModel, self).__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training):
return self.dense1(x)
class TrainingMaskingModel(keras.Model):
def __init__(self):
super(TrainingMaskingModel, self).__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training=False, mask=None):
return self.dense1(x)
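# `TrainingNoDefaultModel` declares `training` with no default, so callers must pass
# it explicitly; `TrainingMaskingModel` additionally accepts a `mask` argument. A
# minimal sketch mirroring how the tests below exercise the first model
# (illustrative only):
#
#   model = TrainingNoDefaultModel()
#   model(array_ops.ones([1, 1]), True)    # explicit training value, as in test_training_no_default
#
#   built_model = TrainingNoDefaultModel()
#   built_model.build((None, 2))           # as in test_training_args_call_build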
@test_util.run_all_in_graph_and_eager_modes
class CustomCallSignatureTests(test.TestCase):
def test_no_inputs_in_signature(self):
model = CustomCallModel()
first = array_ops.ones([2, 3])
second = array_ops.ones([2, 5])
output = model(first, second)
self.evaluate([v.initializer for v in model.variables])
expected_output = self.evaluate(model.dense1(first) + model.dense2(second))
self.assertAllClose(expected_output, self.evaluate(output))
output = model(first, second, fiddle_with_output='yes')
self.assertAllClose(10. * expected_output, self.evaluate(output))
output = model(first, second=second, training=False)
self.assertAllClose(expected_output, self.evaluate(output))
def test_training_args_call_build(self):
input_dim = 2
model = TrainingNoDefaultModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_training_and_mask_args_call_build(self):
input_dim = 2
model = TrainingMaskingModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_custom_call_kwargs_and_build(self):
first_input_shape = (2, 3)
second_input_shape = (2, 5)
model = CustomCallModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'cannot build your model if it has positional'):
model.build(input_shape=[first_input_shape, second_input_shape])
def test_inputs_in_signature(self):
class HasInputsAndOtherPositional(keras.Model):
def call(self, inputs, some_other_arg, training=False):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
model = HasInputsAndOtherPositional()
with self.assertRaisesRegexp(
TypeError, 'everything else as a keyword argument'):
x1, x2 = keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2)
def test_kwargs_in_signature(self):
class HasKwargs(keras.Model):
def call(self, x, y=3, **kwargs):
return x
model = HasKwargs()
arg = array_ops.ones([1])
model(arg, a=3)
if not context.executing_eagerly():
self.assertEqual(len(model.inputs), 1)
def test_args_in_signature(self):
class HasArgs(keras.Model):
def call(self, x, *args, **kwargs):
return [x] + list(args)
def compute_output_shape(self, input_shape):
return input_shape
model = HasArgs()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2, x3, a=3)
self.assertEqual(len(model.inputs), 3)
def test_args_and_keywords_in_signature(self):
class HasArgs(keras.Model):
def call(self, x, training=True, *args, **kwargs): # pylint:disable=keyword-arg-before-vararg
return x
model = HasArgs()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
with self.assertRaisesRegexp(
TypeError, 'may not accept both positional arguments and '):
model(x1, x2, x3, a=3)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def test_training_no_default(self):
if context.executing_eagerly():
self.skipTest('b/120997007')
model = TrainingNoDefaultModel()
arg = array_ops.ones([1, 1])
model(arg, True)
self.assertEqual(len(model.inputs), 1)
def test_training_no_default_with_positional(self):
class TrainingNoDefaultWithPositional(keras.Model):
def call(self, x, training, positional):
return x
model = TrainingNoDefaultWithPositional()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
with self.assertRaisesRegexp(TypeError, 'after a non-input'):
model(x1, x2, x3)
if __name__ == '__main__':
test.main()
# $Id: test_MurckoScaffold.py 3672 2010-06-14 17:10:00Z landrgr1 $
#
# Created by Peter Gedeck, June 2008
#
from collections import namedtuple
import doctest
import unittest
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit.Chem.Scaffolds.MurckoScaffold import (GetScaffoldForMol, _pyGetScaffoldForMol,
MurckoScaffoldSmilesFromSmiles,
MurckoScaffoldSmiles, MakeScaffoldGeneric)
TestMolecule = namedtuple('TestMolecule', 'smiles,scaffold')
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(doctest.DocTestSuite(MurckoScaffold, optionflags=doctest.ELLIPSIS))
return tests
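# Illustrative usage sketch (not part of the test suite); the SMILES/scaffold pairs
# below are taken from the assertions in TestCase.test_MurckoScaffoldSmiles and
# TestCase.test_MurckoScaffoldSmilesFromSmiles:
#
#   >>> MurckoScaffoldSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1')
#   'c1ccc(Oc2ccccn2)cc1'
#   >>> MurckoScaffoldSmilesFromSmiles('CCCC')   # no ring system -> empty scaffold
#   ''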
class TestCase(unittest.TestCase):
def test1MurckoScaffold(self):
# Test the functionality on a smaller test set
for testMol in self.testMolecules:
mol = Chem.MolFromSmiles(testMol.smiles)
calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(testMol.scaffold))
self.assertEqual(calcScaffold, actualScaffold)
def test2MurckoScaffold(self):
# Test the functionality on a larger test set
for testMol in self.testMolecules2:
mol = Chem.MolFromSmiles(testMol.smiles)
calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(testMol.scaffold))
self.assertEqual(calcScaffold, actualScaffold)
def test_ReferenceImplementation(self):
# Check that the C++ implementation is equivalent to the Python reference implementation
for testMol in self.testMolecules:
mol = Chem.MolFromSmiles(testMol.smiles)
calcScaffold1 = Chem.MolToSmiles(GetScaffoldForMol(mol))
calcScaffold2 = Chem.MolToSmiles(_pyGetScaffoldForMol(mol))
self.assertEqual(calcScaffold1, calcScaffold2)
def test_MurckoScaffoldSmilesFromSmiles(self):
self.assertEqual(
MurckoScaffoldSmilesFromSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1'), 'c1ccc(Oc2ccccn2)cc1')
self.assertEqual(MurckoScaffoldSmilesFromSmiles('CCCC'), '')
def test_MurckoScaffoldSmiles(self):
self.assertEqual(MurckoScaffoldSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1'), 'c1ccc(Oc2ccccn2)cc1')
self.assertEqual(
MurckoScaffoldSmiles(mol=Chem.MolFromSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1')),
'c1ccc(Oc2ccccn2)cc1')
self.assertRaises(ValueError, MurckoScaffoldSmiles, smiles=None, mol=None)
def test_MakeScaffoldGeneric(self):
def testSmiles(smiles):
return Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles(smiles)))
self.assertEqual(testSmiles('c1ccccc1'), 'C1CCCCC1')
self.assertEqual(testSmiles('c1cccnc1'), 'C1CCCCC1')
# Examples associated with sf.net issue 246
self.assertEqual(testSmiles('c1[nH]ccc1'), 'C1CCCC1')
self.assertEqual(testSmiles('C1[NH2+]C1'), 'C1CC1')
self.assertEqual(testSmiles('C1[C@](Cl)(F)O1'), 'CC1(C)CC1')
testMolecules = [
TestMolecule('CC1CCC1', 'C1CCC1'),
TestMolecule('NCNCC2CC2C1CC1O', 'C1CC1C1CC1'),
# Spiro
TestMolecule('OC2C(C)C21C(N)C1C', 'C2CC12CC1'),
# Carbonyl outside scaffold
TestMolecule('C1CC1C(=O)OC', 'C1CC1'),
# Double bond outside scaffold
TestMolecule('C1CC1C=C', 'C1CC1'),
# Double bond in scaffold
TestMolecule('C1CC1C=CC1CC1C=CNNCO', 'C1CC1C=CC1CC1'),
TestMolecule('CC1CC1C(N)C1C(N)C1', 'C1CC1CC1CC1'),
# Double bond in linker
TestMolecule('C1CC1C(C(C)C)=NC1CC1', 'C1CC1C=NC1CC1'),
# S=O group in scaffold
TestMolecule('C1CC1S(=O)C1CC1C=CNNCO', 'C1CC1S(=O)C1CC1'),
# S=O group outside scaffold
TestMolecule('O=SCNC1CC1S(=O)C1CC1C=CNNCO', 'C1CC1S(=O)C1CC1'),
# SO2 group in scaffold
TestMolecule('C1CC1S(=O)(=O)C1CC1C=CNNCO', 'C1CC1S(=O)(=O)C1CC1'),
# SO2 group outside scaffold
TestMolecule('O=S(CNCNC)(=O)CNC1CC1S(=O)(=O)C1CC1C=CNNCO', 'C1CC1S(=O)(=O)C1CC1'),
# Hydroxamide
TestMolecule('C1CC1C=NO', 'C1CC1'),
# Cyano group
TestMolecule('C1CC1C#N', 'C1CC1'),
# Acetylene group
TestMolecule('C1CC1C#CNC', 'C1CC1'),
TestMolecule('O=C1N(C)C(=O)N1C#CNC', 'O=C1NC(=O)N1'),
TestMolecule('[O-][N+](=O)c1cc(ccc1Cl)NS(=O)(=O)Cc2ccccc2', 'c1ccccc1NS(=O)(=O)Cc2ccccc2'),
# N-Substituted pyrrol
TestMolecule('Cn1cccc1', 'c1ccc[nH]1'),
# Explicit hydrogens are removed
TestMolecule('C1CC1[CH](C)C1CC1', 'C1CC1CC1CC1'),
]
testMolecules2 = [
TestMolecule('CCOc1ccccc1N(S(C)(=O)=O)CC(NC1CCCCC1)=O', 'O=C(NC1CCCCC1)CNc1ccccc1'),
TestMolecule('c1ccc(-c2c(C)n(-c3c(C(O)=O)cccc3)c(C)nc2=O)cc1',
'O=c1c(cn(cn1)-c1ccccc1)-c1ccccc1'),
TestMolecule('Cc1ccc(Cl)c2c1NC(=O)C2=C1NC(=S)NC1=O', 'c1cc2c(cc1)C(=C1C(NC(N1)=S)=O)C(=O)N2'),
TestMolecule('CNC(=O)CCc1[nH]c2c(c1Sc1ccccc1)cccc2', 'c1cc(Sc2c3c([nH]c2)cccc3)ccc1'),
TestMolecule('CC(=O)OCC(=O)C1(O)CCC2C1(C)CC(=O)C1C3(C)CCC(=O)C=C3CCC21',
'O=C1C=C2CCC3C4CCCC4CC(=O)C3C2CC1'),
TestMolecule('CC(C)CC(Nc1nc(Cl)ccc1[N+]([O-])=O)C(O)=O', 'c1ccncc1'),
TestMolecule('COc1ccc(C(Nc2ccc(S(N3C(C)CCCC3)(=O)=O)cc2)=O)c(OC)c1OC',
'O=C(Nc1ccc(S(=O)(=O)N2CCCCC2)cc1)c1ccccc1'),
TestMolecule('CC(C)CCNc1nc(N)c([N+](=O)[O-])c(NCCO)n1', 'c1cncnc1'),
TestMolecule('c1ccc(Oc2c(NC(COC(c3c(C)noc3C)=O)=O)cccc2)cc1',
'O=C(COC(=O)c1cnoc1)Nc1ccccc1Oc1ccccc1'),
TestMolecule('COC(CCCCC1SCC(NC(OC)=O)C1NC(OC)=O)=O', 'C1CCCS1'),
TestMolecule('CSc1ccc(-c2c(C#N)c(N)nc3n(-c4ccccc4)nc(C)c32)cc1',
'c1ccc(cc1)-c1c2c(n(nc2)-c2ccccc2)ncc1'),
TestMolecule('O=C1Cc2ccccc2Sc2c1cc(Cl)cc2', 'O=C1Cc2ccccc2Sc2ccccc21'),
TestMolecule('COC(c1n(CC(N(C)c2ccccc2)=O)c2ccsc2c1)=O', 'O=C(Cn1c2ccsc2cc1)Nc1ccccc1'),
TestMolecule('N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12',
'N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12'),
TestMolecule('CCOC(c1ccc(NC(CCc2c(C)nc3ncnn3c2C)=O)cc1)=O', 'O=C(Nc1ccccc1)CCc1cnc2n(ncn2)c1'),
TestMolecule('COC(=O)C1=C(C)NC(C)=C(C(OC)=O)C1c1oc(-c2c(Cl)c(Cl)ccc2)cc1',
'c1ccc(-c2oc(C3C=CNC=C3)cc2)cc1'),
TestMolecule('CCN(S(c1cc(NC(COC(CCc2nc3ccccc3s2)=O)=O)ccc1)(=O)=O)CC',
'c1cc(NC(COC(=O)CCc2nc3c(s2)cccc3)=O)ccc1'),
TestMolecule('CCOC(c1cc(OC(c2ccccc2)=O)n(-c2ccccc2)n1)=O', 'O=C(Oc1n(ncc1)-c1ccccc1)c1ccccc1'),
TestMolecule('CCOC(=O)c1nc2c(c(NCc3ccccc3F)n1)cccc2', 'c1ccc(CNc2ncnc3c2cccc3)cc1'),
TestMolecule('Cc1nc(C)n(CC(N2CCCC(C(c3c(C)cc(Cl)cc3)=O)C2)=O)n1',
'c1ccc(cc1)C(=O)C1CCCN(C(=O)Cn2cncn2)C1'),
TestMolecule('COc1cc(NC(=O)c2nnn(CCc3ccccc3)c2N)c(OC)cc1', 'O=C(c1nnn(c1)CCc1ccccc1)Nc1ccccc1'),
TestMolecule('Cc1cc(C(=O)CN2C(=O)c3ccccc3C2=O)c(C)n1Cc1cccs1',
'O=C(CN1C(c2c(cccc2)C1=O)=O)c1cn(Cc2cccs2)cc1'),
TestMolecule('c1cnc2c(c1)cccc2S(N1CCC(C(=O)N2CCN(c3ccc(Cl)cc3)CC2)CC1)(=O)=O',
'c1ccc(cc1)N1CCN(C(=O)C2CCN(S(=O)(=O)c3c4ncccc4ccc3)CC2)CC1'),
TestMolecule('CCOC(c1c(C)[nH]c(C(NNC(c2ccc(C(C)(C)C)cc2)=O)=O)c1C)=O',
'c1ccc(cc1)C(NNC(c1ccc[nH]1)=O)=O'),
TestMolecule('CCOC(c1cc(C(C)C)sc1NC(=O)COC(CCS(c1ccccc1)(=O)=O)=O)=O',
'c1ccc(S(CCC(=O)OCC(Nc2cccs2)=O)(=O)=O)cc1'),
TestMolecule('CCC1CCCCN1CCCNC(=O)Cn1nc(-c2ccccc2)ccc1=O',
'O=C(NCCCN1CCCCC1)Cn1nc(ccc1=O)-c1ccccc1'),
TestMolecule('CCc1cc(OCCn2nc(C(O)=O)c3ccccc3c2=O)ccc1', 'O=c1n(CCOc2ccccc2)ncc2ccccc21'),
TestMolecule('Fc1ccc(CN2CCN3C(CCC3)C2C2CCCCC2)cc1F', 'c1ccc(cc1)CN1CCN2CCCC2C1C1CCCCC1'),
TestMolecule('O=[N+]([O-])c1cc(-c2nnc(N3CCOCC3)c3ccccc23)ccc1N1CCOCC1',
'c1cc2c(nnc(c2cc1)N1CCOCC1)-c1ccc(cc1)N1CCOCC1'),
TestMolecule('Cc1ccnc(NC(=O)COc2ccc3oc4c(c3c2)CCCC4)c1',
'O=C(COc1ccc2oc3c(c2c1)CCCC3)Nc1ccccn1'),
TestMolecule('Cc1cc(=O)oc(C)c1C(=O)NCCCN1CCN(c2ccc(F)cc2)CC1',
'c1ccc(N2CCN(CCCNC(c3ccc(oc3)=O)=O)CC2)cc1'),
TestMolecule('Cc1cc(C(=O)CSc2nc(=O)cc(N)[nH]2)c(C)n1-c1cccc(F)c1',
'O=C(CSc1nc(cc[nH]1)=O)c1cn(cc1)-c1ccccc1'),
TestMolecule('CCN(S(c1cccc(C(=O)N2CCCCC2)c1)(=O)=O)CC', 'O=C(N1CCCCC1)c1ccccc1'),
TestMolecule('CNC(=S)N1CCC(NC(=O)C23CC4CC(C2)CC(C3)C4)CC1',
'O=C(NC1CCNCC1)C12CC3CC(C1)CC(C3)C2'),
TestMolecule('Cc1cc2c(cc1)N=C(C)C(N=O)=C(C)N2', 'c1cc2NC=CC=Nc2cc1'),
TestMolecule('COc1ccc(Sc2cc(C(F)(F)F)nc(-c3ncccc3)n2)cc1', 'c1ccc(cc1)Sc1nc(ncc1)-c1ncccc1'),
TestMolecule('c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1',
'c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1'),
TestMolecule('O=C(NCc1ccc(Cl)cc1)c1noc(-c2ccco2)c1', 'O=C(c1noc(c1)-c1ccco1)NCc1ccccc1'),
TestMolecule('CN(C)c1ccc(C(c2n(CCOC(=O)Nc3ccc(Cl)cc3)nnn2)N2CCOCC2)cc1',
'O=C(Nc1ccccc1)OCCn1nnnc1C(c1ccccc1)N1CCOCC1'),
TestMolecule('NC(=NOC(=O)c1cc(Cn2cc(C(F)(F)F)ccc2=O)ccc1)c1ccccc1',
'c1ccc(C=NOC(c2cc(Cn3ccccc3=O)ccc2)=O)cc1'),
TestMolecule('CCc1nnc(NC(=O)Cc2c(-c3ccc(C)cc3)nc(C)s2)s1', 'O=C(Cc1c(-c2ccccc2)ncs1)Nc1nncs1'),
TestMolecule('COCCCNC(=O)CN1C(=O)N(Cc2ccccc2Cl)CC1', 'O=C1NCCN1Cc1ccccc1'),
TestMolecule('Cc1cc([N+]([O-])=O)nn1CC(=O)NCCCn1ccnc1', 'O=C(Cn1nccc1)NCCCn1ccnc1'),
TestMolecule('c1cc(F)c(N2CCN(C(=O)c3ccc(S(NCC4OCCC4)(=O)=O)cc3)CC2)cc1',
'c1ccc(cc1)N1CCN(C(c2ccc(cc2)S(=O)(=O)NCC2OCCC2)=O)CC1'),
TestMolecule('CC(NCc1cccnc1)=C1C(=O)NC(=O)N(c2ccc(C)cc2)C1=O',
'c1cc(ccc1)N1C(=O)NC(C(=CNCc2cccnc2)C1=O)=O'),
TestMolecule('Cc1ccn(C)c(=N)c1', 'N=c1[nH]cccc1'),
TestMolecule('Cc1cc(C)nc(N2CCC(CNC(=O)CCc3ccccc3)CC2)n1',
'O=C(CCc1ccccc1)NCC1CCN(c2ncccn2)CC1'),
TestMolecule('CCOC1=CC(=CNNC(CCCC(NC2CCCCC2)=O)=O)C=CC1=O',
'C1=CC(C=CC1=O)=CNNC(=O)CCCC(=O)NC1CCCCC1'),
TestMolecule('CC(=O)N1CCN(c2ccc([N+]([O-])=O)cc2)CC1', 'c1ccc(cc1)N1CCNCC1'),
TestMolecule('CS(N(CC(=O)N1CCCCC1)Cc1ccc(Cl)cc1)(=O)=O', 'O=C(N1CCCCC1)CNCc1ccccc1'),
TestMolecule('c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1',
'c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1'),
TestMolecule('Cc1cccc2sc(NNC(=O)C3=COCCO3)nc12', 'O=C(NNc1nc2ccccc2s1)C1=COCCO1'),
TestMolecule('c1ccc2c(c1)N(C)C1(C=Nc3c(cc(N4CCOCC4)c4ccccc34)O1)C2(C)C',
'C1=Nc2c(cc(c3ccccc23)N2CCOCC2)OC11Nc2ccccc2C1'),
TestMolecule('COc1cccc(C2N(CCN3CCOCC3)C(=O)C(O)=C2C(=O)c2sc(C)nc2C)c1',
'O=C(C1=CC(=O)N(C1c1ccccc1)CCN1CCOCC1)c1scnc1'),
TestMolecule('COc1cc(OC)c(NC(CSc2nc3c(c(=O)n2-c2ccc(F)cc2)SCC3)=O)cc1',
'c1ccc(cc1)NC(=O)CSc1n(c(=O)c2c(n1)CCS2)-c1ccccc1'),
TestMolecule('Cc1ccccc1CN1c2ccccc2C2(C1=O)OCCCO2', 'O=C1C2(OCCCO2)c2c(N1Cc1ccccc1)cccc2'),
TestMolecule('O=C(N1C2(OCC1)CCN(c1ncc(C(F)(F)F)cc1Cl)CC2)c1ccccc1',
'O=C(c1ccccc1)N1C2(OCC1)CCN(c1ccccn1)CC2'),
TestMolecule('CC=CC=CC(=O)Nc1nccs1', 'c1ncsc1'),
TestMolecule('CC(C)(C)c1ccc(C(c2c[nH]c(C(NCc3cccnc3)=O)c2)=O)cc1',
'c1ccc(cc1)C(=O)c1c[nH]c(c1)C(=O)NCc1cccnc1'),
TestMolecule('CCC(=O)Nc1c(C)nn(-c2cc(C)c(C)cc2)c1C', 'c1ccc(cc1)-n1nccc1'),
TestMolecule('Cc1ccc(SCCC(=O)NCCSCc2c(C)cccc2)cc1', 'O=C(NCCSCc1ccccc1)CCSc1ccccc1'),
TestMolecule('CC1=NN(Cc2ccccc2)C(=O)C1=Cc1ccc(N(C)C)cc1', 'O=C1C(C=NN1Cc1ccccc1)=Cc1ccccc1'),
TestMolecule('COCC(=O)Nc1ccc(S(NCCc2ccccc2)(=O)=O)cc1', 'c1ccc(CCNS(=O)(=O)c2ccccc2)cc1'),
TestMolecule('CCOC(=O)N(C)c1ccc(C(O)(C(F)(F)F)C(F)(F)F)cc1', 'c1ccccc1'),
TestMolecule('Fc1ccc(COC2=C(C(O)=O)CCNC2=O)cc1F', 'O=C1NCCC=C1OCc1ccccc1'),
TestMolecule('O=C1N2C(Nc3ccccc31)CCCCC2', 'O=C1N2C(Nc3ccccc31)CCCCC2'),
TestMolecule('Cl.COc1ccc(-c2nc3n(ccc4ccccc43)c2CN2CCOCC2)cc1OC',
'c1cccc(c1)-c1nc2c3c(ccn2c1CN1CCOCC1)cccc3'),
TestMolecule('ClCc1oc(-c2ccccc2)nn1', 'c1oc(nn1)-c1ccccc1'),
TestMolecule('Cl.Cc1ccc(OCC(O)Cn2c(=N)n(CCN3CCCCC3)c3ccccc32)cc1',
'N=c1n(CCCOc2ccccc2)c2ccccc2n1CCN1CCCCC1'),
TestMolecule('COc1ccc(C(=O)C=C(C)Nc2ccc3c(c2)OCO3)cc1', 'O=C(C=CNc1ccc2c(c1)OCO2)c1ccccc1'),
TestMolecule('c1csc(CN(C(c2ccc(F)cc2)C(NC2CCCCC2)=O)C(=O)CN2S(=O)(=O)c3ccccc3C2=O)c1',
'c1cc(CN(C(=O)CN2S(=O)(c3ccccc3C2=O)=O)C(C(=O)NC2CCCCC2)c2ccccc2)sc1'),
TestMolecule('c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1',
'c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1'),
TestMolecule('Cc1cccc(C=NNC(=O)Cn2c(N)nnn2)n1', 'O=C(Cn1cnnn1)NN=Cc1ccccn1'),
TestMolecule('CCOC(C1(Cc2ccc(Cl)cc2)CCN(C(c2cc(C)nc(C)n2)=O)CC1)=O',
'O=C(N1CCC(CC1)Cc1ccccc1)c1ccncn1'),
TestMolecule('c1ccc(C(N(CC2OCCC2)C(Cn2nnc3ccccc23)=O)C(NCc2ccc(F)cc2)=O)cc1',
'O=C(N(C(c1ccccc1)C(=O)NCc1ccccc1)CC1OCCC1)Cn1nnc2c1cccc2'),
TestMolecule('O=C1CSC(c2ccncc2)N1Cc1occc1', 'O=C1CSC(c2ccncc2)N1Cc1occc1'),
TestMolecule('COc1c(OCc2ccccc2)c(Br)cc(C=NNC(=O)Cn2nc([N+]([O-])=O)cc2C)c1',
'O=C(Cn1nccc1)NN=Cc1ccc(cc1)OCc1ccccc1'),
TestMolecule('Cc1c(Cn2nnc(-c3cc(C(=O)O)ccc3)n2)cccc1', 'c1cccc(-c2nn(nn2)Cc2ccccc2)c1'),
TestMolecule('O=C(c1ccc2snnc2c1)N1CCCC1', 'O=C(c1ccc2snnc2c1)N1CCCC1'),
TestMolecule('c1ccc(CC(NN2C(=O)C(=Cc3c(C(O)=O)cccc3)SC2=S)=O)cc1',
'O=C1C(=Cc2ccccc2)SC(=S)N1NC(Cc1ccccc1)=O'),
TestMolecule('Cc1ccccc1OCC(=O)NN=Cc1ccncc1', 'O=C(COc1ccccc1)NN=Cc1ccncc1'),
TestMolecule('O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1',
'O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1'),
TestMolecule('COc1ccc(NC(=S)N(Cc2cnccc2)Cc2c(=O)[nH]c3c(c2)cc(OC)c(OC)c3)cc1',
'O=c1c(CN(C(=S)Nc2ccccc2)Cc2cnccc2)cc2ccccc2[nH]1'),
TestMolecule('Nc1ccc2nc3c([nH]c(=O)n(C4CCCCC4)c3=O)nc2c1',
'c1ccc2nc3[nH]c(n(c(c3nc2c1)=O)C1CCCCC1)=O'),
TestMolecule('Cc1cc(NC(=O)c2ccc(S(Nc3ccccc3)(=O)=O)cc2)no1',
'c1cc(no1)NC(=O)c1ccc(S(=O)(=O)Nc2ccccc2)cc1'),
TestMolecule('Nn1c(Cc2c3c(cccc3)ccc2)nnc1SCc1ccccc1',
'c1ccc(CSc2nnc([nH]2)Cc2c3c(cccc3)ccc2)cc1'),
TestMolecule('Cc1[nH]nc(Nc2cc(C)ccc2)c1[N+](=O)[O-]', 'c1ccc(cc1)Nc1n[nH]cc1'),
TestMolecule('CC1Cn2c(nc3n(C)c(=O)[nH]c(=O)c23)O1', 'O=c1[nH]c2nc3n(c2c([nH]1)=O)CCO3'),
TestMolecule('c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1',
'c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1'),
TestMolecule('c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1',
'c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1'),
TestMolecule('CCCn1c(N2CCN(C)CC2)nc2n(C)c(=O)[nH]c(=O)c12',
'O=c1[nH]c([nH]c2nc([nH]c12)N1CCNCC1)=O'),
TestMolecule('CCn1c(SCC(Nc2cc(S(N3CCOCC3)(=O)=O)ccc2OC)=O)nnc1-c1ccncc1',
'c1cc(S(=O)(=O)N2CCOCC2)cc(NC(=O)CSc2nnc(-c3ccncc3)[nH]2)c1'),
TestMolecule('C#CCNC(=O)C1=CC(c2ccc(Br)cc2)CC(OCc2ccc(CO)cc2)O1',
'c1cccc(c1)C1C=COC(OCc2ccccc2)C1'),
TestMolecule('CCc1c(SCC(=O)Nc2cc(C)on2)nc2ccc(C)cc2c1', 'O=C(Nc1ccon1)CSc1ccc2c(cccc2)n1'),
TestMolecule('CCOCCCN(C(C(NC1CCCC1)=O)c1cccc(OC)c1OC)C(c1ccco1)=O',
'c1cc(ccc1)C(NC(c1occc1)=O)C(=O)NC1CCCC1'),
TestMolecule('Cc1ccc(C(=O)NC(=S)NNS(c2ccccc2)(=O)=O)cc1',
'c1cccc(c1)C(NC(=S)NNS(=O)(=O)c1ccccc1)=O'),
TestMolecule('COc1ccc(CC(N)=NOC(=O)c2sccc2)cc1', 'O=C(ON=CCc1ccccc1)c1sccc1'),
TestMolecule('c1ccc(C(O)=C2C(c3ncccc3)N(CC(OC)OC)C(=O)C2=O)cc1',
'c1cc(C=C2C(=O)C(=O)NC2c2ncccc2)ccc1'),
TestMolecule('COC(=O)CSc1nc(C)cc(Oc2ccccc2)n1', 'c1ccc(Oc2ccncn2)cc1'),
TestMolecule('COc1ccc(Cn2c(C)ccc2C)cc1', 'c1ccc(cc1)Cn1cccc1'),
TestMolecule('COc1cccc(N2CCN(C3CC(=O)N(c4ccc(C)c(Cl)c4)C3=O)CC2)c1',
'O=C1N(c2ccccc2)C(=O)C(C1)N1CCN(c2ccccc2)CC1'),
TestMolecule('COc1cccc(OC)c1OCCN(C)C.OC(=O)C(O)=O', 'c1ccccc1'),
TestMolecule('C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1',
'C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1'),
TestMolecule('CCCN(C(=O)Cn1ncc2c(=O)oc3c(c12)cccc3)c1cc(C)ccc1',
'O=C(Cn1ncc2c(oc3c(cccc3)c12)=O)Nc1ccccc1'),
TestMolecule('CNC(NC(CSc1nnc(C(F)(F)F)n1C)=O)=O', 'n1nc[nH]c1'),
TestMolecule('CCOCCCN1C(=O)CC(C(NCCc2ccc(C)cc2)=O)C1', 'O=C1NCC(C1)C(NCCc1ccccc1)=O'),
TestMolecule('COc1c([N+](=O)[O-])cc(CSc2n[nH]c(C)n2)cc1', 'c1ccc(CSc2nc[nH]n2)cc1'),
TestMolecule('CN(C)CC(=O)c1ccc(-c2ccccc2)cc1', 'c1cccc(c1)-c1ccccc1'),
TestMolecule('CC1(O)C(=O)c2c(cccc2)N(c2ccccc2)C1=O', 'O=C1CC(=O)N(c2c1cccc2)c1ccccc1'),
TestMolecule('CN(S(c1ccccc1)(=O)=O)CC(=O)NCCc1ccccc1', 'c1ccc(CCNC(=O)CNS(=O)(=O)c2ccccc2)cc1'),
TestMolecule('CCNc1ccccc1C(=O)O', 'c1ccccc1'),
TestMolecule('CC1(C)C(CSc2nc3ccccc3[nH]2)C1(Cl)Cl', 'c1ccc2c(nc([nH]2)SCC2CC2)c1'),
TestMolecule('CC(C)c1ccc(OCC(=O)NC(=S)Nc2c3cccc4c3c(cc2)CC4)cc1',
'O=C(NC(=S)Nc1c2cccc3c2c(cc1)CC3)COc1ccccc1'),
TestMolecule('CN(C)c1ccc(NC(CN2CCC(C(c3ccc(F)cc3)=O)CC2)=O)cc1',
'c1cccc(c1)NC(CN1CCC(CC1)C(=O)c1ccccc1)=O'),
TestMolecule('CCCCN(C)C(=O)Cc1c(OC)ccc2cc(Br)ccc21', 'c1c2ccccc2ccc1'),
TestMolecule('Cc1ccc(NC(CSc2sc(NC(CN3CCOCC3)=O)nn2)=O)cc1',
'O=C(Nc1ccccc1)CSc1sc(nn1)NC(=O)CN1CCOCC1'),
TestMolecule('COCCNC(=S)NNc1cccc(C(=O)O)c1', 'c1ccccc1'),
TestMolecule('O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2', 'O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2'),
TestMolecule('COc1cc2ccccc2cc1C(=O)NCC(c1sccc1)N(C)C', 'O=C(NCCc1sccc1)c1cc2c(cc1)cccc2'),
TestMolecule('COc1ccc(C(N(C)C)CNC(=O)CCOc2ccccc2)cc1', 'O=C(NCCc1ccccc1)CCOc1ccccc1'),
TestMolecule('Cl.CCN(CC)CCCN1C(=O)CSC1c1ccc([N+]([O-])=O)cc1', 'O=C1CSC(c2ccccc2)N1'),
TestMolecule('CCC(Nc1ccc(OC)cc1OC)=C1C(=O)NC(=O)NC1=O', 'c1cc(NC=C2C(=O)NC(=O)NC2=O)ccc1'),
TestMolecule('c1coc(-c2cc(C(F)(F)F)nc(NCc3ccc(F)cc3)n2)c1', 'c1ccc(CNc2nccc(n2)-c2occc2)cc1'),
TestMolecule('CCOC(Nc1sc(C)c(C)c1C(OCC)=O)=O', 'c1ccsc1'),
TestMolecule('O=CN1CCN(C(C(=O)NC2CCCCC2)c2cc3c(cc2[N+]([O-])=O)OCO3)CC1',
'O=C(C(N1CCNCC1)c1ccc2c(c1)OCO2)NC1CCCCC1'),
TestMolecule('COc1cc(C2N(c3ccc(Br)cc3)C(=O)c3n[nH]c(C)c32)ccc1O',
'O=C1c2n[nH]cc2C(N1c1ccccc1)c1ccccc1'),
TestMolecule('c1cc(NC(=O)c2ccccc2[N+]([O-])=O)c(N2CCOCC2)cc1',
'O=C(Nc1c(cccc1)N1CCOCC1)c1ccccc1'),
TestMolecule('N#Cc1cc2c(nc1SCC(=O)N1CCCCC1)CCCCC2', 'O=C(N1CCCCC1)CSc1ccc2c(n1)CCCCC2'),
TestMolecule('CCN(CC)c1ccc(CN(C(=O)c2cc(OC)c(OC)c(OC)c2)C2CCS(=O)(=O)C2)cc1',
'O=S1(=O)CCC(N(Cc2ccccc2)C(=O)c2ccccc2)C1'),
TestMolecule('COc1cc(NC(=S)N2CCN(Cc3ccccc3)CC2)cc(OC)c1', 'S=C(N1CCN(CC1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('CC(=O)C(=CNc1ccc(OCc2ccccc2)cc1)c1ccccc1', 'c1cccc(c1)COc1ccc(NC=Cc2ccccc2)cc1'),
TestMolecule('CC(C)C(C(NC(C)C(N)=O)=O)NC(C1CCCN1C(OC(C)(C)C)=O)=O', 'C1CCNC1'),
TestMolecule('CCOc1ccc(N2CC(C(=O)Nc3cccc(S(NC4=NCCC4)(=O)=O)c3)CC2=O)cc1',
'c1cccc(c1)N1CC(C(=O)Nc2cccc(S(=O)(=O)NC3=NCCC3)c2)CC1=O'),
TestMolecule('O=C(NCc1ccccc1Cl)CSc1ccc(-c2cccs2)nn1', 'O=C(NCc1ccccc1)CSc1ccc(nn1)-c1sccc1'),
TestMolecule('COc1ccc(OC)c(N=c2ssnc2Cl)c1', 'c1cccc(c1)N=c1ssnc1'),
TestMolecule('CC(=O)C1=C(C)NC(=O)CC1c1c(Cl)cccc1', 'O=C1CC(C=CN1)c1ccccc1'),
TestMolecule('CCC(=O)N=C(N)Nc1nc(C)c2cc(C)c(C)cc2n1', 'c1cc2c(cc1)ncnc2'),
TestMolecule('Cc1ccccc1C(OC1OC(=O)C(Cl)=C1Nc1ccc(C(O)=O)cc1)=O',
'O=C(OC1OC(C=C1Nc1ccccc1)=O)c1ccccc1'),
TestMolecule('CCOc1cc(CN2CCC(CO)(Cc3cccc(C(F)(F)F)c3)CC2)ccc1OC',
'c1ccc(cc1)CC1CCN(Cc2ccccc2)CC1'),
TestMolecule('Cc1cc2c([nH]c(=O)c(CCNC(c3cccs3)=O)c2)cc1C',
'O=C(NCCc1cc2ccccc2[nH]c1=O)c1cccs1'),
TestMolecule('Cc1ccc(Nc2cc(=O)[nH]c(=O)[nH]2)cc1C', 'c1cccc(c1)Nc1cc([nH]c([nH]1)=O)=O'),
TestMolecule('Cc1cc(OCC(=O)NC2CCS(=O)(=O)C2)c2c(oc(=O)c3c2CCC3)c1',
'O=C(NC1CCS(=O)(C1)=O)COc1c2c(ccc1)oc(c1c2CCC1)=O'),
TestMolecule('CCc1sc(NC(CCC(NCCc2ccc(OC)c(OC)c2)=O)=O)nn1',
'c1cc(ccc1)CCNC(=O)CCC(=O)Nc1scnn1'),
TestMolecule('N#CC1=C(SCc2ccccc2)NC(=O)CC1c1ccc(O)cc1', 'O=C1NC(=CC(C1)c1ccccc1)SCc1ccccc1'),
TestMolecule('O=C(NCCN1CCOCC1)c1csc2c1CCCC2', 'O=C(NCCN1CCOCC1)c1csc2c1CCCC2'),
TestMolecule('CCCCC(=O)Nc1cc(OC)c(NC(C2CCCCC2)=O)cc1OC', 'O=C(Nc1ccccc1)C1CCCCC1'),
TestMolecule('Cc1ccc(C(C(C)OC(C2CC(=O)N(C3CCCCC3)C2)=O)=O)cc1',
'c1cc(C(=O)COC(C2CC(=O)N(C2)C2CCCCC2)=O)ccc1'),
TestMolecule('Cc1ccc(S(C(C#N)c2c(N3CCCC3)nc3ccccc3n2)(=O)=O)cc1C',
'c1ccc(cc1)S(=O)(=O)Cc1c(nc2ccccc2n1)N1CCCC1'),
TestMolecule('CC1(C)OC(=O)C(=Cc2[nH]ccc2)C(=O)O1', 'O=C1OCOC(=O)C1=Cc1[nH]ccc1'),
TestMolecule('Cc1cc(C)cc(Oc2nc3n(cccc3C)c(=O)c2C=C(C#N)C(=O)NC2CCS(=O)(=O)C2)c1',
'c1ccc(cc1)Oc1c(c(=O)n2ccccc2n1)C=CC(=O)NC1CCS(=O)(=O)C1'),
TestMolecule('COc1cc(NC(=O)NCc2c(C)onc2-c2ccccc2)ccc1', 'O=C(NCc1conc1-c1ccccc1)Nc1ccccc1'),
TestMolecule('c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1', 'c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1'),
TestMolecule('CCN1C(=O)C2C(c3cccs3)N3C4C(=O)N(CC)C(=O)C4C(c4cccs4)N3C2C1=O',
'c1cc(sc1)C1C2C(NC(=O)C2N2N1C1C(=O)NC(=O)C1C2c1cccs1)=O'),
TestMolecule('Cc1cc(C(N2CCCC(C(c3cc(F)ccc3F)=O)C2)=O)c(C)o1',
'O=C(N1CCCC(C(=O)c2ccccc2)C1)c1cocc1'),
TestMolecule('COc1cc(C=NO)ccc1Oc1c([N+]([O-])=O)cc([N+]([O-])=O)cc1', 'c1cccc(Oc2ccccc2)c1'),
TestMolecule('Cc1ccc(N(Cc2c(=O)[nH]c3ccc(C)cc3c2)C(c2cccs2)=O)cc1',
'O=C(N(c1ccccc1)Cc1c([nH]c2c(cccc2)c1)=O)c1cccs1'),
TestMolecule('COc1ccc(C(=O)Nn2c(C)nnc2-n2c(C)cc(C)n2)cc1OC', 'O=C(c1ccccc1)Nn1cnnc1-n1nccc1'),
TestMolecule('Cc1c(NC(=O)c2c(C)c(Cl)c(C)nc2Cl)cccc1', 'O=C(c1cccnc1)Nc1ccccc1'),
TestMolecule('c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1',
'c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1'),
TestMolecule('CNc1n(-c2ccccc2)ncc1[N+](=O)[O-]', 'c1n(ncc1)-c1ccccc1'),
TestMolecule('CC1SC2(NC1=O)C1CC3CC(C1)CC2C3', 'O=C1CSC2(N1)C1CC3CC(C1)CC2C3'),
TestMolecule('CCc1ccccc1NC(=S)N(C(C)c1occc1)CCOC', 'S=C(NCc1occc1)Nc1ccccc1'),
TestMolecule('CCC(C)NC(=O)C1CCCN(S(c2ccc(-n3cnnn3)cc2)(=O)=O)C1',
'C1CCN(CC1)S(=O)(=O)c1ccc(cc1)-n1nnnc1'),
TestMolecule('COc1c2c(ccc1)C1CC(C)(O2)N(Cc2ccccc2)C(=O)N1', 'O=C1NC2CC(Oc3ccccc32)N1Cc1ccccc1'),
TestMolecule('COc1ccc(C2NC(=O)c3c(cccc3)O2)c(OC)c1OC', 'O=C1NC(Oc2c1cccc2)c1ccccc1'),
TestMolecule('O=C(NNC=C1C=Nc2ccccc21)c1ccn(Cc2c(Cl)cc(Cl)cc2)n1',
'O=C(NNC=C1c2c(cccc2)N=C1)c1nn(cc1)Cc1ccccc1'),
TestMolecule('c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1',
'c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1'),
TestMolecule('COC1=CC(=O)C(=C2NNC(C(F)(F)F)=C2c2cc3ccccc3o2)C=C1',
'O=C1C=CC=CC1=C1NNC=C1c1cc2ccccc2o1'),
TestMolecule('CCOC(=O)c1c(C(COC(C=Cc2ccc(Cl)cc2)=O)=O)c(C)[nH]c1C',
'c1ccc(C=CC(OCC(=O)c2cc[nH]c2)=O)cc1'),
TestMolecule('Cc1nc2ncnn2c(N2CCN(c3nnnn3-c3ccccc3)CC2)c1',
'c1nc2ncnn2c(c1)N1CCN(c2nnnn2-c2ccccc2)CC1'),
TestMolecule('CC(C)Oc1ccc(C(=O)Nc2ccc(NC(c3ccco3)=O)c(Cl)c2)cc1',
'O=C(Nc1ccc(cc1)NC(=O)c1ccccc1)c1occc1'),
TestMolecule('CC(c1ccccc1)NC(C(NCC1OCCC1)=O)=O', 'O=C(NCc1ccccc1)C(=O)NCC1OCCC1'),
TestMolecule('CCCCOc1ccc(NC(=O)CCSc2nccn2C)cc1', 'O=C(Nc1ccccc1)CCSc1ncc[nH]1'),
TestMolecule('O=C(OCc1ncccc1)c1oc(COc2c(Cl)cccc2)cc1', 'O=C(OCc1ncccc1)c1ccc(o1)COc1ccccc1'),
TestMolecule('COc1ccc(C=NNC(=O)OC(C)(C)C)cc1OC', 'c1ccccc1'),
TestMolecule('CC1CCCCC1NC(COC(c1ccc(S(NCc2ccco2)(=O)=O)cc1)=O)=O',
'c1coc(c1)CNS(=O)(=O)c1ccc(cc1)C(=O)OCC(=O)NC1CCCCC1'),
TestMolecule('Nn1c(SCC(=O)Nc2cccc(F)c2)nnc1C1CCCCC1', 'O=C(CSc1[nH]c(nn1)C1CCCCC1)Nc1ccccc1'),
TestMolecule('Cc1n[nH]c(NC2CCCCC2)nc1=O', 'O=c1cn[nH]c(n1)NC1CCCCC1'),
TestMolecule('CCCCCCCCC(=O)NC(C(Cl)(Cl)Cl)NC(=S)N1CCOCC1', 'C1NCCOC1'),
TestMolecule('CCCc1ccc(Oc2coc3cc(OCC(Nc4c(C)cccc4)=O)ccc3c2=O)cc1',
'c1cccc(c1)Oc1c(c2ccc(cc2oc1)OCC(=O)Nc1ccccc1)=O'),
TestMolecule('Cc1ccc(C(=O)NN=C2CCSC2)cc1[N+]([O-])=O', 'O=C(NN=C1CCSC1)c1ccccc1'),
TestMolecule('N#CC1=C2SCN(c3ccc(F)cc3)CN2C(=O)CC1c1cc(F)ccc1',
'O=C1N2CN(c3ccccc3)CSC2=CC(c2ccccc2)C1'),
TestMolecule('c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1',
'c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1'),
TestMolecule('COc1ccc(NC(C)=O)cc1NC(=O)CN1CCN(CC(=O)Nc2ccc(Cl)cc2)CC1',
'O=C(Nc1ccccc1)CN1CCN(CC1)CC(=O)Nc1ccccc1'),
TestMolecule('Clc1c(Cl)c(C2NC(=O)CCC2[N+]([O-])=O)ccc1', 'O=C1NC(CCC1)c1ccccc1'),
TestMolecule('CCN(C(=O)CSc1n(-c2ccccc2)c(-c2ccccc2)nn1)CC', 'c1ccc(cc1)-n1cnnc1-c1ccccc1'),
TestMolecule('CC(=O)CCCCn1cnc2n(C)c(=O)n(C)c(=O)c12', 'O=c1[nH]c(c2c(nc[nH]2)[nH]1)=O'),
TestMolecule('CC1=NN(c2ccccc2)C(=N)C1=NNc1ccc(Cl)cc1', 'N=C1C(=NNc2ccccc2)C=NN1c1ccccc1'),
TestMolecule('CCc1ccc(OCC(=O)N(CC)CC)cc1', 'c1ccccc1'),
TestMolecule('CN(CC(=O)N1CCCCC1)S(c1ccc(Cl)cc1)(=O)=O', 'O=C(CNS(=O)(=O)c1ccccc1)N1CCCCC1'),
TestMolecule('CSc1ncc(C=C2C(=O)NC(=O)N(c3ccc(C)cc3)C2=O)cn1',
'c1ccc(N2C(NC(=O)C(=Cc3cncnc3)C2=O)=O)cc1'),
TestMolecule('COCCNC(=S)Nc1c(Cc2ccccc2)cccc1', 'c1ccc(Cc2ccccc2)cc1'),
TestMolecule('COc1cc(C(=O)Nc2nnc(C(C)(C)C)s2)c([N+]([O-])=O)cc1OC', 'O=C(Nc1nncs1)c1ccccc1'),
TestMolecule('CCOC(=O)c1ccc(NC(=O)c2cc(OC)c(OC(C)C)cc2)cc1', 'O=C(Nc1ccccc1)c1ccccc1'),
TestMolecule('COc1ccc(C(=O)C=C2Sc3cc4c(cc3N2C)OCO4)cc1', 'O=C(C=C1Sc2cc3c(cc2N1)OCO3)c1ccccc1'),
TestMolecule('CCCC1=NN(c2sc3c(n2)cccc3)C(=O)C1=CNCCCN(CC)CC', 'C=C1C=NN(C1=O)c1sc2ccccc2n1'),
TestMolecule('COc1ccc(C(COC(CN2C(=O)NC(C)(C)C2=O)=O)=O)cc1OC',
'c1ccc(C(=O)COC(=O)CN2C(=O)CNC2=O)cc1'),
TestMolecule('O=C(Oc1ccc(Br)cc1)C1CC(=O)N(c2ccc(F)cc2)C1',
'O=C(C1CC(N(C1)c1ccccc1)=O)Oc1ccccc1'),
TestMolecule('O=c1nc(-c2ccccn2)[nH]c(C(F)(F)F)c1Br', 'O=c1cc[nH]c(-c2ncccc2)n1'),
TestMolecule('CCOC(c1oc2ccccc2c1NC(CN1CCN(C)CC1)=O)=O', 'O=C(CN1CCNCC1)Nc1coc2ccccc21'),
TestMolecule('CSc1nsc(NN=Cc2ccc3c(c2)OCO3)c1C#N', 'c1cc(sn1)NN=Cc1ccc2OCOc2c1'),
TestMolecule('CC(C)(C)NC(NC(CSc1nc(C)c(C)c(C)n1)=O)=O', 'c1cncnc1'),
TestMolecule('Cc1cccnc1CN1CCN(Cc2onc(C(c3ccccc3)c3ccccc3)n2)CC1',
'c1cccnc1CN1CCN(CC1)Cc1onc(n1)C(c1ccccc1)c1ccccc1'),
TestMolecule('COc1ccc(Nc2oc3cc(=O)ccc-3cc2C(=O)Nc2ncccc2)cc1OC',
'c1ccc(cc1)Nc1oc2-c(ccc(c2)=O)cc1C(Nc1ncccc1)=O'),
TestMolecule('c1cc(C)c(OCC(NS(c2ccc(C)cc2)(=O)=O)=O)cc1', 'O=C(COc1ccccc1)NS(=O)(=O)c1ccccc1'),
TestMolecule('CCOc1ccc(-c2scc(CSc3sc(N)nn3)n2)cc1OC', 'c1cccc(c1)-c1nc(cs1)CSc1scnn1'),
TestMolecule('c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1',
'c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1'),
TestMolecule('Cc1occc1C(=O)NC(C)c1ccc2c(c1)OCO2', 'O=C(NCc1ccc2c(c1)OCO2)c1ccoc1'),
TestMolecule('CCn1c(SCC(=O)Nc2c(Cl)nccc2)nnc1-c1ccccc1',
'O=C(Nc1cnccc1)CSc1[nH]c(nn1)-c1ccccc1'),
TestMolecule('CCC(C)N(C)C1CCN(C(=S)Nc2cc(OC)ccc2)CC1', 'S=C(Nc1ccccc1)N1CCCCC1'),
TestMolecule('Brc1oc(C(=O)N2CC(=O)Nc3c(cc(Br)cc3)C2c2ccccc2)cc1',
'O=C(N1CC(Nc2ccccc2C1c1ccccc1)=O)c1occc1'),
TestMolecule('CN(C(=O)CCSc1nc(-c2cc3c(cc2)OCO3)cc(C(F)(F)F)n1)Cc1ccccc1',
'O=C(NCc1ccccc1)CCSc1nc(ccn1)-c1cc2c(cc1)OCO2'),
TestMolecule('[Br-].COc1c(OC)c(OC)cc(-c2nc3c[n+](CC(=O)c4ccccc4)ccc3n2C)c1',
'O=C(C[n+]1cc2nc([nH]c2cc1)-c1ccccc1)c1ccccc1'),
TestMolecule('CCOC(CSc1n(-c2c(OC)cccc2)c(CNC(Cc2ccccc2)=O)nn1)=O',
'O=C(Cc1ccccc1)NCc1n(cnn1)-c1ccccc1'),
TestMolecule('CS(N(Cc1ccccc1)c1ccc(C(Nc2c(Sc3ccccc3)cccc2)=O)cc1)(=O)=O',
'O=C(c1ccc(NCc2ccccc2)cc1)Nc1c(cccc1)Sc1ccccc1'),
TestMolecule('Cc1nc(C2N(C(=O)c3cn(C)c4c(c3=O)cccc4)CCc3c4c([nH]c32)cccc4)ccc1',
'O=C(c1c[nH]c2c(cccc2)c1=O)N1C(c2ncccc2)c2[nH]c3ccccc3c2CC1'),
TestMolecule('CCCCc1nc(N2CCOCC2)c(C#N)c2c1CCCC2', 'c1nc(cc2c1CCCC2)N1CCOCC1'),
TestMolecule('O=C(NN=Cc1cc([N+]([O-])=O)ccc1Cl)c1nccnc1', 'O=C(NN=Cc1ccccc1)c1nccnc1'),
TestMolecule('COc1ccc(-n2c(SCC(=O)c3ccc4c(c3)OCCO4)nnn2)cc1',
'O=C(c1ccc2c(c1)OCCO2)CSc1n(nnn1)-c1ccccc1'),
TestMolecule('COc1c(C=CC(=O)Nc2cc(S(NC3=NCCCCC3)(=O)=O)ccc2)cccc1',
'O=C(Nc1cc(ccc1)S(=O)(=O)NC1=NCCCCC1)C=Cc1ccccc1'),
TestMolecule('Cc1nn(-c2ccc(F)cc2)c(Cl)c1C=C(CC(=O)O)c1sc2ccccc2n1',
'c1cc2sc(nc2cc1)C=Cc1cn(nc1)-c1ccccc1'),
TestMolecule('COc1c(OC)c(OC)cc(C2N(c3ccccc3)OC3C2C(=O)N(Cc2ccccc2)C3=O)c1',
'c1cccc(c1)CN1C(=O)C2C(N(OC2C1=O)c1ccccc1)c1ccccc1'),
TestMolecule('COCCNC(=S)Nc1cc(OC)c(NC(=O)c2ccco2)cc1OC', 'O=C(Nc1ccccc1)c1occc1'),
TestMolecule('N#Cc1c(SCC(=O)c2cc3c(oc2=O)cccc3)nc(-c2ccccc2)cc1',
'O=C(c1cc2c(cccc2)oc1=O)CSc1cccc(n1)-c1ccccc1'),
TestMolecule('O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1',
'O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1'),
TestMolecule('Brc1cccc(OCCSc2ncccn2)c1', 'c1cccc(c1)OCCSc1ncccn1'),
TestMolecule('CC(C)(C)NC(=O)C12CCC(C)(C1(C)C)c1nc3ccccc3nc12', 'c1cccc2nc3C4CC(CC4)c3nc12'),
TestMolecule('[I-].CC(C)C1C(OCC(O)C[N+]2(C)CCCCC2)CC(C)CC1', 'C1CC[NH+](CC1)CCCOC1CCCCC1'),
TestMolecule('Cc1ccccc1NS(=O)(=O)c1ccc(OCC(=O)N2CCCCC2)cc1',
'c1cc(ccc1)NS(=O)(=O)c1ccc(cc1)OCC(=O)N1CCCCC1'),
TestMolecule('Cc1cc(NC(=O)CSc2nc3c(c(=O)n2-c2ccc(Br)cc2)SCC3)no1',
'O=C(CSc1nc2c(c(n1-c1ccccc1)=O)SCC2)Nc1ccon1'),
TestMolecule('Cc1ccccc1C(NC(C(C)C)C(OCC(c1[nH]ccc1)=O)=O)=O',
'c1cc([nH]c1)C(COC(CNC(=O)c1ccccc1)=O)=O'),
TestMolecule('Cc1ccnc(NS(c2ccc(NS(C)(=O)=O)cc2)(=O)=O)n1', 'c1ccc(S(=O)(=O)Nc2ncccn2)cc1'),
TestMolecule('Cn1c(-c2ccc(Cl)cc2)cnc1NCc1cc2c(cc1[N+]([O-])=O)OCO2.OC(=O)C(O)=O',
'c1cc(ccc1)-c1[nH]c(nc1)NCc1cc2c(cc1)OCO2'),
TestMolecule('CC1Cc2ccccc2N1C(=O)CON=Cc1ccc(OC(F)F)cc1', 'O=C(CON=Cc1ccccc1)N1CCc2c1cccc2'),
TestMolecule('C=C1C(=O)OC2C(O)C(C)=CC(=O)C=C(C)CC(OC(C(C)=CC)=O)C12',
'C=C1C2CCC=CC(C=CCC2OC1=O)=O'),
TestMolecule('O=C1C2N(CSC2)c2c(cc(C(F)(F)F)cc2)N1Cc1cccc(F)c1',
'O=C1C2N(CSC2)c2ccccc2N1Cc1ccccc1'),
TestMolecule('Cc1ccc(OCC(=O)Nc2c[nH]c(=O)[nH]c2=O)cc1C',
'O=C(COc1ccccc1)Nc1c[nH]c([nH]c1=O)=O'),
TestMolecule('Cn1c(CN2CCOCC2)nc2cc(NC(=O)c3ccccc3Cl)ccc12',
'O=C(c1ccccc1)Nc1ccc2[nH]c(nc2c1)CN1CCOCC1'),
TestMolecule('O=c1oc2ccc(O)cc2c(CN2CCN(CC=Cc3ccccc3)CC2)c1',
'O=c1oc2ccccc2c(c1)CN1CCN(CC1)CC=Cc1ccccc1'),
TestMolecule('Cn1c(Cc2ccccc2)nnc1SCCC(=O)Nc1ccccc1', 'O=C(CCSc1nnc([nH]1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('c1cc2nc(CC(=O)c3cc([N+]([O-])=O)ccc3)[nH]c2cc1',
'O=C(Cc1nc2ccccc2[nH]1)c1ccccc1'),
TestMolecule('c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1',
'c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1'),
TestMolecule('COc1ccccc1-n1c(=S)[nH]nc1CCn1nc(C)c(Br)c1C', 'S=c1[nH]nc(n1-c1ccccc1)CCn1cccn1'),
TestMolecule('CCC(=O)NC(=S)Nc1ccc(N2CCOCC2)cc1', 'c1cccc(c1)N1CCOCC1'),
TestMolecule('CCCCCC(=O)N1CCN(CCNC=C2C(=O)CC(c3ccc(OC)c(OC)c3)CC2=O)CC1',
'c1ccc(cc1)C1CC(=O)C(C(=O)C1)=CNCCN1CCNCC1'),
TestMolecule('CN1CCN(C(=O)CN(S(C)(=O)=O)Cc2ccc(Cl)cc2)CC1', 'O=C(CNCc1ccccc1)N1CCNCC1'),
TestMolecule('COc1cc(OC)cc(C(=O)NCc2cccnc2)c1', 'O=C(NCc1cccnc1)c1ccccc1'),
TestMolecule('c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1',
'c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1'),
TestMolecule('CC(NC1=NN(C(C)=O)C(C)(c2cccs2)S1)=O', 'c1cc(sc1)C1SC=NN1'),
TestMolecule('CCCC(=O)Nc1ccc(-c2nc3cc(C)c(C)cc3o2)cc1', 'c1cccc(c1)-c1nc2ccccc2o1'),
TestMolecule('Cc1c(C)n(CC(O)CN2CCOCC2)c2ccccc12.OC(=O)C(O)=O', 'c1cn(c2ccccc12)CCCN1CCOCC1'),
TestMolecule('Cc1occc1-c1n(CCc2ccccc2)c(SCC(=O)Nc2sccn2)nn1',
'O=C(Nc1sccn1)CSc1n(c(nn1)-c1cocc1)CCc1ccccc1'),
TestMolecule('Cc1oc(-c2cc(F)ccc2)nc1CN1C(CCc2ncccc2)CCCC1',
'c1ccc(cc1)-c1nc(co1)CN1C(CCCC1)CCc1ncccc1'),
TestMolecule('COc1c(OC)c(C(O)=O)c(C=NNC(c2cc(NC(c3ccc(F)cc3)=O)ccc2)=O)cc1',
'O=C(Nc1cc(ccc1)C(=O)NN=Cc1ccccc1)c1ccccc1'),
TestMolecule('CCn1c(Cc2ccccc2)nnc1SCC(=O)Nc1ccc(S(N)(=O)=O)cc1',
'O=C(CSc1[nH]c(nn1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('CCn1c(COc2nn(-c3ccccc3)c(=O)cc2)nnc1SCc1ccc(OC)cc1',
'O=c1ccc(nn1-c1ccccc1)OCc1[nH]c(nn1)SCc1ccccc1'),
TestMolecule('CC1=NC(=O)C(=C2CC(O)(C(F)(F)F)ON2)C(C)=C1', 'O=C1C(=C2NOCC2)C=CC=N1'),
TestMolecule('COc1ccc(NC(=S)Nc2ccccc2C(F)(F)F)cc1', 'S=C(Nc1ccccc1)Nc1ccccc1'),
TestMolecule('CCCc1cc(=O)nc(SCC(=O)c2cc(C)n(CCOC)c2C)[nH]1',
'O=C(c1c[nH]cc1)CSc1[nH]ccc(=O)n1'),
TestMolecule('CC(=O)Nc1ccc2c(c1)C(C)(C)C(C)N2C', 'c1ccc2c(c1)NCC2'),
TestMolecule('CCN1CCN(C(c2ccc(OCC(Nc3ccc(F)cc3)=O)c(OC)c2)=O)CC1',
'c1cc(ccc1)NC(=O)COc1ccc(C(N2CCNCC2)=O)cc1'),
TestMolecule('CCCCN1C2CCCC1CC(NC(=O)c1ccc(OC)c(OC)c1)C2', 'O=C(NC1CC2NC(CCC2)C1)c1ccccc1'),
TestMolecule('c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1',
'c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1'),
TestMolecule('CCn1c(C)nc2cc(C(=O)NN=Cc3ccc(OC)c(O)c3)ccc12',
'O=C(NN=Cc1ccccc1)c1ccc2[nH]cnc2c1'),
TestMolecule('[Cl-].NC(=O)CN1C=CC(=C[NH+]=O)C=C1', 'C=C1C=CNC=C1'),
TestMolecule('Cn1cnnc1SC1C(NS(c2ccccc2)(=O)=O)c2c3c(ccc2)cccc31',
'O=S(=O)(NC1C(Sc2[nH]cnn2)c2cccc3c2c1ccc3)c1ccccc1'),
TestMolecule('COc1ccc(Nc2nc(NCc3ccco3)nc(NN=Cc3ccccc3F)n2)cc1',
'c1ccc(Nc2nc(nc(n2)NN=Cc2ccccc2)NCc2ccco2)cc1'),
TestMolecule('CC1=CC(=O)C(=C2C=C(c3ccccc3[N+]([O-])=O)NN2)C=C1',
'O=C1C(=C2NNC(=C2)c2ccccc2)C=CC=C1'),
TestMolecule('COc1ccc(CC2[N+]([O-])(C)CCc3cc(OC)c(O)cc32)cc1O',
'c1ccc(cc1)CC1c2c(cccc2)CC[NH2+]1'),
TestMolecule('Cl.NC(N)=Nc1nc(=O)c2cc(Br)ccc2[nH]1', 'O=c1nc[nH]c2ccccc21'),
TestMolecule('CC(=O)N1CCC(=NNc2ccc(S(=O)(=O)N3CCOCC3)cc2[N+]([O-])=O)CC1',
'c1cc(ccc1NN=C1CCNCC1)S(=O)(=O)N1CCOCC1'),
TestMolecule('Cc1cc(S(N(Cc2ccc(F)cc2)CC2OCCC2)(=O)=O)ccc1-n1cnnn1',
'c1cc(ccc1)CN(CC1OCCC1)S(c1ccc(cc1)-n1cnnn1)(=O)=O'),
TestMolecule('CC1(C)OCc2c(c3c(sc4c(NCCCO)ncnc43)nc2-c2ccco2)C1',
'c1ncnc2c1sc1nc(c3c(c12)CCOC3)-c1ccco1'),
TestMolecule('COc1ccc(CCNC(=O)CSc2n(-c3ccc(OC)c(OC)c3)nnn2)cc1OC',
'O=C(CSc1n(-c2ccccc2)nnn1)NCCc1ccccc1'),
TestMolecule('CC(C)(CC(O)=O)CC(NCc1c(Cl)cccc1Sc1ccc(Cl)cc1)=O', 'c1ccc(Sc2ccccc2)cc1'),
TestMolecule('COc1ccc(-c2cc(CCCC(=O)NCCc3cc(OC)ccc3OC)no2)cc1',
'O=C(NCCc1ccccc1)CCCc1noc(c1)-c1ccccc1'),
TestMolecule('Cc1ccc(-c2ncns2)cc1', 'c1ccc(cc1)-c1sncn1'),
TestMolecule('C(O)CCn1c(=O)c2c(nc1C=Cc1ccc([N+]([O-])=O)o1)cccc2',
'O=c1[nH]c(C=Cc2ccco2)nc2c1cccc2'),
TestMolecule('COC(CC(O)CC(O)C(C)OCc1ccccc1)OC', 'c1ccccc1'),
TestMolecule('Cl.CCCC(N1CCN(C(=O)c2occc2)CC1)c1n(C(C)(C)C)nnn1',
'O=C(N1CCN(Cc2nnn[nH]2)CC1)c1ccco1'),
TestMolecule('O=C(NC(CO)c1ccccc1)c1occc1', 'O=C(NCc1ccccc1)c1occc1'),
TestMolecule('O=C(Nc1ccc(N2CCOCC2)cc1)c1c(Cl)cc(F)c(F)c1', 'O=C(Nc1ccc(N2CCOCC2)cc1)c1ccccc1'),
TestMolecule('CCc1sc(N2C(=O)c3ccc(Oc4ccc([N+]([O-])=O)cc4)cc3C2=O)nn1',
'O=C1N(C(=O)c2cc(Oc3ccccc3)ccc21)c1scnn1'),
TestMolecule('CC(C)Cc1ccc(C(C)C(=O)O)cc1', 'c1ccccc1'),
TestMolecule('Cl.N=c1sccn1CC(=O)Nc1cc(S(N2CCCC2)(=O)=O)ccc1Cl',
'N=c1n(CC(=O)Nc2cccc(S(=O)(N3CCCC3)=O)c2)ccs1'),
TestMolecule('c1ccc(-c2ccc(C(=O)OC3CC4OC(=O)CC4C3CO)cc2)cc1',
'c1ccc(cc1)-c1ccc(C(=O)OC2CC3CC(=O)OC3C2)cc1'),
TestMolecule('CN(CCC#N)CC(=O)Nc1ccc(S(N)(=O)=O)cc1', 'c1ccccc1'),
TestMolecule('Cc1nc(-c2ccc([N+]([O-])=O)cc2)sc1C(=O)O', 'c1cc(-c2sccn2)ccc1'),
TestMolecule('c1coc(C(=O)N2CCN(C(Cn3nnc(-c4ccc(NC(c5ccc(F)cc5)=O)cc4)n3)=O)CC2)c1',
'O=C(N1CCN(C(=O)Cn2nc(nn2)-c2ccc(NC(=O)c3ccccc3)cc2)CC1)c1ccco1'),
TestMolecule('Cc1onc(-c2c(Cl)cccc2Cl)c1C(N)=S', 'c1ccc(cc1)-c1nocc1'),
TestMolecule('CCOC(=O)c1cnc2ccccc2c1NCCO', 'c1cnc2ccccc2c1'),
TestMolecule('Cc1ccc(C)c(NC(=O)Cn2nnc(-c3ccc(N4CCOCC4)cc3)n2)c1',
'O=C(Cn1nnc(n1)-c1ccc(cc1)N1CCOCC1)Nc1ccccc1'),
TestMolecule('CC(C)(C)c1cc(C(=O)NNc2ccc(OC(F)(F)F)cc2)n(Cc2ccccc2)n1',
'O=C(NNc1ccccc1)c1ccnn1Cc1ccccc1'),
TestMolecule('CCCCCOC(=O)C1=C(C)N=C2N(NN=N2)C1c1ccc(OC)c(OC)c1OC',
'c1cccc(c1)C1N2NN=NC2=NC=C1'),
TestMolecule('Cc1cc2cc(CNC(=O)C3CC3)ccc2n1C', 'O=C(NCc1ccc2c(cc[nH]2)c1)C1CC1'),
TestMolecule('Cc1ccccc1C(NC(CC(C)C)C(Nc1cc(S(N(C)C)(=O)=O)ccc1)=O)=O',
'c1ccc(cc1)NC(CNC(=O)c1ccccc1)=O'),
TestMolecule('COCCCNC(=S)N1CCC(NC(=O)c2ccco2)CC1', 'O=C(NC1CCNCC1)c1ccco1'),
TestMolecule('Cn1c(C=Cc2oc([N+]([O-])=O)cc2)nc2ccccc2c1=O', 'O=c1[nH]c(C=Cc2occc2)nc2ccccc12'),
TestMolecule('c1cc2nc(SCc3cc(=O)n4ccsc4n3)n(CCCO)c(=O)c2cc1',
'c1ccc2nc(SCc3cc(=O)n4ccsc4n3)[nH]c(=O)c2c1'),
TestMolecule('c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O',
'c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O'),
]
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
|
"""
:mod:`zsl.resource.json_server_resource`
----------------------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import http.client
import logging
import re
from typing import Any, Dict
from flask import request
from sqlalchemy import or_
from sqlalchemy.orm.exc import NoResultFound
from zsl.interface.resource import ResourceResult
from zsl.resource.resource_helper import filter_from_url_arg, flat_model, model_tree
from zsl.service.service import transactional
from zsl.utils.http import get_http_status_code_value
from .model_resource import ModelResource
NOT_FOUND = ResourceResult(
body={},
status=get_http_status_code_value(http.client.NOT_FOUND)
)
NOT_IMPLEMENTED = ResourceResult(
body={},
status=get_http_status_code_value(http.client.NOT_IMPLEMENTED)
)
# any arguments other than these are treated as `property_name(_operator)=some_value` filters
_SKIPPED_ARGUMENTS = set(['callback', '_', 'q', '_start', '_end', '_sort', '_order', '_limit', '_embed', '_expand'])
# the first group is the column name; it may be followed by a '.' separator part and/or an operator suffix
_re_column_name = re.compile(r'^([^.]*?)(\..*?)?(_lte|_gte|_ne|_like)?$')
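# Illustrative matches (a sketch; the column names below are hypothetical):
#   _re_column_name.search('price_gte').groups()       -> ('price', None, '_gte')
#   _re_column_name.search('name').groups()            -> ('name', None, None)
#   _re_column_name.search('author.name_lte').groups() -> ('author', '.name', '_lte')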
def _page_arg(p):
# type: (int) -> str
"""Create a page argument from int."""
return 'page=' + str(p)
def _get_link_pages(page, per_page, count, page_url):
# type: (int, int, int, str) -> Dict[str, str]
"""Create link header for page metadata.
:param page: current page
:param per_page: page limit
:param count: count of all resources
:param page_url: url for resources
:return: dictionary with name of the link as key and its url as value
"""
current_page = _page_arg(page)
links = {}
end = page * per_page
if page > 1:
links['prev'] = page_url.replace(current_page, _page_arg(page - 1))
if end < count:
links['next'] = page_url.replace(current_page, _page_arg(page + 1))
if per_page < count:
links['first'] = page_url.replace(current_page, _page_arg(1))
links['last'] = page_url.replace(current_page, _page_arg((count + per_page - 1) // per_page))
return links
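# Illustrative result (a sketch with made-up values): for page=2, per_page=10,
# count=45 and page_url='http://api/items?_page=2&_limit=10' the helper returns
#   prev  -> 'http://api/items?_page=1&_limit=10'
#   next  -> 'http://api/items?_page=3&_limit=10'
#   first -> 'http://api/items?_page=1&_limit=10'
#   last  -> 'http://api/items?_page=5&_limit=10'   # (45 + 10 - 1) // 10 == 5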
class JsonServerResource(ModelResource):
"""Model resource implementation to correspond with json-server.
This implements the same REST interface which json-server
(https://github.com/typicode/json-server) uses. It transforms the given
input arguments into their ModelResource equivalents and then adds metadata to the result.
"""
def to_filter(self, query, arg):
"""Json-server filter using the _or_ operator."""
return filter_from_url_arg(self.model_cls, query, arg, query_operator=or_)
def create(self, *args, **kwargs):
"""Adds created http status response and location link."""
resource = super(JsonServerResource, self).create(*args, **kwargs)
return ResourceResult(
body=resource,
status=get_http_status_code_value(http.client.CREATED),
location="{}/{}".format(request.url, resource.get_id())
)
def _create_filter_by(self):
"""Transform the json-server filter arguments to model-resource ones."""
filter_by = []
for name, values in request.args.copy().lists(): # copy.lists works in py2 and py3
if name not in _SKIPPED_ARGUMENTS:
column = _re_column_name.search(name).group(1)
if column not in self._model_columns:
continue
for value in values:
if name.endswith('_ne'):
filter_by.append(name[:-3] + '!=' + value)
elif name.endswith('_lte'):
filter_by.append(name[:-4] + '<=' + value)
elif name.endswith('_gte'):
filter_by.append(name[:-4] + '>=' + value)
elif name.endswith('_like'):
filter_by.append(name[:-5] + '::like::%' + value + '%')
else:
filter_by.append(name.replace('__', '.') + '==' + value)
filter_by += self._create_fulltext_query()
return ','.join(filter_by)
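# Illustrative transformation (a sketch; 'age' and 'name' are assumed to be
# columns of the model): a request like '?age_gte=18&name_like=Jo&_sort=age'
# produces the filter string 'age>=18,name::like::%Jo%'; '_sort' is in
# _SKIPPED_ARGUMENTS and is handled by _transform_list_args instead.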
@staticmethod
def _create_related(args):
# type: (Dict) -> None
"""Create related field from `_embed` arguments."""
if '_embed' in request.args:
embeds = request.args.getlist('_embed')
args['related'] = ','.join(embeds)
del args['_embed']
def _create_fulltext_query(self):
"""Support the json-server fulltext search with a broad LIKE filter."""
filter_by = []
if 'q' in request.args:
columns = flat_model(model_tree(self.__class__.__name__, self.model_cls))
for q in request.args.getlist('q'):
filter_by += ['{col}::like::%{q}%'.format(col=col, q=q) for col in columns]
return filter_by
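# Illustrative result (a sketch; the column names are hypothetical): with
# '?q=smith' and a flat model of ['first_name', 'last_name'] this yields
# ['first_name::like::%smith%', 'last_name::like::%smith%'].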
def _transform_list_args(self, args):
# type: (dict) -> None
"""Transforms all list arguments from json-server to model-resource ones.
This modifies the given arguments.
"""
if '_limit' in args:
args['limit'] = int(args['_limit'])
del args['_limit']
if '_page' in args:
page = int(args['_page'])
if page < 0:
page = 1
args['page'] = page
del args['_page']
if 'limit' not in args:
args['limit'] = 10
if '_end' in args:
end = int(args['_end'])
args['limit'] = end - int(args.get('_start', 0))
if '_start' in args:
args['offset'] = args['_start']
del args['_start']
if '_sort' in args:
args['order_by'] = args['_sort'].replace('__', '.')
del args['_sort']
if args.get('_order', 'ASC') == 'DESC':
args['order_by'] = '-' + args['order_by']
if '_order' in args:
del args['_order']
filter_by = self._create_filter_by()
if filter_by:
args['filter_by'] = filter_by
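# Illustrative transformation (a sketch, assuming no extra filter arguments in
# the request): {'_page': '2', '_limit': '5', '_sort': 'name', '_order': 'DESC'}
# becomes {'page': 2, 'limit': 5, 'order_by': '-name'}.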
def read(self, params, args, data):
"""Modifies the parameters and adds metadata for read results."""
result_count = None
result_links = None
if params is None:
params = []
if args:
args = args.copy()
else:
args = {}
ctx = self._create_context(params, args, data)
row_id = ctx.get_row_id()
if not row_id:
self._transform_list_args(args)
if 'page' in args or 'limit' in args:
ctx = self._create_context(params, args, data)
result_count = self._get_collection_count(ctx)
if 'page' in args:
result_links = _get_link_pages(
page=args['page'],
per_page=int(args['limit']),
count=result_count,
page_url=request.url
)
if 'limit' not in args:
args['limit'] = 'unlimited'
self._create_related(args)
try:
return ResourceResult(
body=super(JsonServerResource, self).read(params, args, data),
count=result_count,
links=result_links
)
except NoResultFound:
return NOT_FOUND
def update(self, *args, **kwargs):
"""Modifies the parameters and adds metadata for update results.
Currently it does not support the `PUT` method, which would replace the
whole resource; doing so is somewhat questionable with a relational DB.
"""
if request.method == 'PUT':
logging.warning("Called not implemented resource method PUT")
resource = super(JsonServerResource, self).update(*args, **kwargs)
if resource:
return resource
else:
return NOT_FOUND
@transactional
def delete(self, params, args, data):
"""Supports only singular delete and adds proper http status."""
ctx = self._create_context(params, args, data)
row_id = ctx.get_row_id()
if row_id:
deleted = self._delete_one(row_id, ctx)
if deleted:
return ResourceResult(body={})
else:
return NOT_FOUND
else:
return NOT_FOUND
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch a variety of acoustic metrics from The Echo Nest.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import time
import socket
import os
import tempfile
from string import Template
import subprocess
from beets import util, plugins, ui
from beets.dbcore import types
import pyechonest
import pyechonest.song
import pyechonest.track
# If a request at the EchoNest fails, we want to retry the request RETRIES
# times and wait between retries for RETRY_INTERVAL seconds.
RETRIES = 10
RETRY_INTERVAL = 10
DEVNULL = open(os.devnull, 'wb')
ALLOWED_FORMATS = ('MP3', 'OGG', 'AAC')
UPLOAD_MAX_SIZE = 50 * 1024 * 1024
# FIXME: use avconv?
CONVERT_COMMAND = u'ffmpeg -i $source -y -acodec libvorbis -vn -aq 2 $dest'
TRUNCATE_COMMAND = u'ffmpeg -t 300 -i $source ' \
    u'-y -acodec libvorbis -vn -aq 2 $dest'
# Maps attribute names from echonest to their field names in beets.
# The attributes are retrieved from a songs `audio_summary`. See:
# http://echonest.github.io/pyechonest/song.html#pyechonest.song.profile
ATTRIBUTES = {
'energy': 'energy',
'liveness': 'liveness',
'speechiness': 'speechiness',
'acousticness': 'acousticness',
'danceability': 'danceability',
'valence': 'valence',
'tempo': 'bpm',
}
# Types for the flexible fields added by `ATTRIBUTES`
FIELD_TYPES = {
'energy': types.FLOAT,
'liveness': types.FLOAT,
'speechiness': types.FLOAT,
'acousticness': types.FLOAT,
'danceability': types.FLOAT,
'valence': types.FLOAT,
}
MUSICAL_SCALE = ['C', 'C#', 'D', 'D#', 'E', 'F',
'F#', 'G', 'G#', 'A', 'A#', 'B']
# We also use echonest_id (song_id) and echonest_fingerprint to speed up
# lookups.
ID_KEY = 'echonest_id'
FINGERPRINT_KEY = 'echonest_fingerprint'
def _splitstrip(string, delim=u','):
"""Split string (at commas by default) and strip whitespace from the
pieces.
"""
return [s.strip() for s in string.split(delim)]
def diff(item1, item2):
"""Score two Item objects according to the Echo Nest numerical
fields.
"""
result = 0.0
for attr in ATTRIBUTES.values():
if attr == 'bpm':
# BPM (tempo) is handled specially to normalize.
continue
try:
result += abs(
float(item1.get(attr, None)) -
float(item2.get(attr, None))
)
except TypeError:
result += 1.0
try:
bpm1 = float(item1.get('bpm', None))
bpm2 = float(item2.get('bpm', None))
result += abs(bpm1 - bpm2) / max(bpm1, bpm2, 1)
except TypeError:
result += 1.0
return result
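# Illustrative score (a sketch with made-up numbers): if two items differ only
# in energy (0.7 vs 0.5) and bpm (120 vs 100), the result is
# abs(0.7 - 0.5) + abs(120 - 100) / max(120, 100, 1) = 0.2 + 0.1666... ~= 0.37;
# any attribute missing on either item adds a flat 1.0 penalty instead.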
def similar(lib, src_item, threshold=0.15, fmt='${difference}: ${path}'):
for item in lib.items():
if item.path != src_item.path:
d = diff(item, src_item)
if d < threshold:
s = fmt.replace('${difference}', '{:2.2f}'.format(d))
ui.print_(format(item, s))
class EchonestMetadataPlugin(plugins.BeetsPlugin):
item_types = FIELD_TYPES
def __init__(self):
super(EchonestMetadataPlugin, self).__init__()
self.config.add({
'auto': True,
'apikey': u'NY2KTZHQ0QDSHBAP6',
'upload': True,
'convert': True,
'truncate': True,
})
self.config.add(ATTRIBUTES)
self.config['apikey'].redact = True
pyechonest.config.ECHO_NEST_API_KEY = \
self.config['apikey'].get(unicode)
if self.config['auto']:
self.import_stages = [self.imported]
def _echofun(self, func, **kwargs):
"""Wrapper for requests to the EchoNest API. Will retry up to
RETRIES times and wait between retries for RETRY_INTERVAL
seconds.
"""
for i in range(RETRIES):
try:
result = func(**kwargs)
except pyechonest.util.EchoNestAPIError as e:
if e.code == 3:
# reached access limit per minute
self._log.debug(u'rate-limited on try {0}; waiting {1} '
u'seconds', i + 1, RETRY_INTERVAL)
time.sleep(RETRY_INTERVAL)
elif e.code == 5:
# specified identifier does not exist
# no use in trying again.
self._log.debug(u'{0}', e)
return None
else:
self._log.error(u'{0}', e.args[0][0])
return None
except (pyechonest.util.EchoNestIOError, socket.error) as e:
self._log.warn(u'IO error: {0}', e)
time.sleep(RETRY_INTERVAL)
except Exception as e:
# there was an error analyzing the track, status: error
self._log.debug(u'{0}', e)
return None
else:
break
else:
# If we exited the loop without breaking, then we used up all
# our allotted retries.
self._log.error(u'request failed repeatedly')
return None
return result
def _pick_song(self, songs, item):
"""Helper method to pick the best matching song from a list of songs
returned by the EchoNest. Compares artist, title and duration: among
songs whose artist and title match, the one closest in duration is
picked, and it is rejected if it is more than 2.5 seconds off.
"""
if not songs:
self._log.debug(u'no songs found')
return
pick = None
min_dist = item.length
for song in songs:
if song.artist_name.lower() == item.artist.lower() \
and song.title.lower() == item.title.lower():
dist = abs(item.length - song.audio_summary['duration'])
if dist < min_dist:
min_dist = dist
pick = song
if min_dist > 2.5:
return None
return pick
def _flatten_song(self, song):
"""Given an Echo Nest song object, return a flat dict containing
attributes we care about. If song is None, return None.
"""
if not song:
return
values = dict(song.audio_summary)
values['id'] = song.id
return values
# "Profile" (ID-based) lookup.
def profile(self, item):
"""Do a lookup on the EchoNest by MusicBrainz ID.
"""
# Use an existing Echo Nest ID.
if ID_KEY in item:
enid = item[ID_KEY]
# Look up the Echo Nest ID based on the MBID.
else:
if not item.mb_trackid:
self._log.debug(u'no ID available')
return
mbid = 'musicbrainz:track:{0}'.format(item.mb_trackid)
track = self._echofun(pyechonest.track.track_from_id,
identifier=mbid)
if not track:
self._log.debug(u'lookup by MBID failed')
return
enid = track.song_id
# Use the Echo Nest ID to look up the song.
songs = self._echofun(pyechonest.song.profile, ids=enid,
buckets=['id:musicbrainz', 'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Search" (metadata-based) lookup.
def search(self, item):
"""Search the item at the EchoNest by artist and title.
"""
songs = self._echofun(pyechonest.song.search, title=item.title,
results=100, artist=item.artist,
buckets=['id:musicbrainz', 'tracks',
'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Analyze" (upload the audio itself) method.
def prepare_upload(self, item):
"""Truncate and convert an item's audio file so it can be
uploaded to echonest.
Return a ``(source, tmp)`` tuple where `source` is the path to
the file to be uploaded and `tmp` is a temporary file to be
deleted after the upload or `None`.
If conversion or truncation fails, return `None`.
"""
source = item.path
tmp = None
if item.format not in ALLOWED_FORMATS:
if self.config['convert']:
tmp = source = self.convert(source)
if not tmp:
return
if os.stat(source).st_size > UPLOAD_MAX_SIZE:
if self.config['truncate']:
source = self.truncate(source)
if tmp is not None:
util.remove(tmp)
tmp = source
else:
return
if source:
return source, tmp
def convert(self, source):
"""Converts an item in an unsupported media format to ogg. Config
pending.
This is stolen from Jakob Schnitzer's convert plugin.
"""
fd, dest = tempfile.mkstemp(b'.ogg')
os.close(fd)
self._log.info(u'encoding {0} to {1}',
util.displayable_path(source),
util.displayable_path(dest))
opts = []
for arg in CONVERT_COMMAND.split():
arg = arg.encode('utf-8')
opts.append(Template(arg).substitute(source=source, dest=dest))
# Run the command.
try:
util.command_output(opts)
except (OSError, subprocess.CalledProcessError) as exc:
self._log.debug(u'encode failed: {0}', exc)
util.remove(dest)
return
self._log.info(u'finished encoding {0}', util.displayable_path(source))
return dest
def truncate(self, source):
"""Truncates an item to a size less than UPLOAD_MAX_SIZE."""
fd, dest = tempfile.mkstemp(u'.ogg')
os.close(fd)
self._log.info(u'truncating {0} to {1}',
util.displayable_path(source),
util.displayable_path(dest))
opts = []
for arg in TRUNCATE_COMMAND.split():
arg = arg.encode('utf-8')
opts.append(Template(arg).substitute(source=source, dest=dest))
# Run the command.
try:
util.command_output(opts)
except (OSError, subprocess.CalledProcessError) as exc:
self._log.debug(u'truncate failed: {0}', exc)
util.remove(dest)
return
self._log.info(u'finished truncating {0}', util.displayable_path(source))
return dest
def analyze(self, item):
"""Upload the item to the EchoNest for analysis. May require to
convert the item to a supported media format.
"""
prepared = self.prepare_upload(item)
if not prepared:
self._log.debug(u'could not prepare file for upload')
return
source, tmp = prepared
self._log.info(u'uploading file, please be patient')
track = self._echofun(pyechonest.track.track_from_filename,
filename=source)
if tmp is not None:
util.remove(tmp)
if not track:
self._log.debug(u'failed to upload file')
return
# Sometimes we have a track but no song. I guess this happens for
# new / unverified songs. We need to "extract" the audio_summary
# from the track object manually. I don't know why the
# pyechonest API handles tracks (merge audio_summary to __dict__)
# and songs (keep audio_summary in an extra attribute)
# differently.
# Maybe a patch for pyechonest could help?
# First get the (limited) metadata from the track in case
# there's no associated song.
from_track = {}
for key in ATTRIBUTES:
try:
from_track[key] = getattr(track, key)
except AttributeError:
pass
from_track['duration'] = track.duration
# Try to look up a song for the full metadata.
try:
song_id = track.song_id
except AttributeError:
return from_track
songs = self._echofun(pyechonest.song.profile,
ids=[song_id], track_ids=[track.id],
buckets=['audio_summary'])
if songs:
pick = self._pick_song(songs, item)
if pick:
return self._flatten_song(pick)
return from_track # Fall back to track metadata.
# Shared top-level logic.
def fetch_song(self, item):
"""Try all methods to get a matching song object from the
EchoNest. If no method succeeds, return None.
"""
# There are up to three different ways to get a song. Each method is a
# callable that takes the Item as an argument.
methods = [self.profile, self.search]
if self.config['upload']:
methods.append(self.analyze)
# Try each method in turn.
for method in methods:
song = method(item)
if song:
self._log.debug(u'got song through {0}: {1} [{2}]',
method.__name__,
item,
song.get('duration'),
)
return song
def apply_metadata(self, item, values, write=False):
"""Copy the metadata from the dictionary of song information to
the item.
"""
# Update each field.
for k, v in values.iteritems():
if k in ATTRIBUTES:
field = ATTRIBUTES[k]
self._log.debug(u'metadata: {0} = {1}', field, v)
if field == 'bpm':
item[field] = int(v)
else:
item[field] = v
if 'key' in values and 'mode' in values:
key = MUSICAL_SCALE[values['key'] - 1]
if values['mode'] == 0: # Minor key
key += 'm'
item['initial_key'] = key
if 'id' in values:
enid = values['id']
self._log.debug(u'metadata: {0} = {1}', ID_KEY, enid)
item[ID_KEY] = enid
# Write and save.
if write:
item.try_write()
item.store()
# Automatic (on-import) metadata fetching.
def imported(self, session, task):
"""Import pipeline stage.
"""
for item in task.imported_items():
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song)
# Explicit command invocation.
def requires_update(self, item):
"""Check if this item requires an update from the EchoNest (its
data is missing).
"""
for field in ATTRIBUTES.values():
if not item.get(field):
return True
self._log.info(u'no update required')
return False
def commands(self):
fetch_cmd = ui.Subcommand('echonest',
help='fetch metadata from The Echo Nest')
fetch_cmd.parser.add_option(
'-f', '--force', dest='force', action='store_true', default=False,
help='(re-)download information from the EchoNest'
)
def fetch_func(lib, opts, args):
self.config.set_args(opts)
write = ui.should_write()
for item in lib.items(ui.decargs(args)):
self._log.info(u'{0}', item)
if self.config['force'] or self.requires_update(item):
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song, write)
fetch_cmd.func = fetch_func
sim_cmd = ui.Subcommand('echosim', help='show related files')
sim_cmd.parser.add_option(
'-t', '--threshold', dest='threshold', action='store',
type='float', default=0.15, help='Set difference threshold'
)
sim_cmd.parser.add_format_option()
def sim_func(lib, opts, args):
self.config.set_args(opts)
for item in lib.items(ui.decargs(args)):
similar(lib, item, opts.threshold, opts.format)
sim_cmd.func = sim_func
return [fetch_cmd, sim_cmd]
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid as stdlib_uuid
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import views
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/',
},
'v2.1': {
'html': 'http://docs.openstack.org/'
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
"v2.1": {
"id": "v2.1",
"status": "CURRENT",
"version": "2.10",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.1']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1",
}
],
}
}
class VersionsTestV20(test.NoDBTestCase):
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
{
"id": "v2.1",
"status": "CURRENT",
"version": "2.10",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
}],
},
]
self.assertEqual(versions, expected)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(res.location, redirect_req.url)
def _test_get_version_2_detail(self, url, accept=None):
if accept is None:
accept = "application/json"
req = webob.Request.blank(url)
req.accept = accept
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail(self):
self._test_get_version_2_detail('/v2/')
def test_get_version_2_detail_content_type(self):
accept = "application/json;version=2"
self._test_get_version_2_detail('/', accept=accept)
def test_get_version_2_versions_invalid(self):
req = webob.Request.blank('/v2/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_server_atom(self):
"""Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.NoDBTestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(output, expected)
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2')
self.assertEqual(actual, expected)
def test_generate_href_v21(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2.1/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2.1')
self.assertEqual(actual, expected)
def test_generate_href_unknown(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('foo')
self.assertEqual(actual, expected)
# NOTE(oomichi): The v2.0 version API now covers "/" (root), so this
# class tests only "/v2.1" for the v2.1 API.
class VersionsTestV21(test.NoDBTestCase):
exp_versions = copy.deepcopy(EXP_VERSIONS)
exp_versions['v2.0']['links'].insert(0,
{'href': 'http://localhost/v2.1/', 'rel': 'self'},
)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2.1/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_21_detail(self):
req = webob.Request.blank('/v2.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v21_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v20_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.0']}
self.assertEqual(expected, version)
def test_get_version_21_versions_invalid(self):
req = webob.Request.blank('/v2.1/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 404)
def test_get_version_21_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2.1"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
|
|
from sklearn import svm
import datetime
from datetime import date
import random
from scipy import spatial
# import numpy as np
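# Data layout below is inferred from how the tuples are used later in this script:
#   playerPos      maps player id -> position string ("WR", "QB" or "RB") from column 6
#   topPlayerInfo  maps player id -> [position, all-star indicator, MVP indicator] from columns 4-6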
playerPos={}
with open("nflPlayerInfo") as f:
for line in f:
tup=eval(line)
playerPos[tup[0]]=tup[6]
topPlayerInfo={}
with open("nflTopPlayersInfo") as f:
for line in f:
tup=eval(line)
topPlayerInfo[tup[0]]=[tup[4],tup[5],tup[6]]
allStar={
"WR":[],
"QB":[],
"RB":[]
}
nonAllStar={
"WR":[],
"QB":[],
"RB":[]
}
mvp={
"WR":[],
"QB":[],
"RB":[]
}
nonMvp={
"WR":[],
"QB":[],
"RB":[]
}
playerStats={}
with open("nflPlayerStats") as f:
for line in f:
tup=eval(line)
# print(tup[2])
# print(tup[7])
# print(tup[9])
# print(tup[11])
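# Per-game feature vector: most counting stats are divided by games played (tup[2]);
# a few use max(games, 1.0) to guard against division by zero, and tup[9] is kept raw.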
temp=[float(tup[3])/float(tup[2]),float(tup[4])/float(tup[2]),float(tup[5])/float(tup[2]),float(tup[6])/max(float(tup[2]),1.0),float(tup[7])/max(float(tup[2]),1.0),float(tup[8])/max(float(tup[2]),1.0),float(tup[9]),float(tup[10])/float(tup[2]),float(tup[11])/float(tup[2]),float(tup[12])/float(tup[2]),float(tup[13])/float(tup[2])]
if tup[1]==2014:
if tup[0]==387:
print(tup)
print(temp)
playerStats[tup[0]]=temp
if tup[len(tup)-2]:
allStar[playerPos[tup[0]]].append(temp)
else:
nonAllStar[playerPos[tup[0]]].append(temp)
if tup[len(tup)-1]:
mvp[playerPos[tup[0]]].append(temp)
else:
nonMvp[playerPos[tup[0]]].append(temp)
topPlayerStats={}
with open("nflTopPlayersStats") as f:
for line in f:
tup=eval(line)
temp=[]
for i in range(1,len(tup)):
temp.append(tup[i])
topPlayerStats[tup[0]]=temp
if topPlayerInfo[tup[0]][1]>0:
allStar[topPlayerInfo[tup[0]][0]].append(temp)
else:
nonAllStar[topPlayerInfo[tup[0]][0]].append(temp)
if topPlayerInfo[tup[0]][2]>0:
mvp[topPlayerInfo[tup[0]][0]].append(temp)
else:
nonMvp[topPlayerInfo[tup[0]][0]].append(temp)
allStarModels={
"WR":None,
"QB":None,
"RB":None
}
mvpModels={
"WR":None,
"QB":None,
"RB":None
}
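# Train one SVM per position for the all-star and MVP labels. Classes are balanced by
# randomly down-sampling the negative examples to the size of the positive set.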
for pos in allStarModels:
X=[]
y=[]
for value in allStar[pos]:
X.append(value)
y.append(1)
rand_smpl = [nonAllStar[pos][j] for j in sorted(random.sample(range(len(nonAllStar[pos])), len(allStar[pos])))]
for value in rand_smpl:
X.append(value)
y.append(0)
# print(X)
# X=np.array(X)
# y=np.array(y)
clf=svm.SVC(probability=True)
clf.fit(X,y)
allStarModels[pos]=clf
for pos in mvpModels:
X=[]
y=[]
for value in mvp[pos]:
X.append(value)
y.append(1)
rand_smpl = [nonMvp[pos][j] for j in sorted(random.sample(range(len(nonMvp[pos])), len(mvp[pos])))]
for value in rand_smpl:
X.append(value)
y.append(0)
# print(X)
# X=np.array(X)
# y=np.array(y)
clf=svm.SVC(probability=True)
clf.fit(X,y)
mvpModels[pos]=clf
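# Score every player over three rounds, retraining the models between rounds on fresh
# random negative samples, and accumulate the predicted probabilities for later averaging.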
probs={}
with open("probs","w") as f:
for i in range(0,3):
for player in playerPos:
if player in playerStats:
v=playerStats[player]
amodel=allStarModels[playerPos[player]]
mmodel=mvpModels[playerPos[player]]
if player in probs:
probs[player][0]+=mmodel.predict_proba([v])[0][1]
probs[player][1]+=amodel.predict_proba([v])[0][1]
else:
probs[player]=[mmodel.predict_proba([v])[0][1],amodel.predict_proba([v])[0][1]]
else:
probs[player]=[0,0]
if i<2:
for pos in allStarModels:
X=[]
y=[]
for value in allStar[pos]:
X.append(value)
y.append(1)
rand_smpl = [nonAllStar[pos][j] for j in sorted(random.sample(range(len(nonAllStar[pos])), len(allStar[pos])))]
for value in rand_smpl:
X.append(value)
y.append(0)
# print(X)
# X=np.array(X)
# y=np.array(y)
clf=svm.SVC(probability=True)
clf.fit(X,y)
allStarModels[pos]=clf
for pos in mvpModels:
X=[]
y=[]
for value in mvp[pos]:
X.append(value)
y.append(1)
rand_smpl = [nonMvp[pos][j] for j in sorted(random.sample(range(len(nonMvp[pos])), len(mvp[pos])))]
for value in rand_smpl:
X.append(value)
y.append(0)
# print(X)
# X=np.array(X)
# y=np.array(y)
clf=svm.SVC(probability=True)
clf.fit(X,y)
mvpModels[pos]=clf
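# For each player, find the most similar historical top player by cosine similarity of the
# stat vectors, then write (player, best match, similarity, all-star prob, MVP prob).
# The /3 below averages the three accumulated all-star probabilities; the /4.5 appears to
# both average and down-weight the MVP probability.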
for player in probs:
cur=[0,0]
if player in playerStats:
cur=[-1,0]
v1=playerStats[player]
# print(v1)
for player2 in topPlayerStats:
# print(str(player2))
v2=topPlayerStats[player2]
result=1-spatial.distance.cosine(v1,v2)
if result>cur[1]:
cur[0]=player2
cur[1]=result
# print(player2)
# break
# break
# 184
temp=(player,cur[0],cur[1],(probs[player][1]/3),(probs[player][0]/4.5))
f.write(str(temp))
f.write("\n")
|
|
# -*- coding: utf-8 -*-
"""Test exporting functions."""
# Authors: MNE Developers
#
# License: BSD-3-Clause
from datetime import datetime, timezone
from mne.io import RawArray
from mne.io.meas_info import create_info
from pathlib import Path
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal)
from mne import (read_epochs_eeglab, Epochs, read_evokeds, read_evokeds_mff,
Annotations)
from mne.datasets import testing, misc
from mne.export import export_evokeds, export_evokeds_mff
from mne.io import read_raw_fif, read_raw_eeglab, read_raw_edf
from mne.utils import (_check_eeglabio_installed, requires_version,
object_diff, _check_edflib_installed, _resource_path)
from mne.tests.test_epochs import _get_data
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
fname_evoked = op.join(base_dir, 'test-ave.fif')
data_path = testing.data_path(download=False)
egi_evoked_fname = op.join(data_path, 'EGI', 'test_egi_evoked.mff')
@pytest.mark.skipif(not _check_eeglabio_installed(strict=False),
reason='eeglabio not installed')
def test_export_raw_eeglab(tmpdir):
"""Test saving a Raw instance to EEGLAB's set format."""
fname = (Path(__file__).parent.parent.parent /
"io" / "tests" / "data" / "test_raw.fif")
raw = read_raw_fif(fname)
raw.load_data()
temp_fname = op.join(str(tmpdir), 'test.set')
raw.export(temp_fname)
raw.drop_channels([ch for ch in ['epoc']
if ch in raw.ch_names])
raw_read = read_raw_eeglab(temp_fname, preload=True)
assert raw.ch_names == raw_read.ch_names
cart_coords = np.array([d['loc'][:3] for d in raw.info['chs']]) # just xyz
cart_coords_read = np.array([d['loc'][:3] for d in raw_read.info['chs']])
assert_allclose(cart_coords, cart_coords_read)
assert_allclose(raw.times, raw_read.times)
assert_allclose(raw.get_data(), raw_read.get_data())
@pytest.mark.skipif(not _check_edflib_installed(strict=False),
reason='edflib-python not installed')
def test_double_export_edf(tmp_path):
"""Test exporting an EDF file multiple times."""
rng = np.random.RandomState(123456)
format = 'edf'
ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg',
'eog', 'ecg', 'emg', 'dbs', 'bio']
ch_names = np.arange(len(ch_types)).astype(str).tolist()
info = create_info(ch_names, sfreq=1000,
ch_types=ch_types)
data = rng.random(size=(len(ch_names), 1000)) * 1.e-5
# include subject info and measurement date
subject_info = dict(first_name='mne', last_name='python',
birthday=(1992, 1, 20), sex=1, hand=3)
info['subject_info'] = subject_info
raw = RawArray(data, info)
# export once
temp_fname = op.join(str(tmp_path), f'test.{format}')
raw.export(temp_fname, add_ch_type=True)
raw_read = read_raw_edf(temp_fname, preload=True)
# export again
raw_read.load_data()
raw_read.export(temp_fname, add_ch_type=True)
raw_read = read_raw_edf(temp_fname, preload=True)
# stim channel should be dropped
raw.drop_channels('2')
assert raw.ch_names == raw_read.ch_names
# only compare the original length, since extra zeros are appended
orig_raw_len = len(raw)
assert_array_almost_equal(
raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4)
assert_allclose(
raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)
# check channel types except for 'bio', which loses its type
orig_ch_types = raw.get_channel_types()
read_ch_types = raw_read.get_channel_types()
assert_array_equal(orig_ch_types, read_ch_types)
@pytest.mark.skipif(not _check_edflib_installed(strict=False),
reason='edflib-python not installed')
def test_export_edf_annotations(tmp_path):
"""Test that exporting EDF preserves annotations."""
rng = np.random.RandomState(123456)
format = 'edf'
ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg',
'eog', 'ecg', 'emg', 'dbs', 'bio']
ch_names = np.arange(len(ch_types)).astype(str).tolist()
info = create_info(ch_names, sfreq=1000,
ch_types=ch_types)
data = rng.random(size=(len(ch_names), 2000)) * 1.e-5
raw = RawArray(data, info)
annotations = Annotations(
onset=[0.01, 0.05, 0.90, 1.05], duration=[0, 1, 0, 0],
description=['test1', 'test2', 'test3', 'test4'])
raw.set_annotations(annotations)
# export
temp_fname = op.join(str(tmp_path), f'test.{format}')
raw.export(temp_fname)
# read in the file
raw_read = read_raw_edf(temp_fname, preload=True)
assert_array_equal(raw.annotations.onset, raw_read.annotations.onset)
assert_array_equal(raw.annotations.duration, raw_read.annotations.duration)
assert_array_equal(raw.annotations.description,
raw_read.annotations.description)
@pytest.mark.skipif(not _check_edflib_installed(strict=False),
reason='edflib-python not installed')
def test_rawarray_edf(tmp_path):
"""Test saving a Raw array with integer sfreq to EDF."""
rng = np.random.RandomState(12345)
format = 'edf'
ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'seeg', 'eog', 'ecg', 'emg',
'dbs', 'bio']
ch_names = np.arange(len(ch_types)).astype(str).tolist()
info = create_info(ch_names, sfreq=1000,
ch_types=ch_types)
data = rng.random(size=(len(ch_names), 1000)) * 1e-5
# include subject info and measurement date
subject_info = dict(first_name='mne', last_name='python',
birthday=(1992, 1, 20), sex=1, hand=3)
info['subject_info'] = subject_info
raw = RawArray(data, info)
time_now = datetime.now()
meas_date = datetime(year=time_now.year, month=time_now.month,
day=time_now.day, hour=time_now.hour,
minute=time_now.minute, second=time_now.second,
tzinfo=timezone.utc)
raw.set_meas_date(meas_date)
temp_fname = op.join(str(tmp_path), f'test.{format}')
raw.export(temp_fname, add_ch_type=True)
raw_read = read_raw_edf(temp_fname, preload=True)
# stim channel should be dropped
raw.drop_channels('2')
assert raw.ch_names == raw_read.ch_names
# only compare the original length, since extra zeros are appended
orig_raw_len = len(raw)
assert_array_almost_equal(
raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4)
assert_allclose(
raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)
# check channel types except for 'bio', which loses its type
orig_ch_types = raw.get_channel_types()
read_ch_types = raw_read.get_channel_types()
assert_array_equal(orig_ch_types, read_ch_types)
assert raw.info['meas_date'] == raw_read.info['meas_date']
# channel name can't be longer than 16 characters with the type added
raw_bad = raw.copy()
raw_bad.rename_channels({'1': 'abcdefghijklmnopqrstuvwxyz'})
with pytest.raises(RuntimeError, match='Signal label'), \
pytest.warns(RuntimeWarning, match='Data has a non-integer'):
raw_bad.export(temp_fname)
# include bad birthday that is non-EDF compliant
bad_info = info.copy()
bad_info['subject_info']['birthday'] = (1700, 1, 20)
raw = RawArray(data, bad_info)
with pytest.raises(RuntimeError, match='Setting patient birth date'):
raw.export(temp_fname)
# include bad measurement date that is non-EDF compliant
raw = RawArray(data, info)
meas_date = datetime(year=1984, month=1, day=1, tzinfo=timezone.utc)
raw.set_meas_date(meas_date)
with pytest.raises(RuntimeError, match='Setting start date time'):
raw.export(temp_fname)
# test that warning is raised if there are non-voltage based channels
raw = RawArray(data, info)
with pytest.warns(RuntimeWarning, match='The unit'):
raw.set_channel_types({'9': 'hbr'})
with pytest.warns(RuntimeWarning, match='Non-voltage channels'):
raw.export(temp_fname)
# data should match up to the non-accepted channel
raw_read = read_raw_edf(temp_fname, preload=True)
orig_raw_len = len(raw)
assert_array_almost_equal(
raw.get_data()[:-1, :], raw_read.get_data()[:, :orig_raw_len],
decimal=4)
assert_allclose(
raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)
# the data should still match though
raw_read = read_raw_edf(temp_fname, preload=True)
raw.drop_channels('2')
assert raw.ch_names == raw_read.ch_names
orig_raw_len = len(raw)
assert_array_almost_equal(
raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4)
assert_allclose(
raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)
@pytest.mark.skipif(not _check_edflib_installed(strict=False),
reason='edflib-python not installed')
@pytest.mark.parametrize(
['dataset', 'format'], [
['test', 'edf'],
pytest.param('misc', 'edf', marks=pytest.mark.slowtest),
])
def test_export_raw_edf(tmp_path, dataset, format):
"""Test saving a Raw instance to EDF format."""
if dataset == 'test':
fname = _resource_path('mne.io.tests.data', 'test_raw.fif')
raw = read_raw_fif(fname)
elif dataset == 'misc':
fname = op.join(misc.data_path(), 'ecog', 'sample_ecog_ieeg.fif')
raw = read_raw_fif(fname)
# only test with EEG channels
raw.pick_types(eeg=True, ecog=True, seeg=True,
eog=True, ecg=True, emg=True)
raw.load_data()
orig_ch_names = raw.ch_names
temp_fname = op.join(str(tmp_path), f'test.{format}')
# test runtime errors
with pytest.raises(RuntimeError, match='The maximum'), \
pytest.warns(RuntimeWarning, match='Data has a non-integer'):
raw.export(temp_fname, physical_range=(-1e6, 0))
with pytest.raises(RuntimeError, match='The minimum'), \
pytest.warns(RuntimeWarning, match='Data has a non-integer'):
raw.export(temp_fname, physical_range=(0, 1e6))
if dataset == 'test':
orig_ch_names = [ch.split(' ')[1] for ch in raw.ch_names]
with pytest.warns(RuntimeWarning, match='Data has a non-integer'):
raw.export(temp_fname, add_ch_type=False)
elif dataset == 'misc':
with pytest.warns(RuntimeWarning, match='EDF format requires'):
raw.export(temp_fname)
if 'epoc' in raw.ch_names:
raw.drop_channels(['epoc'])
raw_read = read_raw_edf(temp_fname, preload=True)
assert orig_ch_names == raw_read.ch_names
# only compare the original length, since extra zeros are appended
orig_raw_len = len(raw)
# assert data and times are not different
# Due to the physical range of the data, reading and writing is
# not lossless. For example, a physical min/max of -/+ 3200 uV
# will result in a resolution of 0.09 uV. This resolution
# though is acceptable for most EEG manufacturers.
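# With EDF's 16-bit integer samples that works out to roughly
# (3200 - (-3200)) uV / 2**16 ~= 0.098 uV per digital unit.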
assert_array_almost_equal(
raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4)
# Due to the data record duration limitations of EDF files, one
# cannot store arbitrary float sampling rate exactly. Usually this
# results in two sampling rates that are off by very low number of
# decimal points. This for practical purposes does not matter
# but will result in an error when say the number of time points
# is very very large.
assert_allclose(
raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)
@pytest.mark.skipif(not _check_eeglabio_installed(strict=False),
reason='eeglabio not installed')
@pytest.mark.parametrize('preload', (True, False))
def test_export_epochs_eeglab(tmpdir, preload):
"""Test saving an Epochs instance to EEGLAB's set format."""
raw, events = _get_data()[:2]
raw.load_data()
epochs = Epochs(raw, events, preload=preload)
temp_fname = op.join(str(tmpdir), 'test.set')
epochs.export(temp_fname)
epochs.drop_channels([ch for ch in ['epoc', 'STI 014']
if ch in epochs.ch_names])
epochs_read = read_epochs_eeglab(temp_fname)
assert epochs.ch_names == epochs_read.ch_names
cart_coords = np.array([d['loc'][:3]
for d in epochs.info['chs']]) # just xyz
cart_coords_read = np.array([d['loc'][:3]
for d in epochs_read.info['chs']])
assert_allclose(cart_coords, cart_coords_read)
assert_array_equal(epochs.events[:, 0],
epochs_read.events[:, 0]) # latency
assert epochs.event_id.keys() == epochs_read.event_id.keys() # just keys
assert_allclose(epochs.times, epochs_read.times)
assert_allclose(epochs.get_data(), epochs_read.get_data())
@requires_version('mffpy', '0.5.7')
@testing.requires_testing_data
@pytest.mark.parametrize('fmt', ('auto', 'mff'))
@pytest.mark.parametrize('do_history', (True, False))
def test_export_evokeds_to_mff(tmpdir, fmt, do_history):
"""Test exporting evoked dataset to MFF."""
evoked = read_evokeds_mff(egi_evoked_fname)
export_fname = op.join(str(tmpdir), 'evoked.mff')
history = [
{
'name': 'Test Segmentation',
'method': 'Segmentation',
'settings': ['Setting 1', 'Setting 2'],
'results': ['Result 1', 'Result 2']
},
{
'name': 'Test Averaging',
'method': 'Averaging',
'settings': ['Setting 1', 'Setting 2'],
'results': ['Result 1', 'Result 2']
}
]
if do_history:
export_evokeds_mff(export_fname, evoked, history=history)
else:
export_evokeds(export_fname, evoked)
# Drop non-EEG channels
evoked = [ave.drop_channels(['ECG', 'EMG']) for ave in evoked]
evoked_exported = read_evokeds_mff(export_fname)
assert len(evoked) == len(evoked_exported)
for ave, ave_exported in zip(evoked, evoked_exported):
# Compare infos
assert object_diff(ave_exported.info, ave.info) == ''
# Compare data
assert_allclose(ave_exported.data, ave.data)
# Compare properties
assert ave_exported.nave == ave.nave
assert ave_exported.kind == ave.kind
assert ave_exported.comment == ave.comment
assert_allclose(ave_exported.times, ave.times)
@requires_version('mffpy', '0.5.7')
@testing.requires_testing_data
def test_export_to_mff_no_device():
"""Test no device type throws ValueError."""
evoked = read_evokeds_mff(egi_evoked_fname, condition='Category 1')
evoked.info['device_info'] = None
with pytest.raises(ValueError, match='No device type.'):
export_evokeds('output.mff', evoked)
@requires_version('mffpy', '0.5.7')
def test_export_to_mff_incompatible_sfreq():
"""Test non-whole number sampling frequency throws ValueError."""
evoked = read_evokeds(fname_evoked)
with pytest.raises(ValueError, match=f'sfreq: {evoked[0].info["sfreq"]}'):
export_evokeds('output.mff', evoked)
@pytest.mark.parametrize('fmt,ext', [
('EEGLAB', 'set'),
('EDF', 'edf'),
('BrainVision', 'eeg')
])
def test_export_evokeds_unsupported_format(fmt, ext):
"""Test exporting evoked dataset to non-supported formats."""
evoked = read_evokeds(fname_evoked)
with pytest.raises(NotImplementedError, match=f'Export to {fmt} not imp'):
export_evokeds(f'output.{ext}', evoked)
|
|
from pyglet.gl import *
from camera import *
from light import *
from fos.actor import Box, Actor
from fos.transform import *
from vsml import vsml
from actor.base import DynamicActor
import numpy as np
class Scene(object):
def __init__(self, scenename="Main", transform=None,
extent_min=None, extent_max=None,
aabb_color=(1.0, 1.0, 1.0, 1.0),
activate_aabb=True):
"""Create a Scene which is a spatial reference system
and acts as a container for Actors presenting datasets.
Parameters
----------
scenename : str
The unique name of the Scene
transform : fos.transform.Transform3D
The affine transformation of the Scene, defining
the origin and the orientation of the axes, i.e. the local coordinate
system of the Scene
extent_min, extent_max : two 3x1 numpy.array
Defines the minimum and maximum extent of the Scene along
all three axes. This implicitly defines an
axis-aligned bounding box, which can be overwritten by
adding Actors bigger than the extent and then calling the
update() function of the Scene
Notes
-----
Scenes can overlap
"""
super(Scene, self).__init__()
self.scenename = scenename
self.aabb_color = aabb_color
self.activate_aabb = activate_aabb
if transform is None:
self.transform = IdentityTransform()
else:
self.transform = transform
self.actors = {}
if extent_min is not None and extent_max is not None:
self.extent_min = np.array(extent_min, dtype=np.float32)
self.extent_max = np.array(extent_max, dtype=np.float32)
if self.activate_aabb:
self.add_actor(Box(name="AABB",
blf=self.extent_min,
trb=self.extent_max,
color=self.aabb_color))
else:
self.extent_min = None
self.extent_max = None
def get_centroid(self, apply_transform = True):
"""Returns the centroid of the Scene.
Parameters
----------
apply_transform : bool
Applies the Scene affine transformation to the centroid
"""
if self.extent_min is not None and self.extent_max is not None:
ret = np.vstack( (self.extent_min,self.extent_max) ).mean( axis = 0 )
else:
ret = np.zeros( (1,3), dtype = np.float32 )
if apply_transform:
ret = self.transform.apply( ret )
return ret
def get_extent_min(self, apply_transform = True):
if self.extent_min is not None:
ret = self.extent_min
else:
ret = np.zeros( (1,3), dtype = np.float32 )
if apply_transform:
ret = self.transform.apply( ret )
return ret
def get_extent_max(self, apply_transform = True):
if self.extent_max is not None:
ret = self.extent_max
else:
ret = np.zeros( (1,3), dtype = np.float32 )
if apply_transform:
ret = self.transform.apply( ret )
return ret
def update_extent(self):
"""
Loop over all contained actors and query for the min/max extent
and update the Scene's extent accordingly
"""
for name, actor in self.actors.items():
if self.extent_min is not None:
self.extent_min = np.vstack( (self.extent_min,
actor.get_extent_min()) ).min( axis = 0 )
else:
self.extent_min = actor.get_extent_min()
if self.extent_max is not None:
self.extent_max = np.vstack( (self.extent_max,
actor.get_extent_max()) ).max( axis = 0 )
else:
self.extent_max = actor.get_extent_max()
# update AABB
if self.activate_aabb:
if "AABB" in self.actors:
self.actors['AABB'].update( self.extent_min,
self.extent_max, 0.0 )
else:
self.add_actor( Box(name="AABB",
blf=self.extent_min,
trb=self.extent_max,
color=self.aabb_color) )
def update(self):
self.update_extent()
def add_actor(self, actor, trigger_update = True):
if isinstance( actor, Actor ):
if actor.name in self.actors:
print("Actor {0} already exists in Scene {1}".format(actor.name, self.scenename))
else:
self.actors[actor.name] = actor
self.update()
else:
print("Not a valid Actor instance.")
def remove_actor(self, actor, trigger_update = True):
if isinstance( actor, Actor ):
if actor.name in self.actors:
del self.actors[actor.name]
self.update()
else:
print("Actor {0} does not exist in Scene {1}".format(actor.name, self.scenename) )
elif isinstance( actor, str ):
# actor is the unique name of the actor
if actor in self.actors:
del self.actors[actor]
self.update()
else:
print("Actor {0} does not exist in Scene {1}".format(actor, self.scenename))
else:
print("Not a valid Actor instance or actor name.")
def nextTimeFrame(self):
for k, actor in self.actors.items():
if actor.visible and isinstance(actor, DynamicActor):
actor.next()
def previousTimeFrame(self):
for k, actor in self.actors.items():
if actor.visible and isinstance(actor, DynamicActor):
actor.previous()
def draw_actors(self):
""" Draw all visible actors in the scene
"""
for k, actor in self.actors.items():
if actor.visible:
#print("Draw actor", actor.name)
actor.draw()
def pick_actors(self, x, y):
""" Pick all visible actors in the scene
"""
for k, actor in self.actors.items():
if actor.visible:
#print("Pick actor", actor.name)
actor.pick( x, y )
def send_messages(self,messages):
for k, actor in self.actors.items():
if actor.visible:
#print('Actor: ',actor.name)
actor.process_messages(messages)
class World(object):
def __init__(self):
self.scenes = {}
self.set_camera( SimpleCamera() )
self.light = None
def setup_light(self):
self.light = Light()
def update_lightposition(self, x, y, z):
self.light.update_lightposition(x, y, z)
def add_scene(self, scene):
if scene.scenename in self.scenes:
print("Scene {0} already exists.".format(scene.scenename))
else:
self.scenes[scene.scenename] = scene
def set_camera(self, camera):
self.camera = camera
# hackish, store a reference to the camera in the global static
# vsml object so that actors can position themselves based on camera parameters
vsml.camera = self.camera
def get_camera(self):
return self.camera
def refocus_camera(self):
# loop over all scenes, get global min, max, centroid
# set focus point to average, and location to centroid + 2x(max-min).z
centroids = np.zeros( (len(self.scenes), 3), dtype = np.float32 )
maxextent = np.zeros( (len(self.scenes), 3), dtype = np.float32 )
minextent = np.zeros( (len(self.scenes), 3), dtype = np.float32 )
for i, scene in enumerate(self.scenes.items()):
centroids[i,:] = scene[1].get_centroid()
maxextent[i,:] = scene[1].get_extent_max()
minextent[i,:] = scene[1].get_extent_min()
newfoc = centroids.mean( axis = 0 )
self.camera.set_focal( newfoc )
newloc = newfoc.copy()
# move along z axes sufficiently far to see all the scenes
# TODO: better
dist = maxextent.max( axis = 0 ) - minextent.min( axis = 0 )
newloc[2] += dist[0]
self.camera.set_location( newloc, np.array([0.0,1.0,0.0]) )
self.camera.update()
def nextTimeFrame(self):
for k, scene in self.scenes.items():
scene.nextTimeFrame()
def previousTimeFrame(self):
for k, scene in self.scenes.items():
scene.previousTimeFrame()
def pick_all(self, x, y):
""" Calls the pick function on all Scenes
"""
self.camera.draw()
for k, scene in self.scenes.items():
# use transformation matrix of the scene to setup the modelview
vsml.pushMatrix( vsml.MatrixTypes.MODELVIEW )
vsml.multMatrix( vsml.MatrixTypes.MODELVIEW,
scene.transform.get_transform_numpy() )
glMatrixMode(GL_MODELVIEW)
glLoadMatrixf(vsml.get_modelview())
scene.pick_actors( x, y )
# take back the old camera modelview
vsml.popMatrix( vsml.MatrixTypes.MODELVIEW )
def draw_all(self):
""" Draw all actors
"""
self.camera.draw()
for k, scene in self.scenes.items():
# use transformation matrix of the scene to setup the modelview
vsml.pushMatrix( vsml.MatrixTypes.MODELVIEW ) # in fact, push the camera modelview
vsml.multMatrix( vsml.MatrixTypes.MODELVIEW,
scene.transform.get_transform_numpy() )
glMatrixMode(GL_MODELVIEW)
glLoadMatrixf(vsml.get_modelview())
scene.draw_actors()
# take back the old camera modelview
vsml.popMatrix( vsml.MatrixTypes.MODELVIEW )
def send_all_messages(self,messages):
#print 'scenes.items',self.scenes.items
#print self.scenes.items()
for regname,scene in self.scenes.items():
#print 'Scene name ',regname
scene.send_messages(messages)
#print
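# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hedged example of wiring Scene and World together, using only the
# APIs defined above. It assumes the fos/pyglet dependencies import cleanly and
# that SimpleCamera can be driven without an open GL window; the AABB Box is
# disabled so no GL resources are needed at construction time.
if __name__ == "__main__":
    demo_scene = Scene(scenename="Demo",
                       extent_min=[0.0, 0.0, 0.0],
                       extent_max=[10.0, 10.0, 10.0],
                       activate_aabb=False)
    world = World()
    world.add_scene(demo_scene)
    world.refocus_camera()  # frame the camera around the scene extents
    print("Scene centroid:", demo_scene.get_centroid())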
|
|
"""
Django-specific helper utilities.
"""
from __future__ import print_function
import os
import re
import sys
import traceback
import glob
from functools import cmp_to_key
from importlib import import_module
from collections import defaultdict
from pprint import pprint
import six
from six import StringIO
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
from burlap.common import get_last_modified_timestamp
from burlap.trackers import BaseTracker
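# Python 3 removed the builtin cmp(); define a minimal replacement for use below.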
cmp = lambda a, b: (a > b) - (a < b)
class DjangoSettingsTracker(BaseTracker):
"""
Tracks changes to one or more Django settings.
Has only one custom parameter:
names = a list of Django setting names.
"""
def __init__(self, names, *args, **kwargs):
if isinstance(names, six.string_types):
names = names.replace(',', ' ').split(' ')
names = names or []
assert isinstance(names, (tuple, list, set))
names = sorted(set(_.strip() for _ in names if _.strip()))
super(DjangoSettingsTracker, self).__init__(*args, **kwargs)
self.names = names
@property
def names_string(self):
return ', '.join(self.names)
def __repr__(self):
return '<%s %s>' % (type(self).__name__, self.names_string)
def natural_key(self):
return (self.names_string,)
def get_thumbprint(self):
"""
Calculates the current thumbprint of the item being tracked.
"""
d = {}
settings = dj.get_settings()
for name in self.names:
d[name] = getattr(settings, name)
return d
class DjangoSatchel(Satchel):
# We don't use "django" as the name so as to not conflict with the official django package.
name = 'dj'
def set_defaults(self):
# This is the name of the executable to call to access Django's management features.
self.env.manage_cmd = 'manage.py'
# This is the name of your Django application.
self.env.app_name = None
# This is the import path to your Django settings file.
self.env.settings_module = '{app_name}.settings'
# The folder containing manage.py on the remote host. Must be absolute.
self.env.project_dir = None
# The folder containing manage.py on the local filesystem. May be relative to the fabfile directory.
self.env.local_project_dir = None
self.env.shell_template = 'cd {project_dir}; /bin/bash -i -c \"{manage_cmd} shell;\"'
self.env.fixture_sets = {} # {name: [paths to Django fixtures]}
# These apps will be migrated on a specific database, while faked
# on all others.
# This is necessary since South does not have proper support for
# multi-database applications.
#./manage migrate <app> --fake
#./manage migrate --database=<database> <app>
self.env.migrate_fakeouts = [] # [{database:<database>, app:<app>}]
self.env.install_sql_path_template = '{src_dir}/{app_name}/*/sql/*'
# The target version of Django to assume.
self.env.version = (1, 6, 0)
self.env.createsuperuser_cmd = 'createsuperuser'
self.env.manage_media = True
self.env.manage_migrations = True
self.env.media_dirs = ['static']
self.env.migrate_pre_command = ''
# If true, ignores errors that happen when migrate is run.
# Useful in multitenant dev environments where you don't want
# one missing password to break the entire deployment.
self.env.ignore_migration_errors = 0
# The path relative to fab where the code resides.
self.env.src_dir = 'src'
self.env.manage_dir = 'src'
# The relative or absolute path of the root static directory where collect_static places content.
self.env.static_root = 'static'
# Modules whose name start with one of these values will be deleted before settings are imported.
self.env.delete_module_with_prefixes = []
# Modules whose name contains any of these values will be deleted before settings are imported.
self.env.delete_module_containing = []
self.env.configure_media_command = 'cd {local_project_dir}; {manage_cmd} collectstatic --noinput'
def has_database(self, name, site=None, role=None):
settings = self.get_settings(site=site, role=role)
return name in settings.DATABASES
@task
def get_settings(self, site=None, role=None):
"""
Retrieves the Django settings dictionary.
"""
r = self.local_renderer
_stdout = sys.stdout
_stderr = sys.stderr
if not self.verbose:
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
sys.path.insert(0, r.env.src_dir)
# Temporarily override SITE.
tmp_site = self.genv.SITE
if site and site.endswith('_secure'):
site = site[:-7]
site = site or self.genv.SITE or self.genv.default_site
self.set_site(site)
# Temporarily override ROLE.
tmp_role = self.genv.ROLE
if role:
self.set_role(role)
try:
# We need to explicitly delete sub-modules from sys.modules. Otherwise, reload() skips
# them and they'll continue to contain obsolete settings.
if r.env.delete_module_with_prefixes:
for name in sorted(sys.modules):
for prefix in r.env.delete_module_with_prefixes:
if name.startswith(prefix):
if self.verbose:
print('Deleting module %s prior to re-import.' % name)
del sys.modules[name]
break
for name in list(sys.modules):
for s in r.env.delete_module_containing:
if s in name:
del sys.modules[name]
break
if r.env.settings_module in sys.modules:
del sys.modules[r.env.settings_module]
#TODO:fix r.env.settings_module not loading from settings?
# print('r.genv.django_settings_module:', r.genv.django_settings_module, file=_stdout)
# print('r.genv.dj_settings_module:', r.genv.dj_settings_module, file=_stdout)
# print('r.env.settings_module:', r.env.settings_module, file=_stdout)
if 'django_settings_module' in r.genv:
r.env.settings_module = r.genv.django_settings_module
else:
r.env.settings_module = r.env.settings_module or r.genv.dj_settings_module
if self.verbose:
print('r.env.settings_module:', r.env.settings_module, r.format(r.env.settings_module))
module = import_module(r.format(r.env.settings_module))
if site:
assert site == module.SITE, 'Unable to set SITE to "%s". Instead it is set to "%s".' % (site, module.SITE)
# Works as long as settings.py doesn't also reload anything.
import imp
imp.reload(module)
except ImportError as e:
print('Warning: Could not import settings for site "%s": %s' % (site, e), file=_stdout)
traceback.print_exc(file=_stdout)
#raise # breaks *_secure pseudo sites
return
finally:
if tmp_site:
self.set_site(tmp_site)
if tmp_role:
self.set_role(tmp_role)
finally:
sys.stdout = _stdout
sys.stderr = _stderr
sys.path.remove(r.env.src_dir)
return module
def set_db(self, name=None, site=None, role=None):
r = self.local_renderer
name = name or 'default'
site = site or r.env.get('SITE') or r.genv.SITE or r.genv.default_site
role = role or r.env.get('ROLE') or r.genv.ROLE
settings = self.get_settings(site=site, role=role)
assert settings, 'Unable to load Django settings for site %s.' % (site,)
r.env.django_settings = settings
default_db = settings.DATABASES[name]
if self.verbose:
print('default_db:')
pprint(default_db, indent=4)
r.env.db_name = default_db['NAME']
r.env.db_user = default_db.get('USER', r.genv.user) # sqlite doesn't have a user
r.env.db_host = default_db.get('HOST', 'localhost') # sqlite doesn't have a host
r.env.db_password = default_db.get('PASSWORD') # sqlite doesn't have a password
r.env.db_engine = default_db['ENGINE']
r.env.db_schema = 'public'
# Django stores the schema in the database-specific options at ['OPTIONS']['options'].
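# e.g. DATABASES['default']['OPTIONS'] = {'options': '-c search_path=myschema'}
# yields r.env.db_schema == 'myschema'.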
db_options = default_db.get('OPTIONS', {}).get('options', '')
try:
r.env.db_schema = re.findall(r'search_path=([a-zA-Z0-9_]+)', db_options)[0]
except IndexError:
pass
if 'mysql' in r.env.db_engine.lower():
r.env.db_type = 'mysql'
elif 'postgres' in r.env.db_engine.lower() or 'postgis' in r.env.db_engine.lower():
r.env.db_type = 'postgresql'
elif 'sqlite' in r.env.db_engine.lower():
r.env.db_type = 'sqlite'
else:
r.env.db_type = r.env.db_engine
for k, v in r.genv.items():
if not k.startswith(self.name.lower()+'_db_'):
continue
print('db.kv:', k, v)
return default_db
@task
def install_sql(self, site=None, database='default', apps=None, stop_on_error=0, fn=None, sql=None):
"""
Install custom SQL, by filename or string.
"""
#from burlap.db import load_db_set
stop_on_error = int(stop_on_error)
site = site or ALL
name = database
r = self.local_renderer
paths = glob.glob(r.format(r.env.install_sql_path_template))
apps = [_ for _ in (apps or '').split(',') if _.strip()]
if self.verbose:
print('install_sql.apps:', apps)
def cmp_paths(d0, d1):
if d0[1] and d0[1] in d1[2]:
return -1
if d1[1] and d1[1] in d0[2]:
return +1
return cmp(d0[0], d1[0])
def get_paths(t):
"""
Returns SQL file paths in an execution order that respects dependencies.
"""
data = [] # [(path, view_name, content)]
for path in paths:
if fn and fn not in path:
continue
parts = path.split('.')
if len(parts) == 3 and parts[1] != t:
continue
if not path.lower().endswith('.sql'):
continue
content = open(path, 'r').read()
matches = re.findall(r'[\s\t]+VIEW[\s\t]+([a-zA-Z0-9_]{3,})', content, flags=re.IGNORECASE)
view_name = ''
if matches:
view_name = matches[0]
print('Found view %s.' % view_name)
data.append((path, view_name, content))
for d in sorted(data, key=cmp_to_key(cmp_paths)):
yield d[0]
def run_paths(paths, cmd_template, max_retries=3):
r = self.local_renderer
paths = list(paths)
error_counts = defaultdict(int) # {path:count}
terminal = set()
if self.verbose:
print('Checking %i paths.' % len(paths))
while paths:
path = paths.pop(0)
if self.verbose:
print('path:', path)
app_name = re.findall(r'/([^/]+)/sql/', path)[0]
if apps and app_name not in apps:
self.vprint('skipping because app_name %s not in apps' % app_name)
continue
with self.settings(warn_only=True):
if self.is_local:
r.env.sql_path = path
else:
r.env.sql_path = '/tmp/%s' % os.path.split(path)[-1]
r.put(local_path=path, remote_path=r.env.sql_path)
ret = r.run_or_local(cmd_template)
if ret and ret.return_code:
if stop_on_error:
raise Exception('Unable to execute file %s' % path)
error_counts[path] += 1
if error_counts[path] < max_retries:
paths.append(path)
else:
terminal.add(path)
if terminal:
print('%i files could not be loaded.' % len(terminal), file=sys.stderr)
for path in sorted(list(terminal)):
print(path, file=sys.stderr)
print(file=sys.stderr)
if self.verbose:
print('install_sql.db_engine:', r.env.db_engine)
for _site, site_data in self.iter_sites(site=site, no_secure=True):
self.set_db(name=name, site=_site)
if 'postgres' in r.env.db_engine or 'postgis' in r.env.db_engine:
if sql:
r.env.sql = sql
with self.settings(warn_only=not stop_on_error):
r.run('psql --user={db_user} --no-password --host={db_host} -d {db_name} --command="{sql}"')
else:
paths = list(get_paths('postgresql'))
run_paths(
paths=paths,
cmd_template="psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}")
elif 'mysql' in r.env.db_engine:
if sql:
raise NotImplementedError("Custom SQL commands are not yet supported for MySQL.")
paths = list(get_paths('mysql'))
run_paths(
paths=paths,
cmd_template="mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}")
else:
raise NotImplementedError
@task
def createsuperuser(self, username='admin', email=None, password=None, site=None):
"""
Runs the Django createsuperuser management command.
"""
r = self.local_renderer
site = site or self.genv.SITE
self.set_site_specifics(site)
options = ['--username=%s' % username]
if email:
options.append('--email=%s' % email)
if password:
options.append('--password=%s' % password)
r.env.options_str = ' '.join(options)
if self.is_local:
r.env.project_dir = r.env.local_project_dir
r.genv.SITE = r.genv.SITE or site
r.run_or_local('export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; {manage_cmd} {createsuperuser_cmd} {options_str}')
@task
def loaddata(self, path, site=None):
"""
Runs the Django loaddata management command.
By default, runs on only the current site.
Pass site=all to run on all sites.
"""
site = site or self.genv.SITE
r = self.local_renderer
r.env._loaddata_path = path
for _site, site_data in self.iter_sites(site=site, no_secure=True):
try:
self.set_db(site=_site)
r.env.SITE = _site
r.sudo('export SITE={SITE}; export ROLE={ROLE}; '
'cd {project_dir}; '
'{manage_cmd} loaddata {_loaddata_path}')
except KeyError:
pass
@task
def manage(self, cmd, *args, **kwargs):
"""
A generic wrapper around Django's manage command.
"""
r = self.local_renderer
environs = kwargs.pop('environs', '').strip()
if environs:
environs = ' '.join('export %s=%s;' % tuple(_.split('=')) for _ in environs.split(','))
environs = ' ' + environs + ' '
r.env.cmd = cmd
r.env.SITE = r.genv.SITE or r.genv.default_site
r.env.args = ' '.join(map(str, args))
r.env.kwargs = ' '.join(
('--%s' % _k if _v in (True, 'True') else '--%s=%s' % (_k, _v))
for _k, _v in kwargs.items())
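# e.g. manage('migrate', 'myapp', fake=True, database='replica') renders the kwargs
# as "--fake --database=replica" (boolean True values become bare flags).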
r.env.environs = environs
if self.is_local:
r.env.project_dir = r.env.local_project_dir
r.run_or_local('export SITE={SITE}; export ROLE={ROLE};{environs} cd {project_dir}; {manage_cmd} {cmd} {args} {kwargs}')
@task
def manage_all(self, *args, **kwargs):
"""
Runs manage() across all unique site default databases.
"""
for site, site_data in self.iter_unique_databases(site='all'):
if self.verbose:
print('-'*80, file=sys.stderr)
print('site:', site, file=sys.stderr)
if self.env.available_sites_by_host:
hostname = self.current_hostname
sites_on_host = self.env.available_sites_by_host.get(hostname, [])
if sites_on_host and site not in sites_on_host:
self.vprint('skipping site:', site, sites_on_host, file=sys.stderr)
continue
self.manage(*args, **kwargs)
def load_django_settings(self):
"""
Loads Django settings for the current site and sets them so Django internals can be run.
"""
r = self.local_renderer
# Save environment variables so we can restore them later.
_env = {}
save_vars = ['ALLOW_CELERY', 'DJANGO_SETTINGS_MODULE']
for var_name in save_vars:
_env[var_name] = os.environ.get(var_name)
try:
# Allow us to import local app modules.
if r.env.local_project_dir:
sys.path.insert(0, r.env.local_project_dir)
#TODO:remove this once bug in django-celery has been fixed
os.environ['ALLOW_CELERY'] = '0'
# print('settings_module:', r.format(r.env.settings_module))
os.environ['DJANGO_SETTINGS_MODULE'] = r.format(r.env.settings_module)
# os.environ['CELERY_LOADER'] = 'django'
# os.environ['SITE'] = r.genv.SITE or r.genv.default_site
# os.environ['ROLE'] = r.genv.ROLE or r.genv.default_role
# In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet
# Disabling, in Django >= 1.10, throws exception:
# RuntimeError: Model class django.contrib.contenttypes.models.ContentType
# doesn't declare an explicit app_label and isn't in an application in INSTALLED_APPS.
# try:
# from django.core.wsgi import get_wsgi_application
# application = get_wsgi_application()
# except (ImportError, RuntimeError):
# raise
# print('Unable to get wsgi application.')
# traceback.print_exc()
# In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet
try:
import django
django.setup()
except AttributeError:
# This doesn't exist in Django < 1.7, so ignore it.
pass
# Load Django settings.
settings = self.get_settings()
try:
from django.contrib import staticfiles
from django.conf import settings as _settings
# get_settings() doesn't raise ImportError but returns None instead
if settings is not None:
for k, v in settings.__dict__.items():
setattr(_settings, k, v)
else:
raise ImportError
except (ImportError, RuntimeError):
print('Unable to load settings.')
traceback.print_exc()
finally:
# Restore environment variables.
for var_name, var_value in _env.items():
if var_value is None:
del os.environ[var_name]
else:
os.environ[var_name] = var_value
return settings
def iter_static_paths(self, ignore_import_error=False):
self.load_django_settings()
from django.contrib.staticfiles import finders, storage
for finder in finders.get_finders():
for path, _storage in finder.list(ignore_patterns=[]):
yield path
def iter_app_directories(self, ignore_import_error=False):
settings = self.load_django_settings()
if not settings:
return
_cwd = os.getcwd()
if self.env.local_project_dir:
os.chdir(self.env.local_project_dir)
try:
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError:
if ignore_import_error:
continue
else:
raise
yield app, os.path.dirname(mod.__file__)
finally:
os.chdir(_cwd)
def iter_south_directories(self, *args, **kwargs):
for app_name, base_app_dir in self.iter_app_directories(*args, **kwargs):
migrations_dir = os.path.join(base_app_dir, 'migrations')
if not os.path.isdir(migrations_dir):
continue
yield app_name, migrations_dir
def iter_migrations(self, d, *args, **kwargs):
for fn in sorted(os.listdir(d)):
if fn.startswith('_') or not fn.endswith('.py'):
continue
fqfn = os.path.join(d, fn)
if not os.path.isfile(fqfn):
continue
yield fn
def iter_unique_databases(self, site=None):
site = site or ALL
r = self.local_renderer
prior_database_names = set()
#print('iter_unique_databases.begin.site_default:', site)
for _site, site_data in self.iter_sites(site=site, no_secure=True):
#print('iter_unique_databases._site:', _site)
self.set_db(site=_site)
key = (r.env.db_name, r.env.db_user, r.env.db_host, r.env.db_engine)
#print('iter_unique_databases._site:', _site, key)
if key in prior_database_names:
continue
prior_database_names.add(key)
r.env.SITE = _site
yield _site, site_data
@task
def shell(self):
"""
Opens a Django-focused Python shell.
Essentially the equivalent of running `manage.py shell`.
"""
r = self.local_renderer
if '@' in self.genv.host_string:
r.env.shell_host_string = self.genv.host_string
else:
r.env.shell_host_string = '{user}@{host_string}'
r.env.shell_default_dir = self.genv.shell_default_dir_template
r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"')
@task
def syncdb(self, site=None, all=0, database=None, ignore_errors=1): # pylint: disable=redefined-builtin
"""
Runs the standard Django syncdb command for one or more sites.
"""
r = self.local_renderer
ignore_errors = int(ignore_errors)
post_south = self.version_tuple >= (1, 7, 0)
use_run_syncdb = self.version_tuple >= (1, 9, 0)
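# Management command chosen per Django version (as encoded by the flags above):
#   >= 1.9        : manage.py migrate --run-syncdb
#   >= 1.7, < 1.9 : manage.py migrate
#   <  1.7        : manage.py syncdb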
# DEPRECATED: removed in Django>=1.7
r.env.db_syncdb_all_flag = '--all' if int(all) else ''
r.env.db_syncdb_database = ''
if database:
r.env.db_syncdb_database = ' --database=%s' % database
if self.is_local:
r.env.project_dir = r.env.local_project_dir
site = site or self.genv.SITE
for _site, site_data in r.iter_unique_databases(site=site):
r.env.SITE = _site
with self.settings(warn_only=ignore_errors):
if post_south:
if use_run_syncdb:
r.run_or_local(
'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
'{manage_cmd} migrate --run-syncdb --noinput {db_syncdb_database}')
else:
# Between Django>=1.7,<1.9 we can only do a regular migrate, no true syncdb.
r.run_or_local(
'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
'{manage_cmd} migrate --noinput {db_syncdb_database}')
else:
r.run_or_local(
'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
'{manage_cmd} syncdb --noinput {db_syncdb_all_flag} {db_syncdb_database}')
@property
def version_tuple(self):
r = self.local_renderer
return tuple(r.env.version)
@task
def migrate(self, app='', migration='', site=None, fake=0, ignore_errors=None, skip_databases=None, database=None, migrate_apps='', delete_ghosts=1):
"""
Runs the standard South migrate command for one or more sites.
"""
# Note: to pass a comma-delimited list in a fab command, escape the comma with a backslash.
#
# e.g.
#
# fab staging dj.migrate:migrate_apps=oneapp\,twoapp\,threeapp
r = self.local_renderer
ignore_errors = int(r.env.ignore_migration_errors if ignore_errors is None else ignore_errors)
delete_ghosts = int(delete_ghosts)
post_south = self.version_tuple >= (1, 7, 0)
if self.version_tuple >= (1, 9, 0):
delete_ghosts = 0
skip_databases = (skip_databases or '')
if isinstance(skip_databases, six.string_types):
skip_databases = [_.strip() for _ in skip_databases.split(',') if _.strip()]
migrate_apps = migrate_apps or ''
migrate_apps = [
_.strip().split('.')[-1]
for _ in migrate_apps.strip().split(',')
if _.strip()
]
if app:
migrate_apps.append(app)
r.env.migrate_migration = migration or ''
r.env.migrate_fake_str = '--fake' if int(fake) else ''
r.env.migrate_database = '--database=%s' % database if database else ''
r.env.migrate_merge = '--merge' if not post_south else ''
r.env.delete_ghosts = '--delete-ghost-migrations' if delete_ghosts and not post_south else ''
self.vprint('project_dir0:', r.env.project_dir, r.genv.get('dj_project_dir'), r.genv.get('project_dir'))
self.vprint('migrate_apps:', migrate_apps)
if self.is_local:
r.env.project_dir = r.env.local_project_dir
# CS 2017-3-29 Don't bypass the iterator. That causes reversion to the global env that could corrupt the generated commands.
#databases = list(self.iter_unique_databases(site=site))#TODO:remove
# CS 2017-4-24 Don't specify a single site as the default when none is supplied. Otherwise all other sites will be ignored.
#site = site or self.genv.SITE
site = site or ALL
databases = self.iter_unique_databases(site=site)
for _site, site_data in databases:
self.vprint('-'*80, file=sys.stderr)
self.vprint('site:', _site, file=sys.stderr)
if self.env.available_sites_by_host:
hostname = self.current_hostname
sites_on_host = self.env.available_sites_by_host.get(hostname, [])
if sites_on_host and _site not in sites_on_host:
self.vprint('skipping site:', _site, sites_on_host, file=sys.stderr)
continue
if not migrate_apps:
migrate_apps.append(' ')
for _app in migrate_apps:
# In cases where we're migrating built-in apps or apps with dotted names
# e.g. django.contrib.auth, extract the name used for the migrate command.
r.env.migrate_app = _app.split('.')[-1]
self.vprint('project_dir1:', r.env.project_dir, r.genv.get('dj_project_dir'), r.genv.get('project_dir'))
r.env.SITE = _site
with self.settings(warn_only=ignore_errors):
r.run_or_local(
'export SITE={SITE}; export ROLE={ROLE}; {migrate_pre_command} cd {project_dir}; '
'{manage_cmd} migrate --noinput {migrate_merge} --traceback '
'{migrate_database} {delete_ghosts} {migrate_app} {migrate_migration} '
'{migrate_fake_str}')
@task
def migrate_all(self, *args, **kwargs):
kwargs['site'] = 'all'
return self.migrate(*args, **kwargs)
@task
def truncate(self, app):
assert self.genv.SITE, 'This should only be run for a specific site.'
r = self.local_renderer
r.env.app = app
r.run('rm -f {app_dir}/{app}/migrations/*.py')
r.run('rm -f {app_dir}/{app}/migrations/*.pyc')
r.run('touch {app_dir}/{app}/migrations/__init__.py')
r.run('export SITE={SITE}; export ROLE={ROLE}; cd {app_dir}; {manage_cmd} schemamigration {app} --initial')
r.run('export SITE={SITE}; export ROLE={ROLE}; cd {app_dir}; {manage_cmd} migrate {app} --fake')
@task
def manage_async(self, command='', name='process', site=ALL, exclude_sites='', end_message='', recipients=''):
"""
Starts a Django management command in a screen.
Parameters:
command :- all arguments passed to `./manage` as a single string
site :- the site to run the command for (default is all)
Designed to be run like:
fab <role> dj.manage_async:"some_management_command --force"
"""
exclude_sites = exclude_sites.split(':')
r = self.local_renderer
for _site, site_data in self.iter_sites(site=site, no_secure=True):
if _site in exclude_sites:
continue
r.env.SITE = _site
r.env.command = command
r.env.end_email_command = ''
r.env.recipients = recipients or ''
if end_message:
end_message = end_message + ' for ' + _site
end_message = end_message.replace(' ', '_')
r.env.end_message = end_message
r.env.end_email_command = r.format('{manage_cmd} send_mail --subject={end_message} --recipients={recipients}')
r.env.name = name.format(**r.genv)
r.run(
'screen -dmS {name} bash -c "export SITE={SITE}; '\
'export ROLE={ROLE}; cd {project_dir}; '\
'{manage_cmd} {command} --traceback; {end_email_command}"; sleep 3;')
@task
def get_media_timestamp(self, last_timestamp=None):
"""
Retrieves the most recent timestamp of the media in the static root.
If last_timestamp is given, retrieves the first timestamp more recent than this value.
"""
r = self.local_renderer
_latest_timestamp = float('-inf')
for path in self.iter_static_paths():
path = r.env.static_root + '/' + path
self.vprint('checking timestamp of path:', path)
if not os.path.isfile(path):
continue
#print('path:', path)
_latest_timestamp = max(_latest_timestamp, get_last_modified_timestamp(path) or _latest_timestamp)
if last_timestamp is not None and _latest_timestamp > last_timestamp:
break
self.vprint('latest_timestamp:', _latest_timestamp)
return _latest_timestamp
@task
def has_media_changed(self):
print('Checking to see if Django static media has changed...')
lm = self.last_manifest
# Unless this is our first time running, this should always be non-None.
last_timestamp = lm.latest_timestamp
current_timestamp = self.get_media_timestamp(last_timestamp=last_timestamp)
self.vprint('last_timestamp:', last_timestamp)
self.vprint('current_timestamp:', current_timestamp)
changed = current_timestamp != last_timestamp
if changed:
print('It has.')
else:
print('It has not.')
return changed
def get_migration_fingerprint(self):
data = {} # {app: latest_migration_name}
for app_name, _dir in self.iter_app_directories(ignore_import_error=True):
#print('app_name, _dir:', app_name, _dir)
migration_dir = os.path.join(_dir, 'migrations')
if not os.path.isdir(migration_dir):
continue
for migration_name in self.iter_migrations(migration_dir):
data[app_name] = migration_name
if self.verbose:
print('%s.migrations:' % self.name)
pprint(data, indent=4)
return data
def record_manifest(self):
manifest = super(DjangoSatchel, self).record_manifest()
manifest['latest_timestamp'] = self.get_media_timestamp()
manifest['migrations'] = self.get_migration_fingerprint()
return manifest
@task(precursors=['packager', 'pip'])
def configure_media(self, *args, **kwargs):
if self.has_media_changed():
r = self.local_renderer
assert r.env.local_project_dir
r.local(r.env.configure_media_command)
@task(precursors=['packager', 'apache', 'pip', 'tarball', 'postgresql', 'mysql'])
def configure_migrations(self):
r = self.local_renderer
assert r.env.local_project_dir
last = self.last_manifest.migrations or {}
current = self.current_manifest.get('migrations') or {}
migrate_apps = []
if self.verbose:
print('djangomigrations.last:')
pprint(last, indent=4)
print('djangomigrations.current:')
pprint(current, indent=4)
for app_name in current:
if current[app_name] != last.get(app_name):
migrate_apps.append(app_name)
if migrate_apps:
self.vprint('%i apps with new migrations found!' % len(migrate_apps))
self.vprint('migrate_apps:', migrate_apps)
self.vprint('ignore_migration_errors:', self.env.ignore_migration_errors)
# Note, Django's migrate command doesn't support multiple app name arguments
# with all options, so we run it separately for each app.
for app in migrate_apps:
self.migrate(app=app, ignore_errors=self.env.ignore_migration_errors)
else:
self.vprint('No new migrations.')
@task(precursors=['packager', 'tarball'])
def configure(self, *args, **kwargs):
if self.env.manage_media:
self.configure_media()
if self.env.manage_migrations:
self.configure_migrations()
dj = DjangoSatchel()
|
|
"""
test code for html_render.py
includes step 4
"""
import io
from html_render import (Element,
Html,
Body,
P,
TextWrapper,
Head,
Title,
)
# utility function for testing render methods
# needs to be used in multiple tests, so write it once here.
def render_result(element, ind=""):
"""
calls element's render method, and returns what got rendered as a string
"""
outfile = io.StringIO()
element.render(outfile, ind)
return outfile.getvalue()
def test_init():
"""
this only tests that it can be initialized -- but it's a start
"""
e = Element()
e = Element("this is some text")
# These two tests were testing internals
# so they failed when I added the TextWrapper
# but I'm removing them because tests really should be testing
# the external API.
# def test_content():
# # fixme: this tests internals!!!!
# e = Element("this is some text")
# assert "this is some text" in e.content
# def test_append():
# e = Element("this is some text")
# e.append("some more text")
# assert "some more text" in e.content
def test_two_instances():
e = Element("this is some text")
e2 = Element("this is some text")
e.append("some more text")
assert "some more text" not in e2.content
def test_render():
e = Element("this is some text")
e.append("and this is some more text")
file_contents = render_result(e)
assert("this is some text") in file_contents
assert("and this is some more text") in file_contents
assert file_contents.startswith("<html>")
assert file_contents.strip().endswith("</html>")
def test_html():
e = Html("this is some text")
e.append("and this is some more text")
file_contents = render_result(e)
assert("this is some text") in file_contents
assert("and this is some more text") in file_contents
assert file_contents.startswith("<html>")
assert file_contents.strip().endswith("</html>")
def test_body():
e = Body("this is some text")
e.append("and this is some more text")
file_contents = render_result(e)
assert("this is some text") in file_contents
assert("and this is some more text") in file_contents
assert file_contents.startswith("<body>")
assert file_contents.strip().endswith("</body>")
def test_p():
e = P("this is some text")
e.append("and this is some more text")
file_contents = render_result(e)
assert("this is some text") in file_contents
assert("and this is some more text") in file_contents
assert file_contents.startswith("<p>")
assert file_contents.strip().endswith("</p>")
def test_text_wrapper():
tw = TextWrapper("A basic piece of text")
file_contents = render_result(tw)
assert file_contents == "A basic piece of text"
def test_non_str():
""" you should be able to pass anything in, and it will get
"stringified"
"""
e = P(34) # a number
e.append((3, 4, 5)) # even a tuple
file_contents = render_result(e)
print(file_contents)
assert("34") in file_contents
assert("(3, 4, 5)") in file_contents
def test_sub_element():
"""
tests that you can add another element and still render properly
"""
page = Html()
page.append("some plain text.")
page.append(P("A simple paragraph of text"))
page.append("Some more plain text.")
file_contents = render_result(page)
# note: the above tests should make sure that the tags are getting rendered.
assert "some plain text" in file_contents
assert "A simple paragraph of text" in file_contents
assert "Some more plain text." in file_contents
def test_step_2_noindent():
"""
This is more of an integration test -- a number of things together
this test does not yet include indentation
"""
page = Html()
body = Body()
page.append(body)
body.append(P("a small paragraph of text"))
body.append(P("another small paragraph of text"))
body.append(P("and here is a bit more"))
file_contents = render_result(page).strip()
print(file_contents)
assert file_contents.startswith("<html>")
assert file_contents.endswith("</html>")
assert "a small paragraph of text" in file_contents
assert "<body>" in file_contents
# you could do more here, but it should all be covered above.
# assert False
def test_indent():
"""
Tests that the indentation gets passed through to the renderer
"""
html = Html("some content")
file_contents = render_result(html, ind=" ")
print(file_contents)
lines = file_contents.split("\n")
assert lines[0].startswith(" <")
assert lines[-1].startswith(" <")
def test_indent_contents():
"""
    The contents of an element should be indented more than the tag
by the amount in the indent class attribute
"""
html = Html("some content")
file_contents = render_result(html, ind="")
print(file_contents)
lines = file_contents.split("\n")
assert lines[1].startswith(Element.indent)
def test_multiple_indent():
"""
make sure multiple levels get indented fully
"""
body = Body()
body.append(P("some text"))
html = Html(body)
file_contents = render_result(html)
print(file_contents)
lines = file_contents.split("\n")
for i in range(3):
assert lines[i].startswith(i * Element.indent + "<")
assert lines[3].startswith(3 * Element.indent + "some")
def test_title():
"""
This will implicitly test the OneLineTag element
"""
t = Title("Isn't this a nice title?")
# making sure indentation still works
file_contents = render_result(t, ind=" ")
print(file_contents)
# no "strip()" -- making sure there are no extra newlines
assert "\n" not in file_contents
assert "> " not in file_contents
assert file_contents.startswith(" <title>")
assert file_contents.endswith("</title>")
    # double-check: a OneLineTag should render without any newlines at all
    assert "\n" not in file_contents
def test_head():
    """
    testing Head with a title in it -- it should never be blank
    """
    h = Head()
    h.append(Title("A nifty title for the page"))
    file_contents = render_result(h)
    assert "A nifty title for the page" in file_contents
def test_full_page_with_title():
"""
not much to actually test here, but good to see it put together.
everything should have already been tested.
"""
page = Html()
head = Head()
head.append(Title("PythonClass Example"))
page.append(head)
body = Body()
body.append(P("Here is a paragraph of text -- there could be more of them, "
"but this is enough to show that we can do some text"))
body.append(P("And here is another piece of text -- you should be able to add any number"))
page.append(body)
file_contents = render_result(page)
print(file_contents)
# uncomment this to see results
# assert False
def test_attributes():
"""
tests that you can pass attributes in to the tag
"""
e = Element("some text", id="this", color="red") # could be any attributes
file_contents = render_result(e)
print(file_contents)
assert 'id="this"' in file_contents
assert 'color="red"' in file_contents
# note -- dicts aren't ordered, so you can't enforce order!
# assert '<html color="red" id="this">' in file_contents
def test_attributes_one_line_tag():
"""
tests that you can pass attributes in to the tag
"""
e = Title("some text", id="this", color="red") # could be any attributes
file_contents = render_result(e)
print(file_contents)
assert 'id="this"' in file_contents
assert 'color="red"' in file_contents
|
|
import json
from django.conf import settings
from django.core.mail import send_mail, BadHeaderError
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_http_methods
from contacts.forms import ContactForm
from helpers.models import Helpers
from offer.models import Offer
from offer.models import OfferCategory
from tours.forms import BookNow
from tours.models import Category, Tour, About
def get_lang(request):
lang = request.LANGUAGE_CODE
return lang
def get_company():
return Helpers.objects.get(id=1).company_name
def home(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
lang = request.LANGUAGE_CODE
booking_form = BookNow()
breadcrumbs = [
{'url': '/', 'name': _('Home'), 'active': True},
]
    helpers = Helpers.objects.get(id=1)
    header = {
        'pt': helpers.start_page_header_pt,
        'en': helpers.start_page_header_gb,
        'de': helpers.start_page_header_de
    }
    tour_header = {
        'pt': helpers.tour_header_name_PT,
        'en': helpers.tour_header_name_EN,
        'de': helpers.tour_header_name_DE
    }
    offer_header = {
        'pt': helpers.offer_header_name_PT,
        'en': helpers.offer_header_name_EN,
        'de': helpers.offer_header_name_DE
    }
    footer = {
        'pt': helpers.about_footer_PT,
        'en': helpers.about_footer_EN,
        'de': helpers.about_footer_DE
    }
    context = {
        'booking_form': booking_form,
        'nav': {
            'tour_categories_list': Category.objects.all(),
            'offer_categories_list': OfferCategory.objects.all(),
        },
        'audio': helpers.audio,
        'company': get_company(),
        'header': header[lang],
        'value': _('Send'),
        'footer': {
            'about': footer[lang],
            'icon': helpers.footer_icon
        },
        'section': {
            'tour_header': tour_header[lang],
            'offer_header': offer_header[lang]
        },
        'img1': helpers.img,
        'img2': helpers.img2,
        'img3': helpers.img3,
        'img4': helpers.img4,
        'img5': helpers.img5,
        'lang': lang,
        'offer_list': Offer.objects.all(),
        'tour_list': Tour.objects.all(),
        'breadcrumbs': breadcrumbs
    }
return render(request, 'partials/home.html', context)
def about(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '#', 'name': _('About'), 'active': True}
]
lang = request.LANGUAGE_CODE
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('About'),
'breadcrumbs': breadcrumbs,
'about_list': About.objects.all(),
}
return render(request, 'partials/about.html', context)
def login_or_register(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
breadcrumbs = [{'url': '/', 'name': _('Home'), 'active': True}]
return render(request, 'partials/login_or_register.html', {'breadcrumbs': breadcrumbs})
def search(request):
lang = request.LANGUAGE_CODE
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
offer_queryset = Offer.objects.all()
tour_queryset = Tour.objects.all()
query = request.GET.get('q')
offer_object_list = []
tour_object_list = []
    if 'pt' in lang:
        offer_object_list = offer_queryset.filter(
            Q(title_PT__icontains=query) |
            Q(description_PT__icontains=query)
        ).distinct()
    elif 'en' in lang:
        offer_object_list = offer_queryset.filter(
            Q(title_EN__icontains=query) |
            Q(description_EN__icontains=query)
        ).distinct()
    elif 'de' in lang:
        offer_object_list = offer_queryset.filter(
            Q(title_DE__icontains=query) |
            Q(description_DE__icontains=query))
    if 'pt' in lang:
        tour_object_list = tour_queryset.filter(
            Q(title_PT__icontains=query) |
            Q(description_PT__icontains=query)
        ).distinct()
    elif 'en' in lang:
        tour_object_list = tour_queryset.filter(
            Q(title_EN__icontains=query) |
            Q(description_EN__icontains=query)
        ).distinct()
    elif 'de' in lang:
        tour_object_list = tour_queryset.filter(
            Q(title_DE__icontains=query) |
            Q(description_DE__icontains=query))
context = {
'offer_object_list': offer_object_list,
'tour_object_list': tour_object_list,
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
        'title': _('Search'),
'company': get_company(),
'breadcrumbs': [
{'url': '/', 'name': _('Home')},
]}
return render(request, 'partials/search.html', context)
@require_http_methods(['POST'])
def welcome(request):
body_unicode = request.body.decode('utf-8')
body_data = json.loads(body_unicode)
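    # Expected JSON payload shape (illustrative; inferred from the keys read below):
    #   {"name": "...", "email": "...", "message": "...",
    #    "additionalInformation": "..."}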
form = ContactForm({
"name": body_data["name"],
"email": body_data["email"],
"message": body_data["message"],
"additional_information": body_data["additionalInformation"],
})
if form.is_valid():
return HttpResponse(request.body)
else:
response = HttpResponse(form.errors)
response.status_code = 422
response.reason_phrase = 'Validation failed'
return response
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Event dispatch framework.
All objects that produce events in pyglet implement `EventDispatcher`,
providing a consistent interface for registering and manipulating event
handlers. A commonly used event dispatcher is `pyglet.window.Window`.
Event types
===========
For each event dispatcher there is a set of events that it dispatches; these
correspond with the type of event handlers you can attach. Event types are
identified by their name, for example, ``on_resize``. If you are creating a
new class which implements `EventDispatcher`, you must call
`EventDispatcher.register_event_type` for each event type.
Attaching event handlers
========================
An event handler is simply a function or method. You can attach an event
handler by setting the appropriate function on the instance::
def on_resize(width, height):
# ...
dispatcher.on_resize = on_resize
There is also a convenience decorator that reduces typing::
@dispatcher.event
def on_resize(width, height):
# ...
You may prefer to subclass and override the event handlers instead::
class MyDispatcher(DispatcherClass):
def on_resize(self, width, height):
# ...
Event handler stack
===================
When attaching an event handler to a dispatcher using the above methods, it
replaces any existing handler (causing the original handler to no longer be
called). Each dispatcher maintains a stack of event handlers, allowing you to
insert an event handler "above" the existing one rather than replacing it.
There are two main use cases for "pushing" event handlers:
* Temporarily intercepting the events coming from the dispatcher by pushing a
custom set of handlers onto the dispatcher, then later "popping" them all
off at once.
* Creating "chains" of event handlers, where the event propogates from the
top-most (most recently added) handler to the bottom, until a handler
takes care of it.
Use `EventDispatcher.push_handlers` to create a new level in the stack and
attach handlers to it. You can push several handlers at once::
dispatcher.push_handlers(on_resize, on_key_press)
If your function handlers have different names to the events they handle, use
keyword arguments::
dispatcher.push_handlers(on_resize=my_resize,
on_key_press=my_key_press)
After an event handler has processed an event, it is passed on to the
next-lowest event handler, unless the handler returns `EVENT_HANDLED`, which
prevents further propagation.
To remove all handlers on the top stack level, use
`EventDispatcher.pop_handlers`.
Note that any handlers pushed onto the stack have precedence over the
handlers set directly on the instance (for example, using the methods
described in the previous section), regardless of when they were set.
For example, handler ``foo`` is called before handler ``bar`` in the following
example::
dispatcher.push_handlers(on_resize=foo)
dispatcher.on_resize = bar
Dispatching events
==================
pyglet uses a single-threaded model for all application code. Event
handlers are only ever invoked as a result of calling
`EventDispatcher.dispatch_events`.
It is up to the specific event dispatcher to queue relevant events until they
can be dispatched, at which point the handlers are called in the order the
events were originally generated.
This implies that your application runs with a main loop that continuously
updates the application state and checks for new events::
while True:
dispatcher.dispatch_events()
# ... additional per-frame processing
Not all event dispatchers require the call to ``dispatch_events``; check with
the particular class documentation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: event.py 1230 2007-09-04 14:57:31Z Alex.Holkner $'
import inspect
EVENT_HANDLED = True
EVENT_UNHANDLED = None
class EventException(Exception):
'''An exception raised when an event handler could not be attached.
'''
pass
class EventDispatcher(object):
'''Generic event dispatcher interface.
See the module docstring for usage.
'''
# Placeholder empty stack; real stack is created only if needed
_event_stack = ()
@classmethod
def register_event_type(cls, name):
'''Register an event type with the dispatcher.
Registering event types allows the dispatcher to validate event
handler names as they are attached, and to search attached objects for
suitable handlers.
:Parameters:
`name` : str
Name of the event to register.
'''
if not hasattr(cls, 'event_types'):
cls.event_types = []
cls.event_types.append(name)
return name
def push_handlers(self, *args, **kwargs):
'''Push a level onto the top of the handler stack, then attach zero or
more event handlers.
If keyword arguments are given, they name the event type to attach.
Otherwise, a callable's `__name__` attribute will be used. Any other
object may also be specified, in which case it will be searched for
callables with event names.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
# Place dict full of new handlers at beginning of stack
self._event_stack.insert(0, {})
self.set_handlers(*args, **kwargs)
def set_handlers(self, *args, **kwargs):
'''Attach one or more event handlers to the top level of the handler
stack.
See `push_handlers` for the accepted argument types.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
for object in args:
if inspect.isroutine(object):
# Single magically named function
name = object.__name__
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
self.set_handler(name, object)
else:
# Single instance with magically named methods
for name, handler in inspect.getmembers(object):
if name in self.event_types:
self.set_handler(name, handler)
for name, handler in kwargs.items():
# Function for handling given event (no magic)
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
self.set_handler(name, handler)
def set_handler(self, name, handler):
'''Attach a single event handler.
:Parameters:
`name` : str
Name of the event type to attach to.
`handler` : callable
Event handler to attach.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
self._event_stack[0][name] = handler
def pop_handlers(self):
'''Pop the top level of event handlers off the stack.
'''
        assert self._event_stack, 'No handlers pushed'
del self._event_stack[0]
def dispatch_event(self, event_type, *args):
'''Dispatch a single event to the attached handlers.
        The event is propagated to all handlers from the top of the stack
until one returns `EVENT_HANDLED`. This method should be used only by
`EventDispatcher` implementors; applications should call
`dispatch_events`.
:Parameters:
`event_type` : str
Name of the event.
`args` : sequence
Arguments to pass to the event handler.
'''
assert event_type in self.event_types
# Search handler stack for matching event handlers
for frame in self._event_stack[:]:
handler = frame.get(event_type, None)
if handler:
ret = handler(*args)
if ret != EVENT_UNHANDLED:
return
# Check instance for an event handler
if hasattr(self, event_type):
getattr(self, event_type)(*args)
def event(self, *args):
'''Function decorator for an event handler.
Usage::
win = window.Window()
@win.event
            def on_resize(width, height):
# ...
or::
@win.event('on_resize')
            def foo(width, height):
# ...
'''
if len(args) == 0: # @window.event()
def decorator(func):
name = func.__name__
setattr(self, name, func)
return func
return decorator
elif inspect.isroutine(args[0]): # @window.event
func = args[0]
name = func.__name__
setattr(self, name, func)
return args[0]
elif type(args[0]) in (str, unicode): # @window.event('on_resize')
name = args[0]
def decorator(func):
setattr(self, name, func)
return func
return decorator
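# ---------------------------------------------------------------------------
# Hedged usage sketch -- not part of the original pyglet source. It wires a
# minimal dispatcher subclass end-to-end (register_event_type, push_handlers,
# an instance handler, dispatch_event) and only runs when this module is
# executed directly. The names ``Clock`` and ``on_tick`` are illustrative.
if __name__ == '__main__':
    class Clock(EventDispatcher):
        def tick(self):
            self.dispatch_event('on_tick')
    Clock.register_event_type('on_tick')
    def stack_tick():
        print('stack handler: tick')
        return EVENT_UNHANDLED      # keep propagating down the stack
    def instance_tick():
        print('instance handler: tick')
    clock = Clock()
    clock.on_tick = instance_tick            # handler set directly on the instance
    clock.push_handlers(on_tick=stack_tick)  # pushed handlers take precedence
    clock.tick()                             # prints the stack handler first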
|
|
"""ACME AuthHandler."""
import itertools
import logging
import time
import zope.component
from acme import challenges
from acme import messages
from letsencrypt import achallenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
logger = logging.getLogger(__name__)
class AuthHandler(object):
"""ACME Authorization Handler for a client.
:ivar dv_auth: Authenticator capable of solving
:class:`~acme.challenges.DVChallenge` types
:type dv_auth: :class:`letsencrypt.interfaces.IAuthenticator`
:ivar cont_auth: Authenticator capable of solving
:class:`~acme.challenges.ContinuityChallenge` types
:type cont_auth: :class:`letsencrypt.interfaces.IAuthenticator`
:ivar acme.client.Client acme: ACME client API.
:ivar account: Client's Account
:type account: :class:`letsencrypt.account.Account`
:ivar dict authzr: ACME Authorization Resource dict where keys are domains
and values are :class:`acme.messages.AuthorizationResource`
:ivar list dv_c: DV challenges in the form of
:class:`letsencrypt.achallenges.AnnotatedChallenge`
:ivar list cont_c: Continuity challenges in the
form of :class:`letsencrypt.achallenges.AnnotatedChallenge`
"""
def __init__(self, dv_auth, cont_auth, acme, account):
self.dv_auth = dv_auth
self.cont_auth = cont_auth
self.acme = acme
self.account = account
self.authzr = dict()
# List must be used to keep responses straight.
self.dv_c = []
self.cont_c = []
def get_authorizations(self, domains, best_effort=False):
"""Retrieve all authorizations for challenges.
:param set domains: Domains for authorization
:param bool best_effort: Whether or not all authorizations are
required (this is useful in renewal)
        :returns: List of authorization resources for domains that validated
            successfully.
        :rtype: list
:raises .AuthorizationError: If unable to retrieve all
authorizations
"""
for domain in domains:
self.authzr[domain] = self.acme.request_domain_challenges(
domain, self.account.regr.new_authzr_uri)
self._choose_challenges(domains)
# While there are still challenges remaining...
while self.dv_c or self.cont_c:
cont_resp, dv_resp = self._solve_challenges()
logger.info("Waiting for verification...")
# Send all Responses - this modifies dv_c and cont_c
self._respond(cont_resp, dv_resp, best_effort)
# Just make sure all decisions are complete.
self.verify_authzr_complete()
# Only return valid authorizations
return [authzr for authzr in self.authzr.values()
if authzr.body.status == messages.STATUS_VALID]
def _choose_challenges(self, domains):
"""Retrieve necessary challenges to satisfy server."""
logger.info("Performing the following challenges:")
for dom in domains:
path = gen_challenge_path(
self.authzr[dom].body.challenges,
self._get_chall_pref(dom),
self.authzr[dom].body.combinations)
dom_cont_c, dom_dv_c = self._challenge_factory(
dom, path)
self.dv_c.extend(dom_dv_c)
self.cont_c.extend(dom_cont_c)
def _solve_challenges(self):
"""Get Responses for challenges from authenticators."""
cont_resp = []
dv_resp = []
try:
if self.cont_c:
cont_resp = self.cont_auth.perform(self.cont_c)
if self.dv_c:
dv_resp = self.dv_auth.perform(self.dv_c)
# This will catch both specific types of errors.
except errors.AuthorizationError:
logger.critical("Failure in setting up challenges.")
logger.info("Attempting to clean up outstanding challenges...")
self._cleanup_challenges()
raise
assert len(cont_resp) == len(self.cont_c)
assert len(dv_resp) == len(self.dv_c)
return cont_resp, dv_resp
def _respond(self, cont_resp, dv_resp, best_effort):
"""Send/Receive confirmation of all challenges.
.. note:: This method also cleans up the auth_handler state.
"""
# TODO: chall_update is a dirty hack to get around acme-spec #105
chall_update = dict()
active_achalls = []
active_achalls.extend(
self._send_responses(self.dv_c, dv_resp, chall_update))
active_achalls.extend(
self._send_responses(self.cont_c, cont_resp, chall_update))
# Check for updated status...
try:
self._poll_challenges(chall_update, best_effort)
finally:
# This removes challenges from self.dv_c and self.cont_c
self._cleanup_challenges(active_achalls)
def _send_responses(self, achalls, resps, chall_update):
"""Send responses and make sure errors are handled.
:param dict chall_update: parameter that is updated to hold
authzr -> list of outstanding solved annotated challenges
"""
active_achalls = []
for achall, resp in itertools.izip(achalls, resps):
# Don't send challenges for None and False authenticator responses
if resp:
self.acme.answer_challenge(achall.challb, resp)
# TODO: answer_challenge returns challr, with URI,
# that can be used in _find_updated_challr
# comparisons...
active_achalls.append(achall)
if achall.domain in chall_update:
chall_update[achall.domain].append(achall)
else:
chall_update[achall.domain] = [achall]
return active_achalls
def _poll_challenges(
self, chall_update, best_effort, min_sleep=3, max_rounds=15):
"""Wait for all challenge results to be determined."""
dom_to_check = set(chall_update.keys())
comp_domains = set()
rounds = 0
while dom_to_check and rounds < max_rounds:
# TODO: Use retry-after...
time.sleep(min_sleep)
all_failed_achalls = set()
for domain in dom_to_check:
comp_achalls, failed_achalls = self._handle_check(
domain, chall_update[domain])
if len(comp_achalls) == len(chall_update[domain]):
comp_domains.add(domain)
elif not failed_achalls:
for achall, _ in comp_achalls:
chall_update[domain].remove(achall)
# We failed some challenges... damage control
else:
# Right now... just assume a loss and carry on...
if best_effort:
comp_domains.add(domain)
else:
all_failed_achalls.update(
updated for _, updated in failed_achalls)
if all_failed_achalls:
_report_failed_challs(all_failed_achalls)
raise errors.FailedChallenges(all_failed_achalls)
dom_to_check -= comp_domains
comp_domains.clear()
rounds += 1
def _handle_check(self, domain, achalls):
"""Returns tuple of ('completed', 'failed')."""
completed = []
failed = []
self.authzr[domain], _ = self.acme.poll(self.authzr[domain])
if self.authzr[domain].body.status == messages.STATUS_VALID:
return achalls, []
# Note: if the whole authorization is invalid, the individual failed
# challenges will be determined here...
for achall in achalls:
updated_achall = achall.update(challb=self._find_updated_challb(
self.authzr[domain], achall))
            # This does nothing for challenges that have yet to be decided.
if updated_achall.status == messages.STATUS_VALID:
completed.append((achall, updated_achall))
elif updated_achall.status == messages.STATUS_INVALID:
failed.append((achall, updated_achall))
return completed, failed
def _find_updated_challb(self, authzr, achall): # pylint: disable=no-self-use
"""Find updated challenge body within Authorization Resource.
        .. warning:: This assumes only one instance of each challenge type in
            each challenge resource.
:param .AuthorizationResource authzr: Authorization Resource
:param .AnnotatedChallenge achall: Annotated challenge for which
to get status
"""
for authzr_challb in authzr.body.challenges:
if type(authzr_challb.chall) is type(achall.challb.chall):
return authzr_challb
raise errors.AuthorizationError(
"Target challenge not found in authorization resource")
def _get_chall_pref(self, domain):
"""Return list of challenge preferences.
:param str domain: domain for which you are requesting preferences
"""
# Make sure to make a copy...
chall_prefs = []
chall_prefs.extend(self.cont_auth.get_chall_pref(domain))
chall_prefs.extend(self.dv_auth.get_chall_pref(domain))
return chall_prefs
def _cleanup_challenges(self, achall_list=None):
"""Cleanup challenges.
If achall_list is not provided, cleanup all achallenges.
"""
logger.info("Cleaning up challenges")
if achall_list is None:
dv_c = self.dv_c
cont_c = self.cont_c
else:
dv_c = [achall for achall in achall_list
if isinstance(achall.chall, challenges.DVChallenge)]
cont_c = [achall for achall in achall_list if isinstance(
achall.chall, challenges.ContinuityChallenge)]
if dv_c:
self.dv_auth.cleanup(dv_c)
for achall in dv_c:
self.dv_c.remove(achall)
if cont_c:
self.cont_auth.cleanup(cont_c)
for achall in cont_c:
self.cont_c.remove(achall)
def verify_authzr_complete(self):
"""Verifies that all authorizations have been decided.
:returns: Whether all authzr are complete
:rtype: bool
"""
for authzr in self.authzr.values():
if (authzr.body.status != messages.STATUS_VALID and
authzr.body.status != messages.STATUS_INVALID):
raise errors.AuthorizationError("Incomplete authorizations")
def _challenge_factory(self, domain, path):
"""Construct Namedtuple Challenges
:param str domain: domain of the enrollee
:param list path: List of indices from `challenges`.
:returns: dv_chall, list of DVChallenge type
:class:`letsencrypt.achallenges.Indexed`
cont_chall, list of ContinuityChallenge type
:class:`letsencrypt.achallenges.Indexed`
:rtype: tuple
:raises .errors.Error: if challenge type is not recognized
"""
dv_chall = []
cont_chall = []
for index in path:
challb = self.authzr[domain].body.challenges[index]
chall = challb.chall
achall = challb_to_achall(challb, self.account.key, domain)
if isinstance(chall, challenges.ContinuityChallenge):
cont_chall.append(achall)
elif isinstance(chall, challenges.DVChallenge):
dv_chall.append(achall)
return cont_chall, dv_chall
def challb_to_achall(challb, key, domain):
"""Converts a ChallengeBody object to an AnnotatedChallenge.
:param challb: ChallengeBody
:type challb: :class:`acme.messages.ChallengeBody`
:param key: Key
:type key: :class:`letsencrypt.le_util.Key`
:param str domain: Domain of the challb
:returns: Appropriate AnnotatedChallenge
:rtype: :class:`letsencrypt.achallenges.AnnotatedChallenge`
"""
chall = challb.chall
logger.info("%s challenge for %s", chall.typ, domain)
if isinstance(chall, challenges.DVSNI):
return achallenges.DVSNI(
challb=challb, domain=domain, key=key)
elif isinstance(chall, challenges.SimpleHTTP):
return achallenges.SimpleHTTP(
challb=challb, domain=domain, key=key)
elif isinstance(chall, challenges.DNS):
return achallenges.DNS(challb=challb, domain=domain)
elif isinstance(chall, challenges.RecoveryToken):
return achallenges.RecoveryToken(challb=challb, domain=domain)
elif isinstance(chall, challenges.RecoveryContact):
return achallenges.RecoveryContact(
challb=challb, domain=domain)
elif isinstance(chall, challenges.ProofOfPossession):
return achallenges.ProofOfPossession(
challb=challb, domain=domain)
else:
raise errors.Error(
"Received unsupported challenge of type: %s", chall.typ)
def gen_challenge_path(challbs, preferences, combinations):
"""Generate a plan to get authority over the identity.
    .. todo:: This could possibly be rewritten to use resolved_combinations.
:param tuple challbs: A tuple of challenges
(:class:`acme.messages.Challenge`) from
:class:`acme.messages.AuthorizationResource` to be
fulfilled by the client in order to prove possession of the
identifier.
:param list preferences: List of challenge preferences for domain
(:class:`acme.challenges.Challenge` subclasses)
:param tuple combinations: A collection of sets of challenges from
:class:`acme.messages.Challenge`, each of which would
be sufficient to prove possession of the identifier.
:returns: tuple of indices from ``challenges``.
:rtype: tuple
:raises letsencrypt.errors.AuthorizationError: If a
path cannot be created that satisfies the CA given the preferences and
combinations.
"""
if combinations:
return _find_smart_path(challbs, preferences, combinations)
else:
return _find_dumb_path(challbs, preferences)
def _find_smart_path(challbs, preferences, combinations):
"""Find challenge path with server hints.
Can be called if combinations is included. Function uses a simple
ranking system to choose the combo with the lowest cost.
"""
chall_cost = {}
max_cost = 1
for i, chall_cls in enumerate(preferences):
chall_cost[chall_cls] = i
max_cost += i
# max_cost is now equal to sum(indices) + 1
best_combo = []
# Set above completing all of the available challenges
best_combo_cost = max_cost
combo_total = 0
for combo in combinations:
for challenge_index in combo:
combo_total += chall_cost.get(challbs[
challenge_index].chall.__class__, max_cost)
if combo_total < best_combo_cost:
best_combo = combo
best_combo_cost = combo_total
combo_total = 0
if not best_combo:
msg = ("Client does not support any combination of challenges that "
"will satisfy the CA.")
logger.fatal(msg)
raise errors.AuthorizationError(msg)
return best_combo
def _find_dumb_path(challbs, preferences):
"""Find challenge path without server hints.
Should be called if the combinations hint is not included by the
server. This function returns the best path that does not contain
multiple mutually exclusive challenges.
"""
assert len(preferences) == len(set(preferences))
path = []
satisfied = set()
for pref_c in preferences:
for i, offered_challb in enumerate(challbs):
if (isinstance(offered_challb.chall, pref_c) and
is_preferred(offered_challb, satisfied)):
path.append(i)
satisfied.add(offered_challb)
return path
def mutually_exclusive(obj1, obj2, groups, different=False):
"""Are two objects mutually exclusive?"""
for group in groups:
obj1_present = False
obj2_present = False
for obj_cls in group:
obj1_present |= isinstance(obj1, obj_cls)
obj2_present |= isinstance(obj2, obj_cls)
if obj1_present and obj2_present and (
not different or not isinstance(obj1, obj2.__class__)):
return False
return True
def is_preferred(offered_challb, satisfied,
exclusive_groups=constants.EXCLUSIVE_CHALLENGES):
"""Return whether or not the challenge is preferred in path."""
for challb in satisfied:
if not mutually_exclusive(
offered_challb.chall, challb.chall, exclusive_groups,
different=True):
return False
return True
_ERROR_HELP_COMMON = (
"To fix these errors, please make sure that your domain name was entered "
"correctly and the DNS A/AAAA record(s) for that domain contains the "
"right IP address.")
_ERROR_HELP = {
"connection" :
_ERROR_HELP_COMMON + " Additionally, please check that your computer "
"has publicly routable IP address and no firewalls are preventing the "
"server from communicating with the client.",
"dnssec" :
_ERROR_HELP_COMMON + " Additionally, if you have DNSSEC enabled for "
"your domain, please ensure the signature is valid.",
"malformed" :
"To fix these errors, please make sure that you did not provide any "
"invalid information to the client and try running Let's Encrypt "
"again.",
"serverInternal" :
"Unfortunately, an error on the ACME server prevented you from completing "
"authorization. Please try again later.",
"tls" :
_ERROR_HELP_COMMON + " Additionally, please check that you have an up "
"to date TLS configuration that allows the server to communicate with "
"the Let's Encrypt client.",
"unauthorized" : _ERROR_HELP_COMMON,
"unknownHost" : _ERROR_HELP_COMMON,}
def _report_failed_challs(failed_achalls):
"""Notifies the user about failed challenges.
:param set failed_achalls: A set of failed
:class:`letsencrypt.achallenges.AnnotatedChallenge`.
"""
problems = dict()
for achall in failed_achalls:
if achall.error:
problems.setdefault(achall.error.typ, []).append(achall)
reporter = zope.component.getUtility(interfaces.IReporter)
for achalls in problems.itervalues():
reporter.add_message(
_generate_failed_chall_msg(achalls), reporter.MEDIUM_PRIORITY, True)
def _generate_failed_chall_msg(failed_achalls):
"""Creates a user friendly error message about failed challenges.
:param list failed_achalls: A list of failed
:class:`letsencrypt.achallenges.AnnotatedChallenge` with the same error
type.
:returns: A formatted error message for the client.
:rtype: str
"""
typ = failed_achalls[0].error.typ
msg = [
"The following '{0}' errors were reported by the server:".format(typ)]
problems = dict()
for achall in failed_achalls:
problems.setdefault(achall.error.description, set()).add(achall.domain)
for problem in problems:
msg.append("\n\nDomains: ")
msg.append(", ".join(sorted(problems[problem])))
msg.append("\nError: {0}".format(problem))
if typ in _ERROR_HELP:
msg.append("\n\n")
msg.append(_ERROR_HELP[typ])
return "".join(msg)
|
|
#!/usr/bin/python
#pylint: skip-file
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: 2.0
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: Tom Bamford
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
      - A hypervisor type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
sort:
description:
      - Optional attribute with which to sort the results.
      - If specifying 'tag', the 'sort_tag' parameter is required.
choices: ['name', 'description', 'tag']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
    ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
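# Search for x86_64 HVM AMIs owned by this account, keeping only the two
# newest by name (illustrative addition, not from the original module docs)
- ec2_ami_find:
    owner: self
    architecture: x86_64
    virtualization_type: hvm
    sort: name
    sort_order: descending
    sort_end: 2
  register: ami_find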
# Launch an EC2 instance
- ec2:
image: "{{ ami_search.results[0].ami_id }}"
instance_type: m3.medium
key_name: mykey
wait: yes
'''
try:
import boto.ec2
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
import json
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'description': image.description,
'is_public': image.is_public,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.compute import utils as compute_utils
from nova import context
from nova.network import linux_net
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import netutils
LOG = logging.getLogger(__name__)
firewall_opts = [
cfg.StrOpt('firewall_driver',
help='Firewall driver '
'(defaults to hypervisor specific iptables driver)'),
cfg.BoolOpt('allow_same_net_traffic',
default=True,
help='Whether to allow network traffic from same network'),
]
CONF = cfg.CONF
CONF.register_opts(firewall_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')
def load_driver(default, *args, **kwargs):
fw_class = importutils.import_class(CONF.firewall_driver or default)
return fw_class(*args, **kwargs)
class FirewallDriver(object):
"""Firewall Driver base class.
Defines methods that any driver providing security groups
and provider firewall functionality should implement.
"""
def __init__(self, virtapi):
self._virtapi = virtapi
def prepare_instance_filter(self, instance, network_info):
"""Prepare filters for the instance.
At this point, the instance isn't running yet.
"""
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
raise NotImplementedError()
def apply_instance_filter(self, instance, network_info):
"""Apply instance filter.
Once this method returns, the instance should be firewalled
appropriately. This method should as far as possible be a
no-op. It's vastly preferred to get everything set up in
prepare_instance_filter.
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store
Gets called when a rule has been added to or removed from
the security group.
"""
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""Refresh security group members from data store
Gets called when an instance gets added to or removed from
the security group.
"""
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
        group gains or loses a rule.
"""
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""Refresh common rules for all hosts/instances from data store.
Gets called when a rule has been added to or removed from
the list of rules (via admin api).
"""
raise NotImplementedError()
def setup_basic_filtering(self, instance, network_info):
"""Create rules to block spoofing and allow dhcp.
This gets called when spawning an instance, before
:py:meth:`prepare_instance_filter`.
"""
raise NotImplementedError()
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
raise NotImplementedError()
class IptablesFirewallDriver(FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
def __init__(self, virtapi, **kwargs):
super(IptablesFirewallDriver, self).__init__(virtapi)
self.iptables = linux_net.iptables_manager
self.instances = {}
self.network_infos = {}
self.basically_filtered = False
# Flags for DHCP request rule
self.dhcp_create = False
self.dhcp_created = False
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def setup_basic_filtering(self, instance, network_info):
pass
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def filter_defer_apply_on(self):
self.iptables.defer_apply_on()
def filter_defer_apply_off(self):
self.iptables.defer_apply_off()
def unfilter_instance(self, instance, network_info):
if self.instances.pop(instance['id'], None):
# NOTE(vish): use the passed info instead of the stored info
self.network_infos.pop(instance['id'])
self.remove_filters_for_instance(instance)
self.iptables.apply()
else:
LOG.info(_('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
def prepare_instance_filter(self, instance, network_info):
self.instances[instance['id']] = instance
self.network_infos[instance['id']] = network_info
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
LOG.debug(_('Filters added to instance'), instance=instance)
self.refresh_provider_fw_rules()
LOG.debug(_('Provider Firewall Rules refreshed'), instance=instance)
# Ensure that DHCP request rule is updated if necessary
if (self.dhcp_create and not self.dhcp_created):
self.iptables.ipv4['filter'].add_rule(
'INPUT',
'-s 0.0.0.0/32 -d 255.255.255.255/32 '
'-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
self.iptables.ipv4['filter'].add_rule(
'FORWARD',
'-s 0.0.0.0/32 -d 255.255.255.255/32 '
'-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
self.dhcp_created = True
self.iptables.apply()
def _create_filter(self, ips, chain_name):
return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
def _get_subnets(self, network_info, version):
subnets = []
for vif in network_info:
if 'network' in vif and 'subnets' in vif['network']:
for subnet in vif['network']['subnets']:
if subnet['version'] == version:
subnets.append(subnet)
return subnets
def _filters_for_instance(self, chain_name, network_info):
"""Creates a rule corresponding to each ip that defines a
jump to the corresponding instance - chain for all the traffic
destined to that ip.
"""
v4_subnets = self._get_subnets(network_info, 4)
v6_subnets = self._get_subnets(network_info, 6)
ips_v4 = [ip['address'] for subnet in v4_subnets
for ip in subnet['ips']]
ipv4_rules = self._create_filter(ips_v4, chain_name)
ipv6_rules = ips_v6 = []
if CONF.use_ipv6:
if v6_subnets:
ips_v6 = [ip['address'] for subnet in v6_subnets
for ip in subnet['ips']]
ipv6_rules = self._create_filter(ips_v6, chain_name)
return ipv4_rules, ipv6_rules
def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
def add_filters_for_instance(self, instance, inst_ipv4_rules,
inst_ipv6_rules):
network_info = self.network_infos[instance['id']]
chain_name = self._instance_chain_name(instance)
if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain(chain_name)
self.iptables.ipv4['filter'].add_chain(chain_name)
ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
network_info)
self._add_filters('local', ipv4_rules, ipv6_rules)
self._add_filters(chain_name, inst_ipv4_rules, inst_ipv6_rules)
def remove_filters_for_instance(self, instance):
chain_name = self._instance_chain_name(instance)
self.iptables.ipv4['filter'].remove_chain(chain_name)
if CONF.use_ipv6:
self.iptables.ipv6['filter'].remove_chain(chain_name)
@staticmethod
def _security_group_chain_name(security_group_id):
return 'nova-sg-%s' % (security_group_id,)
def _instance_chain_name(self, instance):
return 'inst-%s' % (instance['id'],)
def _do_basic_rules(self, ipv4_rules, ipv6_rules, network_info):
# Always drop invalid packets
        ipv4_rules += ['-m state --state INVALID -j DROP']
        ipv6_rules += ['-m state --state INVALID -j DROP']
# Allow established connections
ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
# Pass through provider-wide drops
ipv4_rules += ['-j $provider']
ipv6_rules += ['-j $provider']
def _do_dhcp_rules(self, ipv4_rules, network_info):
v4_subnets = self._get_subnets(network_info, 4)
dhcp_servers = [subnet.get_meta('dhcp_server')
for subnet in v4_subnets if subnet.get_meta('dhcp_server')]
for dhcp_server in dhcp_servers:
if dhcp_server:
ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
'-j ACCEPT' % (dhcp_server,))
self.dhcp_create = True
def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info):
v4_subnets = self._get_subnets(network_info, 4)
v6_subnets = self._get_subnets(network_info, 6)
cidrs = [subnet['cidr'] for subnet in v4_subnets]
for cidr in cidrs:
ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
if CONF.use_ipv6:
cidrv6s = [subnet['cidr'] for subnet in v6_subnets]
for cidrv6 in cidrv6s:
ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
def _do_ra_rules(self, ipv6_rules, network_info):
v6_subnets = self._get_subnets(network_info, 6)
gateways_v6 = [subnet['gateway']['address'] for subnet in v6_subnets]
for gateway_v6 in gateways_v6:
ipv6_rules.append(
'-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
def _build_icmp_rule(self, rule, version):
icmp_type = rule['from_port']
icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
else:
icmp_type_arg = '%s' % icmp_type
if not icmp_code == -1:
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
if version == 4:
return ['-m', 'icmp', '--icmp-type', icmp_type_arg]
elif version == 6:
return ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg]
# return empty list if icmp_type == -1
return []
def _build_tcp_udp_rule(self, rule, version):
if rule['from_port'] == rule['to_port']:
return ['--dport', '%s' % (rule['from_port'],)]
else:
return ['-m', 'multiport',
'--dports', '%s:%s' % (rule['from_port'],
rule['to_port'])]
def instance_rules(self, instance, network_info):
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
# Initialize with basic rules
self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
# Set up rules to allow traffic to/from DHCP server
self._do_dhcp_rules(ipv4_rules, network_info)
        # Allow project network traffic
if CONF.allow_same_net_traffic:
self._do_project_network_rules(ipv4_rules, ipv6_rules,
network_info)
# We wrap these in CONF.use_ipv6 because they might cause
# a DB lookup. The other ones are just list operations, so
# they're not worth the clutter.
if CONF.use_ipv6:
# Allow RA responses
self._do_ra_rules(ipv6_rules, network_info)
security_groups = self._virtapi.security_group_get_by_instance(
ctxt, instance)
# then, security group chains and rules
for security_group in security_groups:
rules = self._virtapi.security_group_rule_get_by_security_group(
ctxt, security_group)
for rule in rules:
LOG.debug(_('Adding security group rule: %r'), rule,
instance=instance)
if not rule['cidr']:
version = 4
else:
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules
else:
fw_rules = ipv6_rules
protocol = rule['protocol']
if protocol:
protocol = rule['protocol'].lower()
if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
args = ['-j ACCEPT']
if protocol:
args += ['-p', protocol]
if protocol in ['udp', 'tcp']:
args += self._build_tcp_udp_rule(rule, version)
elif protocol == 'icmp':
args += self._build_icmp_rule(rule, version)
if rule['cidr']:
LOG.debug('Using cidr %r', rule['cidr'], instance=instance)
args += ['-s', rule['cidr']]
fw_rules += [' '.join(args)]
else:
if rule['grantee_group']:
                        for inst in rule['grantee_group']['instances']:
                            if inst['info_cache']['deleted']:
                                LOG.debug('ignoring deleted cache')
                                continue
                            nw_info = compute_utils.get_nw_info_for_instance(
                                inst)
ips = [ip['address']
for ip in nw_info.fixed_ips()
if ip['version'] == version]
LOG.debug('ips: %r', ips, instance=instance)
for ip in ips:
subrule = args + ['-s %s' % ip]
fw_rules += [' '.join(subrule)]
LOG.debug('Using fw_rules: %r', fw_rules, instance=instance)
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
return ipv4_rules, ipv6_rules
def instance_filter_exists(self, instance, network_info):
pass
def refresh_security_group_members(self, security_group):
self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
def refresh_security_group_rules(self, security_group):
self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
def refresh_instance_security_rules(self, instance):
self.do_refresh_instance_rules(instance)
self.iptables.apply()
@utils.synchronized('iptables', external=True)
def _inner_do_refresh_rules(self, instance, ipv4_rules,
ipv6_rules):
self.remove_filters_for_instance(instance)
self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
def do_refresh_security_group_rules(self, security_group):
for instance in self.instances.values():
network_info = self.network_infos[instance['id']]
ipv4_rules, ipv6_rules = self.instance_rules(instance,
network_info)
self._inner_do_refresh_rules(instance, ipv4_rules, ipv6_rules)
def do_refresh_instance_rules(self, instance):
network_info = self.network_infos[instance['id']]
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
self._inner_do_refresh_rules(instance, ipv4_rules, ipv6_rules)
def refresh_provider_fw_rules(self):
"""See :class:`FirewallDriver` docs."""
self._do_refresh_provider_fw_rules()
self.iptables.apply()
@utils.synchronized('iptables', external=True)
def _do_refresh_provider_fw_rules(self):
"""Internal, synchronized version of refresh_provider_fw_rules."""
self._purge_provider_fw_rules()
self._build_provider_fw_rules()
def _purge_provider_fw_rules(self):
"""Remove all rules from the provider chains."""
self.iptables.ipv4['filter'].empty_chain('provider')
if CONF.use_ipv6:
self.iptables.ipv6['filter'].empty_chain('provider')
def _build_provider_fw_rules(self):
"""Create all rules for the provider IP DROPs."""
self.iptables.ipv4['filter'].add_chain('provider')
if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain('provider')
ipv4_rules, ipv6_rules = self._provider_rules()
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule('provider', rule)
if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule('provider', rule)
def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6."""
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
if version == 4:
fw_rules = ipv4_rules
else:
fw_rules = ipv6_rules
protocol = rule['protocol']
if version == 6 and protocol == 'icmp':
protocol = 'icmpv6'
args = ['-p', protocol, '-s', rule['cidr']]
if protocol in ['udp', 'tcp']:
if rule['from_port'] == rule['to_port']:
args += ['--dport', '%s' % (rule['from_port'],)]
else:
args += ['-m', 'multiport',
'--dports', '%s:%s' % (rule['from_port'],
rule['to_port'])]
elif protocol == 'icmp':
icmp_type = rule['from_port']
icmp_code = rule['to_port']
if icmp_type == -1:
icmp_type_arg = None
else:
icmp_type_arg = '%s' % icmp_type
if not icmp_code == -1:
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j DROP']
fw_rules += [' '.join(args)]
return ipv4_rules, ipv6_rules
class NoopFirewallDriver(object):
"""Firewall driver which just provides No-op methods."""
def __init__(self, *args, **kwargs):
pass
def _noop(self, *args, **kwargs):
pass
def __getattr__(self, key):
return self._noop
def instance_filter_exists(self, instance, network_info):
return True
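# ----------------------------------------------------------------------------
# Illustrative sketch only (not part of the nova driver above): the provider
# rule handling in _provider_rules() maps each rule dict onto a flat iptables
# argument string -- tcp/udp rules get --dport/--dports, icmp rules get an
# --icmp-type (or --icmpv6-type on IPv6), and every rule ends in "-j DROP".
# The helper name and the bare-dict input below are assumptions made for the
# example, not nova APIs.
def _sketch_provider_rule_args(rule, version):
    """Rebuild the iptables argument list for one provider firewall rule."""
    args = ['-p', rule['protocol'], '-s', rule['cidr']]
    if rule['protocol'] in ('udp', 'tcp'):
        if rule['from_port'] == rule['to_port']:
            args += ['--dport', '%s' % rule['from_port']]
        else:
            args += ['-m', 'multiport',
                     '--dports', '%s:%s' % (rule['from_port'], rule['to_port'])]
    elif rule['protocol'] in ('icmp', 'icmpv6'):
        icmp_type_arg = None
        if rule['from_port'] != -1:
            icmp_type_arg = '%s' % rule['from_port']
            if rule['to_port'] != -1:
                icmp_type_arg += '/%s' % rule['to_port']
        if icmp_type_arg:
            if version == 4:
                args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
            else:
                args += ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg]
    return args + ['-j DROP']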
"""
.. module:: dj-stripe.tests.__init__
:synopsis: dj-stripe test fakes
.. moduleauthor:: Alex Kavanaugh (@kavdev)
.. moduleauthor:: Lee Skillen (@lskillen)
A Fake or multiple fakes for each stripe object.
Originally collected using API VERSION 2015-07-28.
Updated to API VERSION 2016-03-07 with bogus fields.
"""
from copy import deepcopy
from datetime import datetime
from django.utils import timezone, dateformat
from djstripe.webhooks import TEST_EVENT_ID
FUTURE_DATE = datetime(2100, 4, 30, tzinfo=timezone.utc)
def datetime_to_unix(datetime_):
return int(dateformat.format(datetime_, 'U'))
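# Quick reference (assumed values, doctest-style comment only): Django's
# dateformat 'U' specifier renders an aware datetime as its Unix timestamp,
# so the FUTURE_DATE constant above converts like this:
#
#     >>> datetime_to_unix(FUTURE_DATE)
#     4112726400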
class StripeList(dict):
object = "list"
has_more = False
url = "/v1/fakes"
def __init__(self, data):
self.data = data
    def __getitem__(self, key):
        return getattr(self, key)
def auto_paging_iter(self):
return self.data
@property
def total_count(self):
return len(self.data)
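# Usage sketch (assumed, comments only): StripeList mimics the shape of a
# Stripe list response, so the fakes defined below can be wrapped and read
# back either as attributes or as keys:
#
#     >>> fakes = StripeList(data=["obj_a", "obj_b"])
#     >>> fakes.total_count
#     2
#     >>> fakes["object"]
#     'list'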
FAKE_BALANCE_TRANSACTION = {
"id": "txn_16YKQi2eZvKYlo2CNx26h2Wz",
"object": "balance_transaction",
"amount": 3340,
"available_on": 1439769600,
"created": 1439229084,
"currency": "usd",
"description": "Charge for RelyMD consultation for Rakesh Mohan",
"fee": 127,
"fee_details": [
{
"amount": 127,
"currency": "usd",
"type": "stripe_fee",
"description": "Stripe processing fees",
"application": None,
}
],
"net": 3213,
"source": "ch_16YKQi2eZvKYlo2CrCuzbJQx",
"sourced_transfers": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers?source_transaction=ch_16YKQi2eZvKYlo2CrCuzbJQx",
"data": []
},
"status": "pending",
"type": "charge",
}
FAKE_BALANCE_TRANSACTION_II = {
"id": "txn_16g5h62eZvKYlo2CQ2AHA89s",
"object": "balance_transaction",
"amount": 65400,
"available_on": 1441670400,
"created": 1441079064,
"currency": "usd",
"description": None,
"fee": 1927,
"fee_details": [
{
"amount": 1927,
"currency": "usd",
"type": "stripe_fee",
"description": "Stripe processing fees",
"application": None,
}
],
"net": 63473,
"source": "ch_16g5h62eZvKYlo2CMRXkSqa0",
"sourced_transfers": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers?source_transaction=ch_16g5h62eZvKYlo2CMRXkSqa0",
"data": [],
},
"status": "pending",
"type": "charge",
}
FAKE_BALANCE_TRANSACTION_III = {
"id": "txn_16g5h62eZvKYlo2CQ2AHA89s",
"object": "balance_transaction",
"amount": 2000,
"available_on": 1441670400,
"created": 1441079064,
"currency": "usd",
"description": None,
"fee": 1927,
"fee_details": [
{
"amount": 1927,
"currency": "usd",
"type": "stripe_fee",
"description": "Stripe processing fees",
"application": None,
}
],
"net": 73,
"source": "ch_16g5h62eZvKYlo2CMRXkSqa0",
"sourced_transfers": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers?source_transaction=ch_16g5h62eZvKYlo2CMRXkSqa0",
"data": [],
},
"status": "pending",
"type": "charge",
}
FAKE_BALANCE_TRANSACTION_IV = {
"id": "txn_16g5h62eZvKYlo2CQ2AHA89s",
"object": "balance_transaction",
"amount": 19010,
"available_on": 1441670400,
"created": 1441079064,
"currency": "usd",
"description": None,
"fee": 1927,
"fee_details": [
{
"amount": 1927,
"currency": "usd",
"type": "stripe_fee",
"description": "Stripe processing fees",
"application": None,
}
],
"net": 17083,
"source": "ch_16g5h62eZvKYlo2CMRXkSqa0",
"sourced_transfers": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers?source_transaction=ch_16g5h62eZvKYlo2CMRXkSqa0",
"data": [],
},
"status": "pending",
"type": "charge",
}
FAKE_BANK_ACCOUNT = {
"id": "ba_16hTzo2eZvKYlo2CeSjfb0tS",
"object": "bank_account",
"account_holder_name": None,
"account_holder_type": None,
"bank_name": "STRIPE TEST BANK",
"country": "US",
"currency": "usd",
"fingerprint": "1JWtPxqbdX5Gamtc",
"last4": "6789",
"routing_number": "110000000",
"status": "new",
}
FAKE_BANK_ACCOUNT_II = {
"id": "ba_17O4Tz2eZvKYlo2CMYsxroV5",
"object": "bank_account",
"account_holder_name": None,
"account_holder_type": None,
"bank_name": None,
"country": "US",
"currency": "usd",
"fingerprint": "1JWtPxqbdX5Gamtc",
"last4": "6789",
"routing_number": "110000000",
"status": "new",
}
class CardDict(dict):
def delete(self):
return self
FAKE_CARD = CardDict({
"id": "card_16YKQh2eZvKYlo2Cblc5Feoo",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_6lsBvm5rJ0zyHc",
"cvc_check": "pass",
"dynamic_last4": None,
"exp_month": 12,
"exp_year": 2016,
"funding": "credit",
"last4": "4242",
"metadata": {},
"name": "alex-nesnes@hotmail.fr",
"tokenization_method": None,
})
FAKE_CARD_II = CardDict({
"id": "card_14Lc4K2eZvKYlo2CcXyAXlDR",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_4UbFSo9tl62jqj",
"cvc_check": None,
"dynamic_last4": None,
"exp_month": 7,
"exp_year": 2015,
"fingerprint": "Xt5EWLLDS7FJjR1c",
"funding": "credit",
"last4": "4242",
"metadata": {},
"name": None,
"tokenization_method": None,
})
FAKE_CARD_III = CardDict({
"id": "card_17PLiR2eZvKYlo2CRwTCUAdZ",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "American Express",
"country": "US",
"customer": None,
"cvc_check": "unchecked",
"dynamic_last4": None,
"exp_month": 7,
"exp_year": 2019,
"fingerprint": "Xt5EWLLDS7FJjR1c",
"funding": "credit",
"last4": "1005",
"metadata": {},
"name": None,
"tokenization_method": None,
})
FAKE_CARD_IV = CardDict({
"id": "card_186Qdm2eZvKYlo2CInjNRrRE",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": None,
"cvc_check": "unchecked",
"dynamic_last4": None,
"exp_month": 6,
"exp_year": 2018,
"funding": "credit",
"last4": "4242",
"metadata": {},
"name": None,
"tokenization_method": None,
})
FAKE_CARD_V = CardDict({
"id": "card_16YKQh2eZeZvKYlo2CInFeoo",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_6lsBvm5rJ0zyHc",
"cvc_check": "pass",
"dynamic_last4": None,
"exp_month": 5,
"exp_year": 2015,
"funding": "credit",
"last4": "4242",
"metadata": {},
"name": None,
"tokenization_method": None,
})
class ChargeDict(dict):
def refund(self, amount=None, reason=None):
self.update({"refunded": True, "amount_refunded": amount})
return self
def capture(self):
self.update({"captured": True})
return self
FAKE_CHARGE = ChargeDict({
"id": "ch_16YKQi2eZvKYlo2CrCuzbJQx",
"object": "charge",
"amount": 2200,
"amount_refunded": 0,
"application_fee": None,
"balance_transaction": deepcopy(FAKE_BALANCE_TRANSACTION),
"captured": True,
"created": 1439229084,
"currency": "usd",
"customer": "cus_6lsBvm5rJ0zyHc",
"description": "VideoDoc consultation for ivanp0001 berkp0001",
"destination": None,
"dispute": None,
"failure_code": None,
"failure_message": None,
"fraud_details": {},
"invoice": "in_7udnik28sj829dj",
"livemode": False,
"metadata": {},
"order": None,
"paid": True,
"receipt_email": None,
"receipt_number": None,
"refunded": False,
"refunds": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/charges/ch_16YKQi2eZvKYlo2CrCuzbJQx/refunds",
"data": []
},
"shipping": None,
"source": deepcopy(FAKE_CARD),
"source_transfer": None,
"statement_descriptor": None,
"status": "succeeded",
})
FAKE_CHARGE_II = ChargeDict({
"id": "ch_16ag432eZvKYlo2CGDe6lvVs",
"object": "charge",
"amount": 3000,
"amount_refunded": 0,
"application_fee": None,
"balance_transaction": deepcopy(FAKE_BALANCE_TRANSACTION),
"captured": False,
"created": 1439788903,
"currency": "usd",
"customer": "cus_4UbFSo9tl62jqj",
"description": None,
"destination": None,
"dispute": None,
"failure_code": "expired_card",
"failure_message": "Your card has expired.",
"fraud_details": {},
"invoice": "in_16af5A2eZvKYlo2CJjANLL81",
"livemode": False,
"metadata": {},
"order": None,
"paid": False,
"receipt_email": None,
"receipt_number": None,
"refunded": False,
"refunds": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/charges/ch_16ag432eZvKYlo2CGDe6lvVs/refunds",
"data": [],
},
"shipping": None,
"source": deepcopy(FAKE_CARD_II),
"source_transfer": None,
"statement_descriptor": None,
"status": "failed",
})
FAKE_COUPON = {
"id": "fake-coupon-1",
"object": "coupon",
"amount_off": None,
"created": 1490157071,
"currency": None,
"duration": "once",
"duration_in_months": None,
"livemode": False,
"max_redemptions": None,
"metadata": {},
"percent_off": 1,
"redeem_by": None,
"times_redeemed": 0,
"valid": True,
}
FAKE_PLAN = {
"id": "gold21323",
"object": "plan",
"amount": 2000,
"created": 1386247539,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": False,
"metadata": {},
"name": "New plan name",
"statement_descriptor": None,
"trial_period_days": None,
}
FAKE_PLAN_II = {
"id": "silver41294",
"object": "plan",
"amount": 4000,
"created": 1386247539,
"currency": "usd",
"interval": "week",
"interval_count": 1,
"livemode": False,
"metadata": {},
"name": "New plan name",
"statement_descriptor": None,
"trial_period_days": 12,
}
class SubscriptionDict(dict):
def __setattr__(self, name, value):
        if isinstance(value, datetime):
value = datetime_to_unix(value)
# Special case for plan
if name == "plan":
for plan in [FAKE_PLAN, FAKE_PLAN_II]:
if value == plan["id"]:
value = plan
self[name] = value
def delete(self, **kwargs):
if "at_period_end" in kwargs:
self["cancel_at_period_end"] = kwargs["at_period_end"]
return self
def save(self):
return self
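# Behaviour sketch (assumed, comments only): SubscriptionDict.__setattr__
# writes into the dict rather than onto the instance, converting datetimes to
# Unix timestamps and resolving known plan ids to the full plan fakes:
#
#     >>> sub = SubscriptionDict({})
#     >>> sub.trial_end = FUTURE_DATE
#     >>> sub["trial_end"]
#     4112726400
#     >>> sub.plan = "gold21323"
#     >>> sub["plan"]["amount"]
#     2000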
FAKE_SUBSCRIPTION = SubscriptionDict({
"id": "sub_6lsC8pt7IcFpjA",
"object": "subscription",
"application_fee_percent": None,
"cancel_at_period_end": False,
"canceled_at": None,
"current_period_end": 1441907581,
"current_period_start": 1439229181,
"customer": "cus_6lsBvm5rJ0zyHc",
"discount": None,
"ended_at": None,
"metadata": {},
"plan": deepcopy(FAKE_PLAN),
"quantity": 1,
"start": 1439229181,
"status": "active",
"tax_percent": None,
"trial_end": None,
"trial_start": None,
})
FAKE_SUBSCRIPTION_CANCELED = deepcopy(FAKE_SUBSCRIPTION)
FAKE_SUBSCRIPTION_CANCELED["status"] = "canceled"
FAKE_SUBSCRIPTION_CANCELED["canceled_at"] = 1440907580
FAKE_SUBSCRIPTION_CANCELED_AT_PERIOD_END = deepcopy(FAKE_SUBSCRIPTION)
FAKE_SUBSCRIPTION_CANCELED_AT_PERIOD_END["canceled_at"] = 1440907580
FAKE_SUBSCRIPTION_CANCELED_AT_PERIOD_END["cancel_at_period_end"] = True
FAKE_SUBSCRIPTION_II = SubscriptionDict({
"id": "sub_6mkwMbhaZF9jih",
"object": "subscription",
"application_fee_percent": None,
"cancel_at_period_end": False,
"canceled_at": None,
"current_period_end": 1442111228,
"current_period_start": 1439432828,
"customer": "cus_6lsBvm5rJ0zyHc",
"discount": None,
"ended_at": None,
"metadata": {},
"plan": deepcopy(FAKE_PLAN_II),
"quantity": 1,
"start": 1386247539,
"status": "active",
"tax_percent": None,
"trial_end": None,
"trial_start": None,
})
FAKE_SUBSCRIPTION_III = SubscriptionDict({
"id": "sub_8NDptncNY485qZ",
"object": "subscription",
"application_fee_percent": None,
"cancel_at_period_end": False,
"canceled_at": None,
"current_period_end": 1464821382,
"current_period_start": 1462142982,
"customer": "cus_4UbFSo9tl62jqj",
"discount": None,
"ended_at": None,
"metadata": {},
"plan": deepcopy(FAKE_PLAN),
"quantity": 1,
"start": 1462142982,
"status": "active",
"tax_percent": None,
"trial_end": None,
"trial_start": None,
})
class Sources(object):
def __init__(self, card_fakes):
self.card_fakes = card_fakes
def create(self, source, api_key=None):
for fake_card in self.card_fakes:
if fake_card["id"] == source:
return fake_card
def retrieve(self, id, expand=None): # noqa
for fake_card in self.card_fakes:
if fake_card["id"] == id:
return fake_card
def list(self, **kwargs):
return StripeList(data=self.card_fakes)
class CustomerDict(dict):
def save(self):
return self
def delete(self):
return self
@property
def sources(self):
return Sources(card_fakes=self["sources"]["data"])
FAKE_CUSTOMER = CustomerDict({
"id": "cus_6lsBvm5rJ0zyHc",
"object": "customer",
"account_balance": 0,
"created": 1439229084,
"currency": "usd",
"default_source": deepcopy(FAKE_CARD),
"delinquent": False,
"description": "Michael Smith",
"discount": None,
"email": "michael.smith@example.com",
"livemode": False,
"metadata": {},
"shipping": None,
"sources": {
"object": "list",
"total_count": 2,
"has_more": False,
"url": "/v1/customers/cus_6lsBvm5rJ0zyHc/sources",
"data": [deepcopy(FAKE_CARD), deepcopy(FAKE_CARD_V)]
},
"subscriptions": {
"object": "list",
"total_count": 2,
"has_more": False,
"url": "/v1/customers/cus_6lsBvm5rJ0zyHc/subscriptions",
"data": [deepcopy(FAKE_SUBSCRIPTION), deepcopy(FAKE_SUBSCRIPTION_II)]
},
})
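# Usage sketch (assumed, comments only): CustomerDict.sources wraps the fake
# card data so tests can exercise retrieval the way stripe-python would:
#
#     >>> FAKE_CUSTOMER.sources.retrieve("card_16YKQh2eZvKYlo2Cblc5Feoo")["last4"]
#     '4242'
#     >>> FAKE_CUSTOMER.sources.list().total_count
#     2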
FAKE_CUSTOMER_II = CustomerDict({
"id": "cus_4UbFSo9tl62jqj",
"object": "customer",
"account_balance": 0,
"created": 1439229084,
"currency": "usd",
"default_source": deepcopy(FAKE_CARD_II),
"delinquent": False,
"description": "John Snow",
"discount": None,
"email": "john.snow@thewall.com",
"livemode": False,
"metadata": {},
"shipping": None,
"sources": {
"object": "list",
"total_count": 1,
"has_more": False,
"url": "/v1/customers/cus_4UbFSo9tl62jqj/sources",
"data": [deepcopy(FAKE_CARD_II)]
},
"subscriptions": {
"object": "list",
"total_count": 1,
"has_more": False,
"url": "/v1/customers/cus_4UbFSo9tl62jqj/subscriptions",
"data": [deepcopy(FAKE_SUBSCRIPTION_III)]
},
})
FAKE_DISCOUNT_CUSTOMER = {
"object": "discount",
"coupon": deepcopy(FAKE_COUPON),
"customer": FAKE_CUSTOMER["id"],
"start": 1493206114,
"end": None,
"subscription": None,
}
class InvoiceDict(dict):
def pay(self):
return self
FAKE_INVOICE = InvoiceDict({
"id": "in_16YHls2eZvKYlo2CwwH968Mc",
"object": "invoice",
"amount_due": 2000,
"application_fee": None,
"attempt_count": 1,
"attempted": True,
"charge": FAKE_CHARGE["id"],
"closed": True,
"currency": "usd",
"customer": "cus_6lsBvm5rJ0zyHc",
"date": 1439218864,
"description": None,
"discount": None,
"ending_balance": 0,
"forgiven": False,
"lines": {
"data": [
{
"id": FAKE_SUBSCRIPTION["id"],
"object": "line_item",
"amount": 2000,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {},
"period": {
"start": 1441907581,
"end": 1444499581
},
"plan": deepcopy(FAKE_PLAN),
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription",
}
],
"total_count": 1,
"object": "list",
"url": "/v1/invoices/in_16YHls2eZvKYlo2CwwH968Mc/lines",
},
"livemode": False,
"metadata": {},
"next_payment_attempt": None,
"paid": True,
"period_end": 1439218689,
"period_start": 1439132289,
"receipt_number": None,
"starting_balance": 0,
"statement_descriptor": None,
"subscription": FAKE_SUBSCRIPTION["id"],
"subtotal": 2000,
"tax": None,
"tax_percent": None,
"total": 2000,
"webhooks_delivered_at": 1439218870,
})
FAKE_INVOICE_II = InvoiceDict({
"id": "in_16af5A2eZvKYlo2CJjANLL81",
"object": "invoice",
"amount_due": 3000,
"application_fee": None,
"attempt_count": 1,
"attempted": True,
"charge": FAKE_CHARGE_II["id"],
"closed": False,
"currency": "usd",
"customer": "cus_4UbFSo9tl62jqj",
"date": 1439785128,
"description": None,
"discount": None,
"ending_balance": 0,
"forgiven": False,
"lines": {
"data": [
{
"id": FAKE_SUBSCRIPTION_III["id"],
"object": "line_item",
"amount": 2000,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {},
"period": {
"start": 1442469907,
"end": 1445061907
},
"plan": deepcopy(FAKE_PLAN),
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription",
}
],
"total_count": 1,
"object": "list",
"url": "/v1/invoices/in_16af5A2eZvKYlo2CJjANLL81/lines",
},
"livemode": False,
"metadata": {},
"next_payment_attempt": 1440048103,
"paid": False,
"period_end": 1439784771,
"period_start": 1439698371,
"receipt_number": None,
"starting_balance": 0,
"statement_descriptor": None,
"subscription": FAKE_SUBSCRIPTION_III["id"],
"subtotal": 3000,
"tax": None,
"tax_percent": None,
"total": 3000,
"webhooks_delivered_at": 1439785139,
})
FAKE_INVOICE_III = InvoiceDict({
"id": "in_16Z9dP2eZvKYlo2CgFHgFx2Z",
"object": "invoice",
"amount_due": 0,
"application_fee": None,
"attempt_count": 0,
"attempted": True,
"charge": None,
"closed": False,
"currency": "usd",
"customer": "cus_6lsBvm5rJ0zyHc",
"date": 1439425915,
"description": None,
"discount": None,
"ending_balance": 20,
"forgiven": False,
"lines": {
"data": [
{
"id": FAKE_SUBSCRIPTION["id"],
"object": "line_item",
"amount": 2000,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {},
"period": {
"start": 1442111228,
"end": 1444703228
},
"plan": deepcopy(FAKE_PLAN),
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription",
}
],
"total_count": 1,
"object": "list",
"url": "/v1/invoices/in_16Z9dP2eZvKYlo2CgFHgFx2Z/lines",
},
"livemode": False,
"metadata": {},
"next_payment_attempt": None,
"paid": False,
"period_end": 1439424571,
"period_start": 1436746171,
"receipt_number": None,
"starting_balance": 0,
"statement_descriptor": None,
"subscription": FAKE_SUBSCRIPTION["id"],
"subtotal": 20,
"tax": None,
"tax_percent": None,
"total": 20,
"webhooks_delivered_at": 1439426955,
})
FAKE_UPCOMING_INVOICE = InvoiceDict({
"id": "in",
"object": "invoice",
"amount_due": 2000,
"application_fee": None,
"attempt_count": 1,
"attempted": False,
"charge": None,
"closed": False,
"currency": "usd",
"customer": FAKE_CUSTOMER["id"],
"date": 1439218864,
"description": None,
"discount": None,
"ending_balance": None,
"forgiven": False,
"lines": {
"data": [
{
"id": FAKE_SUBSCRIPTION["id"],
"object": "line_item",
"amount": 2000,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {},
"period": {
"start": 1441907581,
"end": 1444499581
},
"plan": deepcopy(FAKE_PLAN),
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription",
}
],
"total_count": 1,
"object": "list",
"url": "/v1/invoices/in_16YHls2eZvKYlo2CwwH968Mc/lines",
},
"livemode": False,
"metadata": {},
"next_payment_attempt": 1439218689,
"paid": False,
"period_end": 1439218689,
"period_start": 1439132289,
"receipt_number": None,
"starting_balance": 0,
"statement_descriptor": None,
"subscription": FAKE_SUBSCRIPTION["id"],
"subtotal": 2000,
"tax": None,
"tax_percent": None,
"total": 2000,
"webhooks_delivered_at": 1439218870,
})
FAKE_INVOICEITEM = {
"id": "ii_16XVTY2eZvKYlo2Cxz5n3RaS",
"object": "invoiceitem",
"amount": 2000,
"currency": "usd",
"customer": FAKE_CUSTOMER_II["id"],
"date": 1439033216,
"description": "One-time setup fee",
"discountable": True,
"invoice": FAKE_INVOICE_II["id"],
"livemode": False,
"metadata": {
"key1": "value1",
"key2": "value2"
},
"period": {
"start": 1439033216,
"end": 1439033216,
},
"plan": None,
"proration": False,
"quantity": None,
"subscription": None,
}
FAKE_INVOICEITEM_II = {
"id": "ii_16XVTY2eZvKYlo2Cxz5n3RaS",
"object": "invoiceitem",
"amount": 2000,
"currency": "usd",
"customer": FAKE_CUSTOMER["id"],
"date": 1439033216,
"description": "One-time setup fee",
"discountable": True,
"invoice": FAKE_INVOICE["id"],
"livemode": False,
"metadata": {
"key1": "value1",
"key2": "value2"
},
"period": {
"start": 1439033216,
"end": 1439033216,
},
"plan": None,
"proration": False,
"quantity": None,
"subscription": None,
}
FAKE_TRANSFER = {
"id": "tr_16Y9BK2eZvKYlo2CR0ySu1BA",
"object": "transfer",
"amount": 100,
"amount_reversed": 0,
"application_fee": None,
"balance_transaction": deepcopy(FAKE_BALANCE_TRANSACTION_II),
"created": 1439185846,
"currency": "usd",
"date": 1439185846,
"description": "Test description - 1439185984",
"destination": "acct_16Y9B9Fso9hLaeLu",
"destination_payment": "py_16Y9BKFso9hLaeLueFmWAYUi",
"failure_code": None,
"failure_message": None,
"livemode": False,
"metadata": {},
"recipient": None,
"reversals": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers/tr_16Y9BK2eZvKYlo2CR0ySu1BA/reversals",
"data": [],
},
"reversed": False,
"source_transaction": None,
"source_type": "bank_account",
"statement_descriptor": None,
"status": "paid",
"type": "stripe_account",
}
FAKE_TRANSFER_II = {
"id": "tr_16hTzv2eZvKYlo2CWuyMmuvV",
"object": "transfer",
"amount": 2000,
"amount_reversed": 0,
"application_fee": None,
"balance_transaction": deepcopy(FAKE_BALANCE_TRANSACTION_III),
"bank_account": deepcopy(FAKE_BANK_ACCOUNT),
"created": 1440420000,
"currency": "usd",
"date": 1440420000,
"description": None,
"destination": "ba_16hTzo2eZvKYlo2CeSjfb0tS",
"failure_code": None,
"failure_message": None,
"livemode": False,
"metadata": {
"foo": "bar",
},
"recipient": "rp_16hTzu2eZvKYlo2C9A5mgxEj",
"reversals": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers/tr_16hTzv2eZvKYlo2CWuyMmuvV/reversals",
"data": [],
},
"reversed": False,
"source_transaction": None,
"source_type": "card",
"statement_descriptor": None,
"status": "paid",
"type": "bank_account",
}
FAKE_TRANSFER_III = {
"id": "tr_17O4U52eZvKYlo2CmyYbDAEy",
"object": "transfer",
"amount": 19010,
"amount_reversed": 0,
"application_fee": None,
"balance_transaction": deepcopy(FAKE_BALANCE_TRANSACTION_IV),
"bank_account": deepcopy(FAKE_BANK_ACCOUNT_II),
"created": 1451560845,
"currency": "usd",
"date": 1451560845,
"description": "Transfer+for+test@example.com",
"destination": "ba_17O4Tz2eZvKYlo2CMYsxroV5",
"failure_code": None,
"failure_message": None,
"livemode": False,
"metadata": {
"foo2": "bar2",
},
"recipient": "rp_17O4U42eZvKYlo2CLk4upfDE",
"reversals": {
"object": "list",
"total_count": 0,
"has_more": False,
"url": "/v1/transfers/tr_17O4U52eZvKYlo2CmyYbDAEy/reversals",
"data": [],
},
"reversed": False,
"source_transaction": None,
"source_type": "card",
"statement_descriptor": None,
"status": "paid",
"type": "bank_account",
}
FAKE_ACCOUNT = {
"id": "acct_1032D82eZvKYlo2C",
"object": "account",
"business_logo": None,
"business_name": "Stripe.com",
"business_url": None,
"charges_enabled": False,
"country": "US",
"debit_negative_balances": True,
"decline_charge_on": {
"cvc_failure": False,
"avs_failure": False,
},
"default_currency": "usd",
"details_submitted": False,
"display_name": "Stripe.com",
"email": "site@stripe.com",
"external_accounts": {
"object": "list",
"data": [],
"has_more": False,
"total_count": 0,
"url": "/v1/accounts/acct_1032D82eZvKYlo2C/external_accounts",
},
"legal_entity": {
"address": {
"city": None,
"country": "US",
"line1": None,
"line2": None,
"postal_code": None,
"state": None,
},
"business_name": None,
"business_tax_id_provided": False,
"dob": {
"day": None,
"month": None,
"year": None
},
"first_name": None,
"last_name": None,
"personal_id_number_provided": False,
"ssn_last_4_provided": False,
"type": None,
"verification": {
"details": None,
"details_code": "failed_other",
"document": None,
"status": "unverified",
},
},
"managed": False,
"product_description": None,
"statement_descriptor": None,
"support_emaile": None,
"support_phone": None,
"timezone": "US/Pacific",
"tos_acceptance": {
"date": None,
"ip": None,
"user_agent": None
},
"transfer_schedule": {
"delay_days": 7,
"interval": "daily"
},
"transfers_enabled": False,
"verification": {
"disabled_reason": "other",
"fields_needed": [],
"due_by": None,
},
}
FAKE_EVENT_ACCOUNT_APPLICATION_DEAUTHORIZED = {
"id": "evt_XXXXXXXXXXXXXXXXXXXXXXXX",
"type": "account.application.deauthorized",
"pending_webhooks": 0,
"livemode": False,
"request": None,
"api_version": None,
"created": 1493823371,
"object": "event",
"data": {
"object": {
"id": "ca_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"object": "application",
"name": "Test Connect Application",
}
},
}
FAKE_EVENT_CHARGE_SUCCEEDED = {
"id": "evt_16YKQi2eZvKYlo2CT2oe5ff3",
"object": "event",
"api_version": "2016-03-07",
"created": 1439229084,
"data": {
"object": deepcopy(FAKE_CHARGE)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_6lsB7hkicwhaDj",
"type": "charge.succeeded",
}
FAKE_EVENT_TEST_CHARGE_SUCCEEDED = deepcopy(FAKE_EVENT_CHARGE_SUCCEEDED)
FAKE_EVENT_TEST_CHARGE_SUCCEEDED['id'] = TEST_EVENT_ID
FAKE_EVENT_CUSTOMER_CREATED = {
"id": "evt_38DHch3whaDvKYlo2CT2oe5ff3",
"object": "event",
"api_version": "2016-03-07",
"created": 1439229084,
"data": {
"object": deepcopy(FAKE_CUSTOMER)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_6l38DHch3whaDj",
"type": "customer.created",
}
FAKE_EVENT_CUSTOMER_DELETED = deepcopy(FAKE_EVENT_CUSTOMER_CREATED)
FAKE_EVENT_CUSTOMER_DELETED.update({
"id": "evt_38DHch3whaDvKYlo2jksfsFFxy",
"type": "customer.deleted"
})
FAKE_EVENT_CUSTOMER_DISCOUNT_CREATED = {
"id": "AGBWvF5zBm4sMCsLLPZrw9XX",
"type": "customer.discount.created",
"api_version": "2017-02-14",
"created": 1439229084,
"object": "discount",
"pending_webhooks": 0,
"request": "req_6l38DHch3whaDj",
"data": {
"object": deepcopy(FAKE_DISCOUNT_CUSTOMER),
}
}
FAKE_EVENT_CUSTOMER_DISCOUNT_DELETED = {
"id": "AGBWvF5zBm4sMCsLLPZrw9XX",
"type": "customer.discount.deleted",
"api_version": "2017-02-14",
"created": 1439229084,
"object": "discount",
"pending_webhooks": 0,
"request": "req_6l38DHch3whaDj",
"data": {
"object": deepcopy(FAKE_DISCOUNT_CUSTOMER),
}
}
FAKE_EVENT_CUSTOMER_SOURCE_CREATED = {
"id": "evt_DvKYlo38huDvKYlo2C7SXedrZk",
"object": "event",
"api_version": "2016-03-07",
"created": 1439229084,
"data": {
"object": deepcopy(FAKE_CARD)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_o3whaDvh3whaDj",
"type": "customer.source.created",
}
FAKE_EVENT_CUSTOMER_SOURCE_DELETED = deepcopy(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
FAKE_EVENT_CUSTOMER_SOURCE_DELETED.update({
"id": "evt_DvKYlo38huDvKYlo2C7SXedrYk",
"type": "customer.source.deleted"
})
FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE = deepcopy(FAKE_EVENT_CUSTOMER_SOURCE_DELETED)
FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE.update({
"id": "evt_DvKYlo38huDvKYlo2C7SXedzAk",
})
FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED = {
"id": "evt_38DHch3wHD2eZvKYlCT2oe5ff3",
"object": "event",
"api_version": "2016-03-07",
"created": 1439229084,
"data": {
"object": deepcopy(FAKE_SUBSCRIPTION)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_6l87IHch3diaDj",
"type": "customer.subscription.created",
}
FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED = deepcopy(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED)
FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED.update({
"id": "evt_38DHch3wHD2eZvKYlCT2oeryaf",
"type": "customer.subscription.deleted"})
FAKE_EVENT_INVOICE_CREATED = {
"id": "evt_187IHD2eZvKYlo2C6YKQi2eZ",
"object": "event",
"api_version": "2016-03-07",
"created": 1462338623,
"data": {
"object": deepcopy(FAKE_INVOICE)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_8O4sB7hkDobVT",
"type": "invoice.created",
}
FAKE_EVENT_INVOICE_DELETED = deepcopy(FAKE_EVENT_INVOICE_CREATED)
FAKE_EVENT_INVOICE_DELETED.update({
"id": "evt_187IHD2eZvKYlo2Cjkjsr34H",
"type": "invoice.deleted"})
FAKE_EVENT_INVOICEITEM_CREATED = {
"id": "evt_187IHD2eZvKYlo2C7SXedrZk",
"object": "event",
"api_version": "2016-03-07",
"created": 1462338623,
"data": {
"object": deepcopy(FAKE_INVOICEITEM)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_8O4Qbs2EDobDVT",
"type": "invoiceitem.created",
}
FAKE_EVENT_INVOICEITEM_DELETED = deepcopy(FAKE_EVENT_INVOICEITEM_CREATED)
FAKE_EVENT_INVOICEITEM_DELETED.update({
"id": "evt_187IHD2eZvKYloJfdsnnfs34",
"type": "invoiceitem.deleted"})
FAKE_EVENT_PLAN_CREATED = {
"id": "evt_1877X72eZvKYlo2CLK6daFxu",
"object": "event",
"api_version": "2016-03-07",
"created": 1462297325,
"data": {
"object": deepcopy(FAKE_PLAN)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_8NtJXPttxSvFyM",
"type": "plan.created",
}
FAKE_EVENT_PLAN_DELETED = deepcopy(FAKE_EVENT_PLAN_CREATED)
FAKE_EVENT_PLAN_DELETED.update({
"id": "evt_1877X72eZvKYl2jkds32jJFc",
"type": "plan.deleted"})
FAKE_EVENT_TRANSFER_CREATED = {
"id": "evt_16igNU2eZvKYlo2CYyMkYvet",
"object": "event",
"api_version": "2016-03-07",
"created": 1441696732,
"data": {
"object": deepcopy(FAKE_TRANSFER)
},
"livemode": False,
"pending_webhooks": 0,
"request": "req_6wZW9MskhYU15Y",
"type": "transfer.created",
}
FAKE_EVENT_TRANSFER_DELETED = deepcopy(FAKE_EVENT_TRANSFER_CREATED)
FAKE_EVENT_TRANSFER_DELETED.update({
"id": "evt_16igNU2eZvKjklfsdjk232Mf",
"type": "transfer.deleted"})
FAKE_TOKEN = {
"id": "tok_16YDIe2eZvKYlo2CPvqprIJd",
"object": "token",
"card": deepcopy(FAKE_CARD),
"client_ip": None,
"created": 1439201676,
"livemode": False,
"type": "card",
"used": False,
}
"""
sentry.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import warnings
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.core.urlresolvers import reverse
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField
from sentry.utils.http import absolute_uri
audit_logger = logging.getLogger('sentry.audit.user')
class UserManager(BaseManager, UserManager):
pass
class User(BaseModel, AbstractBaseUser):
__core__ = True
id = BoundedAutoField(primary_key=True)
username = models.CharField(_('username'), max_length=128, unique=True)
# this column is called first_name for legacy reasons, but it is the entire
# display name
name = models.CharField(_('name'), max_length=200, blank=True, db_column='first_name')
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin '
'site.')
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'
)
)
is_superuser = models.BooleanField(
_('superuser status'),
default=False,
help_text=_(
'Designates that this user has all permissions without '
'explicitly assigning them.'
)
)
is_managed = models.BooleanField(
_('managed'),
default=False,
help_text=_(
'Designates whether this user should be treated as '
'managed. Select this to disallow the user from '
'modifying their account (username, password, etc).'
)
)
is_password_expired = models.BooleanField(
_('password expired'),
default=False,
help_text=_(
'If set to true then the user needs to change the '
'password on next sign in.'
)
)
last_password_change = models.DateTimeField(
_('date of last password change'),
null=True,
help_text=_('The date the password was changed last.')
)
session_nonce = models.CharField(max_length=12, null=True)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
last_active = models.DateTimeField(_('last active'), default=timezone.now, null=True)
objects = UserManager(cache_fields=['pk'])
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
app_label = 'sentry'
db_table = 'auth_user'
verbose_name = _('user')
verbose_name_plural = _('users')
def delete(self):
if self.username == 'sentry':
raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
avatar = self.avatar.first()
if avatar:
avatar.delete()
return super(User, self).delete()
def save(self, *args, **kwargs):
if not self.username:
self.username = self.email
return super(User, self).save(*args, **kwargs)
def has_perm(self, perm_name):
warnings.warn('User.has_perm is deprecated', DeprecationWarning)
return self.is_superuser
def has_module_perms(self, app_label):
warnings.warn('User.has_module_perms is deprecated', DeprecationWarning)
return self.is_superuser
def get_unverified_emails(self):
return self.emails.filter(is_verified=False)
def get_verified_emails(self):
return self.emails.filter(is_verified=True)
def has_unverified_emails(self):
return self.get_unverified_emails().exists()
def get_label(self):
return self.email or self.username or self.id
def get_display_name(self):
return self.name or self.email or self.username
def get_full_name(self):
return self.name
def get_short_name(self):
return self.username
def get_salutation_name(self):
name = self.name or self.username.split('@', 1)[0].split('.', 1)[0]
first_name = name.split(' ', 1)[0]
return first_name.capitalize()
def get_avatar_type(self):
avatar = self.avatar.first()
if avatar:
return avatar.get_avatar_type_display()
return 'letter_avatar'
def send_confirm_email_singular(self, email, is_new_user=False):
from sentry import options
from sentry.utils.email import MessageBuilder
if not email.hash_is_valid():
email.set_hash()
email.save()
        context = {
            'user': self,
            'url': absolute_uri(
                reverse('sentry-account-confirm-email', args=[self.id, email.validation_hash])
            ),
            'confirm_email': email.email,
            'is_new_user': is_new_user,
        }
msg = MessageBuilder(
subject='%sConfirm Email' % (options.get('mail.subject-prefix'), ),
template='sentry/emails/confirm_email.txt',
html_template='sentry/emails/confirm_email.html',
type='user.confirm_email',
context=context,
)
msg.send_async([email.email])
def send_confirm_emails(self, is_new_user=False):
email_list = self.get_unverified_emails()
for email in email_list:
self.send_confirm_email_singular(email, is_new_user)
def merge_to(from_user, to_user):
# TODO: we could discover relations automatically and make this useful
from sentry import roles
from sentry.models import (
Activity, AuditLogEntry, AuthIdentity, Authenticator, GroupAssignee, GroupBookmark, GroupSeen,
GroupShare, GroupSubscription, OrganizationMember, OrganizationMemberTeam, UserAvatar,
UserEmail, UserOption,
)
audit_logger.info(
'user.merge', extra={
'from_user_id': from_user.id,
'to_user_id': to_user.id,
}
)
for obj in OrganizationMember.objects.filter(user=from_user):
try:
with transaction.atomic():
obj.update(user=to_user)
except IntegrityError:
pass
# identify the highest priority membership
to_member = OrganizationMember.objects.get(
organization=obj.organization_id,
user=to_user,
)
if roles.get(obj.role).priority > roles.get(to_member.role).priority:
to_member.update(role=obj.role)
for team in obj.teams.all():
try:
with transaction.atomic():
OrganizationMemberTeam.objects.create(
organizationmember=to_member,
team=team,
)
except IntegrityError:
pass
model_list = (
Authenticator, GroupAssignee, GroupBookmark, GroupSeen, GroupShare,
GroupSubscription, UserAvatar, UserEmail, UserOption,
)
for model in model_list:
for obj in model.objects.filter(user=from_user):
try:
with transaction.atomic():
obj.update(user=to_user)
except IntegrityError:
pass
Activity.objects.filter(
user=from_user,
).update(user=to_user)
AuditLogEntry.objects.filter(
actor=from_user,
).update(actor=to_user)
AuditLogEntry.objects.filter(
target_user=from_user,
).update(target_user=to_user)
# remove any duplicate identities that exist on the current user that
# might conflict w/ the new users existing SSO
AuthIdentity.objects.filter(
user=from_user,
auth_provider__organization__in=AuthIdentity.objects.filter(
user=to_user,
).values('auth_provider__organization')
).delete()
AuthIdentity.objects.filter(
user=from_user,
).update(user=to_user)
def set_password(self, raw_password):
super(User, self).set_password(raw_password)
self.last_password_change = timezone.now()
self.is_password_expired = False
def refresh_session_nonce(self, request=None):
from django.utils.crypto import get_random_string
self.session_nonce = get_random_string(12)
if request is not None:
request.session['_nonce'] = self.session_nonce
def get_orgs(self):
from sentry.models import (Organization, OrganizationMember, OrganizationStatus)
return Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
id__in=OrganizationMember.objects.filter(
user=self,
).values('organization'),
)
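# Usage sketch (assumed, comments only -- needs a configured database): note
# that merge_to() is written as a plain two-argument helper, so a dedupe job
# would call it from the duplicate account; the usernames below are made up:
#
#     duplicate = User.objects.get(username='jane.doe@example.com')
#     primary = User.objects.get(username='jane@example.com')
#     duplicate.merge_to(primary)   # memberships, emails, identities move over
#     duplicate.delete()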
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from DistributedMinigame import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownTimer
from direct.task.Task import Task
import math
from toontown.toon import ToonHead
import PhotoGameGlobals
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.golf import BuildGeometry
from toontown.toon import Toon
from toontown.toon import ToonDNA
from toontown.dna.DNAParser import *
from toontown.nametag import NametagGlobals
from direct.interval.IntervalGlobal import *
import random
from direct.showbase import PythonUtil
import math
import time
from toontown.makeatoon import NameGenerator
from otp.otpbase import OTPGlobals
from toontown.battle import BattleParticles
from toontown.minigame import PhotoGameBase
WORLD_SCALE = 2.0
FAR_PLANE_DIST = 600 * WORLD_SCALE
STAGE_Z_OFFSET = 7.0
GOODROWS = 13
BADROWS = 4
RAYSPREADX = 0.08
RAYSPREADY = 0.06
ZOOMRATIO = 0.4
ZOOMTIME = 0.5
WANTDOTS = 1
NUMSTARS = PhotoGameGlobals.NUMSTARS
STARSIZE = 0.06
VIEWSIZEX = (GOODROWS - BADROWS) * RAYSPREADX
VIEWSIZEY = (GOODROWS - BADROWS) * RAYSPREADY
def toRadians(angle):
return angle * 2.0 * math.pi / 360.0
def toDegrees(angle):
return angle * 360.0 / (2.0 * math.pi)
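# Aiming-math sketch (assumed values, comments only): the viewfinder code
# below maps a normalized screen offset (-1..1) to a camera heading/pitch
# offset with atan(offset * tan(fov / 2)), so the crosshair stays locked on
# the same world point instead of drifting with a linear approximation:
#
#     half_fov = toRadians(52.0) * 0.5              # hypothetical horizontal FOV
#     offset = 0.5                                  # halfway to the screen edge
#     heading = toDegrees(math.atan(offset * math.tan(half_fov)))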
class DistributedPhotoGame(DistributedMinigame, PhotoGameBase.PhotoGameBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhotoGame')
font = ToontownGlobals.getToonFont()
LOCAL_PHOTO_MOVE_TASK = 'localPhotoMoveTask'
FIRE_KEY = base.JUMP
UP_KEY = base.MOVE_UP
DOWN_KEY = base.MOVE_DOWN
LEFT_KEY = base.MOVE_LEFT
RIGHT_KEY = base.MOVE_RIGHT
INTRO_TASK_NAME = 'PhotoGameIntro'
INTRO_TASK_NAME_CAMERA_LERP = 'PhotoGameIntroCamera'
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
PhotoGameBase.PhotoGameBase.__init__(self)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedPhotoGame', [State.State('off', self.enterOff, self.exitOff, ['aim']),
State.State('aim', self.enterAim, self.exitAim, ['showResults', 'cleanup', 'zoom']),
State.State('zoom', self.enterZoom, self.exitZoom, ['showResults', 'cleanup', 'aim']),
State.State('showResults', self.enterShowResults, self.exitShowResults, ['cleanup']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
self.addChildGameFSM(self.gameFSM)
self.tripod = None
self.leftPressed = 0
self.rightPressed = 0
self.upPressed = 0
self.downPressed = 0
self.photoMoving = 0
self.introSequence = None
self.subjects = []
self.scenery = []
self.subjectNode = render.attachNewNode('subjects')
self.subjectTracks = {}
self.nameCounter = 0
self.zoomedView = 0
self.zoomFlip = 1
self.cameraTrack = None
self.assignments = []
self.currentAssignment = 0
self.assignmentPanels = []
self.toonList = []
self.assignmentDataDict = {}
self.starDict = {}
self.starParentDict = {}
self.textureBuffers = []
self.filmCount = 20
self.edgeUp = 0
self.edgeRight = 0
self.edgeDown = 0
self.edgeLeft = 0
self.scorePanel = None
return
def getTitle(self):
return TTLocalizer.PhotoGameTitle
def getInstructions(self):
return TTLocalizer.PhotoGameInstructions
def getMaxDuration(self):
return PhotoGameGlobals.GAME_TIME
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
PhotoGameBase.PhotoGameBase.load(self)
self.filmCount = self.data['FILMCOUNT']
self.safeZoneStorageDNAFile = self.data['DNA_TRIO'][0]
self.storageDNAFile = self.data['DNA_TRIO'][1]
self.dnaFile = self.data['DNA_TRIO'][2]
self.dnaStore = DNAStorage()
files = ('phase_4/dna/storage.pdna', self.storageDNAFile, self.safeZoneStorageDNAFile)
dnaBulk = DNABulkLoader(self.dnaStore, files)
dnaBulk.loadDNAFiles()
node = loader.loadDNAFile(self.dnaStore, self.dnaFile)
self.scene = hidden.attachNewNode(node)
self.construct()
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
self.filmImage = loader.loadModel('phase_4/models/minigames/photogame_filmroll')
self.filmImage.reparentTo(hidden)
self.tripodModel = loader.loadModel('phase_4/models/minigames/toon_cannon')
self.filmPanel = DirectLabel(parent=hidden, relief=None, pos=(-0.23, -1.2, -0.55), scale=0.65, text=str(self.filmCount), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_pos=(0.08, -0.15), text_font=ToontownGlobals.getSignFont(), image=self.filmImage, image_scale=Point3(1.0, 0.0, 0.85))
self.filmPanelTitle = DirectLabel(parent=self.filmPanel, relief=None, pos=(0.08, 0, 0.04), scale=0.08, text=TTLocalizer.PhotoGameFilm, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1))
self.music = base.loadMusic('phase_4/audio/bgm/MG_cannon_game.ogg')
self.sndPhotoMove = base.loadSfx('phase_4/audio/sfx/MG_cannon_adjust.ogg')
self.sndPhotoFire = base.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.ogg')
self.sndWin = base.loadSfx('phase_4/audio/sfx/MG_win.ogg')
self.sndFilmTick = base.loadSfx('phase_4/audio/sfx/Photo_instamatic.ogg')
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.hide()
self.viewfinderNode = base.aspect2d.attachNewNode('camera node')
self.viewfinderNode.setTransparency(TransparencyAttrib.MAlpha)
self.viewfinderNode.setDepthWrite(1)
self.viewfinderNode.setDepthTest(1)
self.viewfinderNode.setY(-1.0)
self.screenSizeMult = 0.5
self.screenSizeX = (base.a2dRight - base.a2dLeft) * self.screenSizeMult
self.screenSizeZ = (base.a2dTop - base.a2dBottom) * self.screenSizeMult
viewfinderImage = loader.loadModel('phase_4/models/minigames/photo_game_viewfinder')
viewfinderImage.reparentTo(self.viewfinderNode)
viewfinderImage.setScale(0.55, 1.0, 0.55)
self.blackoutNode = base.aspect2d.attachNewNode('blackout node')
self.blackoutNode.setP(90)
BuildGeometry.addSquareGeom(self.blackoutNode, self.screenSizeX * 2.2, self.screenSizeZ * 2.2, Vec4(1.0, 1.0, 1.0, 1.0))
self.blackoutNode.setTransparency(TransparencyAttrib.MAlpha)
self.blackoutNode.setColorScale(0.0, 0.0, 0.0, 0.5)
self.blackoutNode.setDepthWrite(1)
self.blackoutNode.setDepthTest(1)
self.blackoutNode.hide()
self.subjectToon = Toon.Toon()
self.addSound('zoom', 'Photo_zoom.ogg', 'phase_4/audio/sfx/')
self.addSound('snap', 'Photo_shutter.ogg', 'phase_4/audio/sfx/')
return
def __setupCapture(self):
self.captureCam = NodePath(Camera('CaptureCamera'))
self.captureCam.reparentTo(self.pointer)
self.captureLens = PerspectiveLens()
self.captureOutFOV = VIEWSIZEX / self.screenSizeX * self.outFov * 0.5
self.captureZoomFOV = VIEWSIZEX / self.screenSizeX * self.zoomFov * 0.5
self.captureLens.setFov(self.captureOutFOV)
self.captureLens.setAspectRatio(1.33)
self.captureCam.node().setLens(self.captureLens)
def __removeCapture(self):
del self.captureCam
del self.captureLens
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
if self.cameraTrack:
self.cameraTrack.finish()
self.cameraTrack = None
self.__removeCapture()
for textureBuffer in self.textureBuffers:
base.graphicsEngine.removeWindow(textureBuffer)
del self.textureBuffers
self.viewfinderNode.removeNode()
self.blackoutNode.removeNode()
for key in self.assignmentDataDict:
assignmentData = self.assignmentDataDict[key]
assignmentData[7].delete()
del self.assignmentDataDict
self.assignments = []
for subject in self.subjects:
subject.delete()
self.subjects = []
self.subjectToon.delete()
self.destruct()
for scenery in self.scenery:
scenery.removeNode()
self.scenery = None
self.subjectNode.removeNode()
self.subjectNode = None
self.sky.removeNode()
del self.sky
self.photoRoot = None
self.scene.removeNode()
del self.scene
self.pointer.removeNode()
self.tripodModel.removeNode()
del self.tripodModel
for panel in self.assignmentPanels:
panel.destroy()
self.assignmentPanels = []
if self.scorePanel:
self.scorePanel.destroy()
self.starDict = {}
self.starParentDict = {}
self.filmPanel.destroy()
del self.filmPanel
self.filmImage.removeNode()
del self.filmImage
del self.music
del self.sndPhotoMove
del self.sndPhotoFire
del self.sndWin
del self.sndFilmTick
self.tripod.removeNode()
del self.tripod
del self.swivel
self.timer.destroy()
del self.timer
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
self.ignoreAll()
return
def onstage(self):
self.notify.debug('Onstage')
DistributedMinigame.onstage(self)
self.__createTripod()
self.tripod.reparentTo(render)
self.tripod.hide()
self.__loadToonInTripod(self.localAvId)
base.camera.reparentTo(render)
self.__oldCamFar = base.camLens.getFar()
base.camLens.setFar(FAR_PLANE_DIST)
self.__setupSubjects()
self.__startIntro()
base.transitions.irisIn()
base.playMusic(self.music, looping=1, volume=0.8)
orgFov = base.camLens.getFov()
self.outFov = orgFov.getX()
self.zoomFov = orgFov.getX() * ZOOMRATIO
self.currentFov = self.outFov
self.__setupCapture()
def offstage(self):
self.notify.debug('offstage')
self.sky.reparentTo(hidden)
self.scene.reparentTo(hidden)
for avId in self.avIdList:
av = self.getAvatar(avId)
if av:
av.dropShadow.show()
av.resetLOD()
self.__stopIntro()
base.camLens.setFar(self.__oldCamFar)
self.timer.reparentTo(hidden)
self.filmPanel.reparentTo(hidden)
DistributedMinigame.offstage(self)
for key in self.subjectTracks:
track = self.subjectTracks[key][0]
track.pause()
del track
self.subjectTracks = {}
base.localAvatar.laffMeter.start()
del self.soundTable
def __setupCollisions(self):
self.queue = CollisionHandlerQueue()
self.traverser = CollisionTraverser('traverser name')
self.rayArray = []
vRange = (GOODROWS - BADROWS) / 2
for row in xrange(-(GOODROWS / 2), GOODROWS / 2 + 1):
for column in xrange(-(GOODROWS / 2), GOODROWS / 2 + 1):
goodRange = range(-((GOODROWS - BADROWS) / 2), (GOODROWS - BADROWS) / 2 + 1)
rayQuality = 'g'
if row not in goodRange or column not in goodRange:
rayQuality = 'l'
if row > vRange:
rayQuality = 'r'
if column > vRange:
rayQuality = 't'
if column < -vRange:
rayQuality = 'b'
columnString = '+%s' % column
if column < 0:
columnString = '%s' % column
rowString = '+%s' % row
if row < 0:
rowString = '%s' % row
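                # The collision node name packs the signed row/column offsets
                # plus a quality letter (g/l/r/t/b), e.g. "+3 -2 g";
                # __testCollisions later recovers the grid cell by slicing
                # name[0:2] and name[3:5] to look up markerDict.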
pickerNode = CollisionNode('%s %s %s' % (rowString, columnString, rayQuality))
pickerNP = base.camera.attachNewNode(pickerNode)
pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
pickerRay = CollisionRay()
pickerNode.addSolid(pickerRay)
self.rayArray.append((row,
column,
pickerNode,
pickerNP,
pickerRay))
self.traverser.addCollider(pickerNP, self.queue)
if WANTDOTS:
self.markerDict = {}
for rayEntry in self.rayArray:
markerNode = self.viewfinderNode.attachNewNode('marker Node')
BuildGeometry.addSquareGeom(markerNode, 0.01, 0.01, Vec4(1.0, 1.0, 1.0, 1.0))
markerNode.setX(RAYSPREADX * rayEntry[0])
markerNode.setY(RAYSPREADY * rayEntry[1])
markerNode.setDepthWrite(1)
markerNode.setZ(2.0)
self.markerDict[rayEntry[0], rayEntry[1]] = markerNode
self.lensNode = LensNode('photo taker')
self.lensNode.setLens(base.camLens)
def __moveViewfinder(self, task):
if base.mouseWatcherNode.hasMouse():
mpos = base.mouseWatcherNode.getMouse()
self.viewfinderNode.setX(mpos.getX() * self.screenSizeX)
self.viewfinderNode.setZ(mpos.getY() * self.screenSizeZ)
horzAngle = self.viewfinderNode.getX() / self.screenSizeX * 0.5 * base.camLens.getFov()[0]
vertAngle = self.viewfinderNode.getZ() / self.screenSizeZ * 0.5 * base.camLens.getFov()[1]
horzPointFlat = self.viewfinderNode.getX() / self.screenSizeX
vertPointFlat = self.viewfinderNode.getZ() / self.screenSizeZ
horzLength = base.camLens.getFov()[0] * 0.5
vertLength = base.camLens.getFov()[1] * 0.5
horzRadianLength = toRadians(horzLength)
vertRadianLength = toRadians(vertLength)
hMRadian = math.atan(horzPointFlat * math.tan(horzRadianLength))
vMRadian = math.atan(vertPointFlat * math.tan(vertRadianLength))
hMDegree = toDegrees(hMRadian)
vMDegree = toDegrees(vMRadian)
self.pointer.setH(-hMDegree)
self.pointer.setP(vMDegree)
newRight = 0
newLeft = 0
newUp = 0
newDown = 0
if self.viewfinderNode.getX() > self.screenSizeX * 0.95:
newRight = 1
if self.viewfinderNode.getX() < self.screenSizeX * -0.95:
newLeft = 1
if self.viewfinderNode.getZ() > self.screenSizeZ * 0.95:
newUp = 1
if self.viewfinderNode.getZ() < self.screenSizeZ * -0.95:
newDown = 1
if not self.edgeRight and newRight:
self.edgeRight = 1
self.__rightPressed()
elif self.edgeRight and not newRight:
self.edgeRight = 0
self.__rightReleased()
if not self.edgeLeft and newLeft:
self.edgeLeft = 1
self.__leftPressed()
elif self.edgeLeft and not newLeft:
self.edgeLeft = 0
self.__leftReleased()
if not self.edgeUp and newUp:
self.edgeUp = 1
self.__upPressed()
elif self.edgeUp and not newUp:
self.edgeUp = 0
self.__upReleased()
if not self.edgeDown and newDown:
self.edgeDown = 1
self.__downPressed()
elif self.edgeDown and not newDown:
self.edgeDown = 0
self.__downReleased()
return task.cont
def __testCollisions(self):
self.notify.debug('\nSnapping Photo')
self.playSound('snap')
if self.filmCount <= 0:
self.notify.debug('No Film')
return
for rayEntry in self.rayArray:
posX = (self.viewfinderNode.getX() + RAYSPREADX * rayEntry[0]) / self.screenSizeX
posY = (self.viewfinderNode.getZ() + RAYSPREADY * rayEntry[1]) / self.screenSizeZ
rayEntry[4].setFromLens(self.lensNode, posX, posY)
self.traverser.traverse(self.subjectNode)
distDict = {}
hitDict = {}
centerDict = {}
for i in xrange(self.queue.getNumEntries()):
entry = self.queue.getEntry(i)
object = None
objectIndex = None
subjectIndexString = entry.getIntoNodePath().getNetTag('subjectIndex')
sceneryIndexString = entry.getIntoNodePath().getNetTag('sceneryIndex')
if subjectIndexString:
objectIndex = int(subjectIndexString)
object = self.subjects[objectIndex]
elif sceneryIndexString:
objectIndex = int(sceneryIndexString)
object = self.scenery[objectIndex]
marker = 'g'
if 'b' in entry.getFromNodePath().getName():
marker = 'b'
if 't' in entry.getFromNodePath().getName():
marker = 't'
if 'r' in entry.getFromNodePath().getName():
marker = 'r'
if 'l' in entry.getFromNodePath().getName():
marker = 'l'
if object:
newEntry = (entry.getFromNode(), object)
distance = Vec3(entry.getSurfacePoint(self.tripod)).lengthSquared()
name = entry.getFromNode().getName()
if not name in distDict:
distDict[name] = distance
hitDict[name] = (entry.getFromNode(), object, marker)
elif distance < distDict[name]:
distDict[name] = distance
hitDict[name] = (entry.getFromNode(), object, marker)
for key in hitDict:
hit = hitDict[key]
superParent = hit[1]
marker = hit[2]
onCenter = 0
overB = 0
overT = 0
overR = 0
overL = 0
quality = -1
if marker == 'b':
overB = 1
elif marker == 't':
overT = 1
elif marker == 'r':
overR = 1
elif marker == 'l':
overL = 1
else:
quality = 1
onCenter = 1
if superParent not in centerDict:
centerDict[superParent] = (onCenter,
overB,
overT,
overR,
overL)
else:
centerDict[superParent] = (onCenter + centerDict[superParent][0],
overB + centerDict[superParent][1],
overT + centerDict[superParent][2],
overR + centerDict[superParent][3],
overL + centerDict[superParent][4])
if WANTDOTS:
for key in self.markerDict:
node = self.markerDict[key]
node.setColorScale(Vec4(1, 1, 1, 1))
for key in hitDict:
entry = hitDict[key]
name = entry[0].getName()
xS = int(name[0:2])
yS = int(name[3:5])
node = self.markerDict[xS, yS]
node.setColorScale(Vec4(1.0, 0.0, 0.0, 1.0))
centerDictKeys = []
for key in centerDict:
centerDictKeys.append(key)
for subject in centerDictKeys:
score = self.judgePhoto(subject, centerDict)
self.notify.debug('Photo is %s / 5 stars' % score)
self.notify.debug('assignment compare %s %s' % (self.determinePhotoContent(subject), self.assignments[self.currentAssignment]))
content = self.determinePhotoContent(subject)
if content:
photoAnalysisZero = (content[0], content[1])
if score and photoAnalysisZero in self.assignments:
index = self.assignments.index(photoAnalysisZero)
assignment = self.assignments[index]
self.notify.debug('assignment complete')
if score >= self.assignmentDataDict[assignment][0]:
subjectIndex = self.subjects.index(subject)
texturePanel = self.assignmentDataDict[assignment][5]
texture = self.assignmentDataDict[assignment][6]
buffer = self.assignmentDataDict[assignment][4]
panelToon = self.assignmentDataDict[assignment][7]
panelToon.hide()
buffer.setActive(1)
texturePanel.show()
texturePanel.setColorScale(1, 1, 1, 1)
taskMgr.doMethodLater(0.2, buffer.setActive, 'capture Image', [0])
if score > self.assignmentDataDict[assignment][0]:
self.assignmentDataDict[assignment][0] = score
self.updateAssignmentPanels()
self.sendUpdate('newClientPhotoScore', [subjectIndex, content[1], score])
else:
self.notify.debug('assignment not complete')
horzAngle = self.viewfinderNode.getX() / self.screenSizeX * 0.5 * base.camLens.getFov()[0]
vertAngle = self.viewfinderNode.getZ() / self.screenSizeZ * 0.5 * base.camLens.getFov()[1]
horzPointFlat = self.viewfinderNode.getX() / self.screenSizeX
vertPointFlat = self.viewfinderNode.getZ() / self.screenSizeZ
horzLength = base.camLens.getFov()[0] * 0.5
vertLength = base.camLens.getFov()[1] * 0.5
horzRadianLength = toRadians(horzLength)
vertRadianLength = toRadians(vertLength)
hMRadian = math.atan(horzPointFlat * math.tan(horzRadianLength))
vMRadian = math.atan(vertPointFlat * math.tan(vertRadianLength))
hMDegree = toDegrees(hMRadian)
vMDegree = toDegrees(vMRadian)
self.__decreaseFilmCount()
if self.filmCount == 0:
self.sendUpdate('filmOut', [])
self.notify.debug('Screen angles H:%s V:%s' % (self.swivel.getH(), self.swivel.getP()))
self.notify.debug('Viewfinder to screen angles H:%s V:%s' % (horzAngle, vertAngle))
self.notify.debug('Viewfinder to screen angles with math H:%s V:%s' % (hMDegree, vMDegree))
return
def newAIPhotoScore(self, playerId, assignmentIndex, score):
if len(self.assignments) > assignmentIndex:
assignment = self.assignments[assignmentIndex]
assignmentData = self.assignmentDataDict[assignment]
if score > assignmentData[2]:
assignmentData[2] = score
assignmentData[3] = playerId
self.updateAssignmentPanels()
def determinePhotoContent(self, subject):
if self.getSubjectTrackState(subject):
return [subject, self.getSubjectTrackState(subject)[2]]
else:
return None
def judgePhoto(self, subject, centerDict):
self.notify.debug('judgePhoto')
self.notify.debug(subject.getName())
self.notify.debug(str(centerDict[subject]))
a1 = base.camera.getH(render) % 360
a2 = subject.getH(render) % 360
angle = abs((a1 + 180 - a2) % 360 - 180)
self.notify.debug('angle camera:%s subject:%s between:%s' % (base.camera.getH(render), subject.getH(render), angle))
self.notify.debug(str(angle))
centering = centerDict[subject]
if type(subject) == type(self.subjectToon):
facing = angle / 180.0
interest = self.getSubjectTrackState(subject)[3]
quality = centering[0] - (centering[1] + centering[2] + centering[3] + centering[4])
tooClose = centering[1] and centering[2] or centering[3] and centering[4]
portrait = centering[1] and not (centering[2] or centering[3] or centering[4])
self.notify.debug('angle %s facing %s' % (angle, facing))
self.notify.debug('Interest %s' % interest)
self.notify.debug('Quality %s' % quality)
self.notify.debug('tooClose %s' % tooClose)
if quality <= 0:
return None
else:
score = 0
if angle >= 135:
score += 2
elif angle >= 90:
score += 1
elif angle <= 60:
score -= 1
score += interest
if quality >= 5 and (not tooClose or portrait):
score += 1
if quality >= 10:
score += 1
if quality >= 15:
score += 1
score -= 2
if score > NUMSTARS:
score = float(NUMSTARS)
if score <= 0:
return 1
else:
return score
return None
def __toggleView(self):
self.notify.debug('Toggle View')
hCam = self.swivel.getH()
vCam = self.swivel.getP()
horzPointFlat = self.viewfinderNode.getX() / self.screenSizeX
vertPointFlat = self.viewfinderNode.getZ() / self.screenSizeZ
horzLength = base.camLens.getFov()[0] * 0.5
vertLength = base.camLens.getFov()[1] * 0.5
horzRadianLength = toRadians(horzLength)
vertRadianLength = toRadians(vertLength)
hMRadian = math.atan(horzPointFlat * math.tan(horzRadianLength))
vMRadian = math.atan(vertPointFlat * math.tan(vertRadianLength))
hMDegree = toDegrees(hMRadian)
vMDegree = toDegrees(vMRadian)
if self.zoomedView:
self.zoomedView = 0
else:
self.zoomedView = 1
if self.zoomedView:
self.notify.debug('Zoom In')
hMove = hMDegree * (1.0 - ZOOMRATIO)
vMove = vMDegree * (1.0 - ZOOMRATIO)
self.currentFov = self.zoomFov
base.camLens.setMinFov(self.zoomFov/(4./3.))
self.blackoutNode.show()
self.swivel.setHpr(self.swivel, hMove * -self.zoomFlip, vMove * self.zoomFlip, 0)
else:
self.notify.debug('Zoom Out')
hMove = hMDegree * ((1.0 - ZOOMRATIO) / ZOOMRATIO)
vMove = vMDegree * ((1.0 - ZOOMRATIO) / ZOOMRATIO)
self.currentFov = self.outFov
base.camLens.setMinFov(self.outFov/(4./3.))
self.blackoutNode.hide()
self.swivel.setHpr(self.swivel, hMove * self.zoomFlip, vMove * -self.zoomFlip, 0)
def __doZoom(self):
self.notify.debug('Toggle View')
self.playSound('zoom')
hCam = self.swivel.getH()
vCam = self.swivel.getP()
horzPointFlat = self.viewfinderNode.getX() / self.screenSizeX
vertPointFlat = self.viewfinderNode.getZ() / self.screenSizeZ
horzLength = base.camLens.getFov()[0] * 0.5
vertLength = base.camLens.getFov()[1] * 0.5
horzRadianLength = toRadians(horzLength)
vertRadianLength = toRadians(vertLength)
hMRadian = math.atan(horzPointFlat * math.tan(horzRadianLength))
vMRadian = math.atan(vertPointFlat * math.tan(vertRadianLength))
hMDegree = toDegrees(hMRadian)
vMDegree = toDegrees(vMRadian)
if self.zoomedView:
self.zoomedView = 0
else:
self.zoomedView = 1
self.cameraTrack = Sequence()
if self.zoomedView:
self.notify.debug('Zoom In')
hMove = hMDegree * (1.0 - ZOOMRATIO)
vMove = vMDegree * (1.0 - ZOOMRATIO)
self.currentFov = self.zoomFov
base.camLens.setMinFov(self.zoomFov/(4./3.))
self.blackoutNode.show()
orgQuat = self.swivel.getQuat()
self.swivel.setHpr(self.swivel, hMove * -self.zoomFlip, vMove * self.zoomFlip, 0)
self.swivel.setR(0.0)
newQuat = self.swivel.getQuat()
self.swivel.setQuat(orgQuat)
zoomTrack = Parallel()
zoomTrack.append(LerpQuatInterval(self.swivel, ZOOMTIME, newQuat))
zoomTrack.append(LerpFunc(base.camLens.setMinFov, fromData=self.outFov/(4./3.), toData=self.zoomFov/(4./3.), duration=ZOOMTIME))
zoomTrack.append(LerpFunc(self.setBlackout, fromData=0.0, toData=0.5, duration=ZOOMTIME))
self.cameraTrack.append(zoomTrack)
self.cameraTrack.append(Func(self.finishZoom, 1))
else:
self.notify.debug('Zoom Out')
hMove = hMDegree * ((1.0 - ZOOMRATIO) / ZOOMRATIO)
vMove = vMDegree * ((1.0 - ZOOMRATIO) / ZOOMRATIO)
self.currentFov = self.outFov
base.camLens.setMinFov(self.outFov/(4./3.))
orgQuat = self.swivel.getQuat()
self.swivel.setHpr(self.swivel, hMove * self.zoomFlip, vMove * -self.zoomFlip, 0)
self.swivel.setR(0.0)
newQuat = self.swivel.getQuat()
self.swivel.setQuat(orgQuat)
zoomTrack = Parallel()
zoomTrack.append(LerpQuatInterval(self.swivel, ZOOMTIME, newQuat))
zoomTrack.append(LerpFunc(base.camLens.setMinFov, fromData=self.zoomFov/(4./3.), toData=self.outFov/(4./3.), duration=ZOOMTIME))
zoomTrack.append(LerpFunc(self.setBlackout, fromData=0.5, toData=0.0, duration=ZOOMTIME))
self.cameraTrack.append(zoomTrack)
self.cameraTrack.append(Func(self.blackoutNode.hide))
self.cameraTrack.append(Func(self.finishZoom, 0))
self.cameraTrack.start()
def setBlackout(self, black):
self.blackoutNode.setColorScale(0.0, 0.0, 0.0, black)
def getSubjectTrackState(self, subject):
subjectTrack = self.subjectTracks.get(subject)
if subjectTrack:
interval = subjectTrack[0]
timeline = subjectTrack[1]
time = interval.getT()
timeCount = time
timelineIndex = 0
while timeCount >= 0.0:
timeCount -= timeline[timelineIndex][1]
if timeCount >= 0.0:
timelineIndex += 1
return timeline[timelineIndex]
else:
return None
return None
def __setupSubjects(self):
self.__setupCollisions()
self.subjects = []
self.subjectTracks = {}
self.photoRoot.reparentTo(self.subjectNode)
self.photoRoot.setTag('sceneryIndex', '%s' % len(self.scenery))
self.scenery.append(self.photoRoot)
random.seed(time.time())
namegen = NameGenerator.NameGenerator()
for pathIndex in xrange(len(self.data['PATHS'])):
path = self.data['PATHS'][pathIndex]
subject = Toon.Toon()
gender = random.choice(['m', 'f'])
seed = int(random.random() * 571)
if gender == 'm':
boy = 1
girl = 0
else:
boy = 0
girl = 1
subject.setName(namegen.randomNameMoreinfo(boy=boy, girl=girl)[-1])
self.nameCounter += 1
subject.setPickable(0)
subject.setPlayerType(NametagGlobals.CCSpeedChat)
dna = ToonDNA.ToonDNA()
dna.newToonRandom(seed, gender, 1)
subject.setDNAString(dna.makeNetString())
subject.animFSM.request('neutral')
subject.setTag('subjectIndex', '%s' % len(self.subjects))
self.subjects.append(subject)
height = subject.getHeight()
self.collSphere = CollisionSphere(0, 0, height * 0.5, height * 0.5)
self.collSphere.setTangible(1)
self.collNode = CollisionNode(self.uniqueName('subject Sphere'))
self.collNode.setCollideMask(BitMask32.allOn())
self.collNode.addSolid(self.collSphere)
self.collNodePath = subject.attachNewNode(self.collNode)
subject.reparentTo(self.subjectNode)
subject.setPos(path[0])
subject.lookAt(path[1])
subject.show()
subjectTrack = self.generateToonTrack(subject, path, pathIndex)
subjectTrack[0].start()
self.subjectTracks[subject] = subjectTrack
def regenerateToonTrack(self, subject, path, pathIndex):
if not hasattr(self, 'swivel'):
return
subjectTrack = self.generateToonTrack(subject, path, pathIndex)
subjectTrack[0].start()
self.subjectTracks[subject] = subjectTrack
def generateToonTrack(self, subject, path, pathIndex):
def getNextIndex(curIndex, path):
return (curIndex + 1) % len(path)
subjectTrack = Sequence()
subjectTimeline = []
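# Each subjectTimeline entry is (startTime, duration, animName, interestValue).
# getSubjectTrackState() walks these durations against the track's elapsed time to
# find which animation a subject is playing (and how interesting it is) at the
# moment a photo is taken.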
timeAccum = 0.0
pathPointIndex = 0
orgPos = subject.getPos()
orgQuat = subject.getQuat()
while pathPointIndex < len(path):
nextIndex = getNextIndex(pathPointIndex, path)
curPoint = path[pathPointIndex]
nextPoint = path[nextIndex]
distance = self.slowDistance(curPoint, nextPoint)
pointTime = distance * 0.25
subject.setPos(curPoint)
subject.lookAt(nextPoint)
nextQuat = subject.getQuat()
animSetIndex = self.data['PATHANIMREL'][pathIndex]
animChoice = random.choice(self.data['ANIMATIONS'][animSetIndex])[0]
movetype = random.choice(self.data['MOVEMODES'][animSetIndex])
turnTime = 0.2
if movetype[0] == 'swim':
turnTime = 0.0
nextInterval = LerpQuatInterval(subject, turnTime, quat=nextQuat)
subjectTrack.append(nextInterval)
subjectTimeline.append((timeAccum,
nextInterval.getDuration(),
'turn',
1.0))
timeAccum += nextInterval.getDuration()
movetype = random.choice(self.data['MOVEMODES'][animSetIndex])
pointTime = pointTime * movetype[1]
if movetype[0] == 'swim':
nextInterval = Sequence()
startInterval = Func(subject.setP, -60)
midInterval = Parallel(LerpPosInterval(subject, pointTime, nextPoint), ActorInterval(subject, movetype[0], loop=1, duration=pointTime))
nextInterval.append(startInterval)
nextInterval.append(midInterval)
else:
nextInterval = Sequence()
startInterval = Func(subject.setP, 0)
midInterval = Parallel(LerpPosInterval(subject, pointTime, nextPoint), ActorInterval(subject, movetype[0], loop=1, duration=pointTime))
nextInterval.append(startInterval)
nextInterval.append(midInterval)
subjectTrack.append(nextInterval)
subjectTimeline.append((timeAccum,
nextInterval.getDuration(),
movetype[0],
1.0))
timeAccum += nextInterval.getDuration()
if animChoice:
nextInterval = ActorInterval(subject, animChoice, loop=0)
subjectTrack.append(nextInterval)
subjectTimeline.append((timeAccum,
nextInterval.getDuration(),
animChoice,
2.0))
timeAccum += nextInterval.getDuration()
pathPointIndex += 1
subject.setPos(orgPos)
subject.setQuat(orgQuat)
subjectTrack.append(Func(self.regenerateToonTrack, subject, path, pathIndex))
return (subjectTrack, subjectTimeline)
def slowDistance(self, point1, point2):
dx = point1[0] - point2[0]
dy = point1[1] - point2[1]
dz = point1[2] - point2[2]
distance = math.sqrt(dx * dx + dy * dy + dz * dz)
return distance
def getNextPoint(self, pointList, point):
pointIndex = 0
length = len(pointList)
found = 0
loop = 0
while not found and loop < length:
if pointList[pointIndex] == point:
found = 1
else:
pointIndex += 1
loop += 1
if not found:
return None
nextPointIndex = pointIndex + 1
if nextPointIndex >= length:
nextPointIndex = 0
return pointList[nextPointIndex]
def __createTripod(self):
tripod = self.tripodModel.copyTo(hidden)
swivel = tripod.find('**/cannon')
self.tripod = tripod
self.swivel = swivel
self.pointer = self.swivel.attachNewNode('pointer')
self.tripod.setPos(self.photoRoot.getPos())
self.tripod.setPos(self.tripod.getPos() + self.data['TRIPOD_OFFSET'])
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.__stopIntro()
self.__putCameraOnTripod()
if not base.config.GetBool('endless-cannon-game', 0):
self.timer.show()
self.timer.countdown(self.data['TIME'], self.__gameTimerExpired)
self.filmPanel.reparentTo(base.a2dTopRight)
self.scoreMult = MinigameGlobals.getScoreMult(self.cr.playGame.hood.id)
self.clockStopTime = None
self.gameFSM.request('aim')
self.__putCameraOnTripod()
self.currentAssignment = 0
assignmentTemplates = self.generateAssignmentTemplates(PhotoGameGlobals.ONSCREENASSIGNMENTS)
self.generateAssignments(assignmentTemplates)
self.generateAssignmentPanels()
self.scorePanel = self.makeScoreFrame()
self.scorePanel.reparentTo(base.a2dBottomRight)
self.scorePanel.setPos(-0.3, 0, 0.3)
self.updateAssignmentPanels()
for subject in self.subjects:
subject.useLOD(1000)
return
def setGameExit(self):
DistributedMinigame.setGameExit(self)
self.__gameTimerExpired()
def __gameTimerExpired(self):
self.notify.debug('game timer expired')
self.gameOver()
def generateAssignments(self, assignmentTemplates):
for template in assignmentTemplates:
subject = self.subjects[template[0]]
pose = template[1]
score = 0.0
panel = None
topScore = 0.0
topScorerId = None
textureBuffer = None
texturePanel = None
texture = None
panelToon = None
assignment = (subject, pose)
if assignment not in self.assignments:
self.assignments.append(assignment)
self.assignmentDataDict[assignment] = [score,
panel,
topScore,
topScorerId,
textureBuffer,
texturePanel,
texture,
panelToon]
self.notify.debug('assignments')
for assignment in self.assignments:
self.notify.debug(str(assignment))
return
def generateAssignmentPanels(self):
self.notify.debug('generateAssignmentPanels')
for panel in self.assignmentPanels:
panel.destroy()
spacing = self.screenSizeX / PhotoGameGlobals.ONSCREENASSIGNMENTS * 1.61
index = 0
Xoff = self.screenSizeX - 0.735
Zoff = -self.screenSizeZ + 0.25
for assignment in self.assignments:
self.notify.debug('made assignment panel %s' % str(assignment))
panel, texturePanel, toon = self.makeAssignmentPanel(assignment)
panel.setX(Xoff - spacing * index)
panel.setZ(Zoff)
texturePanel.setZ(0.065)
rot = random.choice([0.0,
2.0,
-2.0,
-4.0,
6.0])
panel.setR(rot)
textureBuffer = base.win.makeTextureBuffer('Photo Capture', 128, 128)
dr = textureBuffer.makeDisplayRegion()
dr.setCamera(self.captureCam)
texture = textureBuffer.getTexture()
texturePanel.setTexture(texture)
texturePanel.setColorScale(0, 0, 0, 0)
textureBuffer.setActive(0)
self.textureBuffers.append(textureBuffer)
texturePanel.hide()
self.assignmentPanels.append(panel)
self.assignmentDataDict[assignment][1] = panel
self.assignmentDataDict[assignment][4] = textureBuffer
self.assignmentDataDict[assignment][5] = texturePanel
self.assignmentDataDict[assignment][6] = texture
self.assignmentDataDict[assignment][7] = toon
index += 1
def printAD(self):
for assignment in self.assignmentDataDict:
data = self.assignmentDataDict[assignment]
print 'Key:%s\nData:%s\n' % (str(assignment), data)
def updateScorePanel(self):
teamScore = 0.0
bonusScore = 0.0
for assignment in self.assignments:
data = self.assignmentDataDict[assignment]
teamScore += data[2]
if data[3] == localAvatar.doId:
bonusScore += 1.0
self.scorePanel['text'] = TTLocalizer.PhotoGameScore % (int(teamScore), int(bonusScore), int(teamScore + bonusScore))
def updateAssignmentPanels(self):
for assignment in self.assignments:
data = self.assignmentDataDict[assignment]
leaderName = data[3]
leader = base.cr.doId2do.get(data[3])
if not leader:
data[1]['text'] = ' '
elif leader.doId == localAvatar.doId:
data[1]['text'] = TTLocalizer.PhotoGameScoreYou
else:
leaderName = leader.getName()
data[1]['text'] = TTLocalizer.PhotoGameScoreOther % leaderName
starList = self.starDict[data[1]]
starParent = self.starParentDict[data[1]]
score = int(data[2])
for index in xrange(NUMSTARS):
if index < score:
starList[index].show()
else:
starList[index].hide()
starParent.setX(float(NUMSTARS - score) * STARSIZE * 0.5)
self.updateScorePanel()
def makeAssignmentPanel(self, assignment):
if assignment != None:
assignedToon = Toon.Toon()
assignedToon.setDNA(assignment[0].getStyle())
else:
assignedToon = None
model, ival = self.makeFrameModel(assignedToon)
if assignedToon:
assignedToon.loop(assignment[1])
model.reparentTo(aspect2d)
assignedToon.setH(172)
assignedToon.setZ(-1.2)
assignedToon.setY(100.0)
if assignment[1] == 'swim':
assignedToon.setP(-70)
assignedToon.setH(160)
assignedToon.setZ(-0.6)
model.setH(0)
model.setScale(1.0)
model['text'] = ' '
assignedToon.setY(-100.0)
model.setY(-10.0)
screen = model.attachNewNode('screen node')
BuildGeometry.addSquareGeom(screen, 0.36, 0.27, Vec4(1.0, 1.0, 1.0, 1.0))
screen.setHpr(0, 90, 0)
screen.setDepthTest(1)
starImage = loader.loadModel('phase_4/models/minigames/photogame_star')
starParent = model.attachNewNode('star parent')
self.starDict[model] = []
for index in xrange(NUMSTARS):
star = DirectLabel(parent=starParent, image=starImage, image_color=(1, 1, 1, 1), image_scale=Point3(STARSIZE, 0.0, STARSIZE), relief=None)
star.setX(STARSIZE * -0.5 * float(NUMSTARS) + float(index + 0.5) * STARSIZE)
star.setZ(-0.05 - STARSIZE)
self.starDict[model].append(star)
self.starParentDict[model] = starParent
star.hide()
return (model, screen, assignedToon)
def makeFrameModel(self, model):
frame = self.makeAssignmentFrame()
ival = None
if model:
model.setDepthTest(1)
model.setDepthWrite(1)
scale = frame.attachNewNode('scale')
model.reparentTo(scale)
bMin, bMax = model.getTightBounds()
center = (bMin + bMax) / 2.0
model.setPos(-center[0], 2, -center[2])
corner = Vec3(bMax - center)
scaleFactor = self.screenSizeX / PhotoGameGlobals.ONSCREENASSIGNMENTS
scale.setScale(0.4 * scaleFactor / max(corner[0], corner[1], corner[2]))
return (frame, ival)
def makeAssignmentFrame(self):
from direct.gui.DirectGui import DirectFrame
photoImage = loader.loadModel('phase_4/models/minigames/photo_game_pictureframe')
size = 1.0
assignmentScale = self.screenSizeX / PhotoGameGlobals.ONSCREENASSIGNMENTS
frame = DirectFrame(parent=hidden, image=photoImage, image_color=(1, 1, 1, 1), image_scale=Point3(1.6 * assignmentScale, 0.0, 1.75 * assignmentScale), frameSize=(-size,
size,
-size,
size), text='HC Score', textMayChange=1, text_wordwrap=9, text_pos=Point3(0.0, -0.135, 0.0), text_scale=0.045, relief=None)
return frame
def makeScoreFrame(self):
from direct.gui.DirectGui import DirectFrame
size = 1.0
scoreImage = loader.loadModel('phase_4/models/minigames/photogame_camera')
frame = DirectFrame(parent=hidden, image=scoreImage, image_color=(1, 1, 1, 1), image_scale=Point3(0.64, 0.0, 0.64), frameSize=(-size,
size,
-size,
size), text='Score Frame', textMayChange=1, text_wordwrap=9, text_pos=Point3(0.0, 0.0, 0.0), text_scale=0.05, relief=None)
return frame
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterAim(self):
self.notify.debug('enterAim')
self.__enableAimInterface()
taskMgr.add(self.__moveViewfinder, 'photo game viewfinder Task')
self.accept('mouse1', self.__handleMouseClick)
base.localAvatar.laffMeter.stop()
base.transitions.noIris()
def exitAim(self):
self.__disableAimInterface()
taskMgr.remove('photo game viewfinder Task')
self.ignore('mouse1')
def enterZoom(self):
self.notify.debug('enterZoom')
taskMgr.add(self.__moveViewfinder, 'photo game viewfinder Task')
self.__doZoom()
def exitZoom(self):
taskMgr.remove('photo game viewfinder Task')
self.notify.debug('exitZoom')
def finishZoom(self, zoomed = None, task = None):
if zoomed:
self.captureLens.setFov(self.captureZoomFOV)
else:
self.captureLens.setFov(self.captureOutFOV)
self.gameFSM.request('aim')
def enterShowResults(self):
self.notify.debug('enterShowResults')
for subject in self.subjects:
subject.resetLOD()
def exitShowResults(self):
pass
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.music.stop()
if hasattr(self, 'jarIval'):
self.jarIval.finish()
del self.jarIval
for avId in self.avIdList:
taskMgr.remove('firePhoto' + str(avId))
taskMgr.remove('flyingToon' + str(avId))
def exitCleanup(self):
pass
def __enableAimInterface(self):
self.accept(self.FIRE_KEY, self.__fireKeyPressed)
self.accept(self.UP_KEY, self.__upKeyPressed)
self.accept(self.DOWN_KEY, self.__downKeyPressed)
self.accept(self.LEFT_KEY, self.__leftKeyPressed)
self.accept(self.RIGHT_KEY, self.__rightKeyPressed)
self.__spawnLocalPhotoMoveTask()
def __disableAimInterface(self):
self.ignore(self.FIRE_KEY)
self.ignore(self.UP_KEY)
self.ignore(self.DOWN_KEY)
self.ignore(self.LEFT_KEY)
self.ignore(self.RIGHT_KEY)
self.ignore(self.FIRE_KEY + '-up')
self.ignore(self.UP_KEY + '-up')
self.ignore(self.DOWN_KEY + '-up')
self.ignore(self.LEFT_KEY + '-up')
self.ignore(self.RIGHT_KEY + '-up')
self.__killLocalPhotoMoveTask()
def __fireKeyPressed(self):
self.ignore(self.FIRE_KEY)
self.accept(self.FIRE_KEY + '-up', self.__fireKeyReleased)
self.__firePressed()
def __upKeyPressed(self):
self.ignore(self.UP_KEY)
self.accept(self.UP_KEY + '-up', self.__upKeyReleased)
self.__upPressed()
def __downKeyPressed(self):
self.ignore(self.DOWN_KEY)
self.accept(self.DOWN_KEY + '-up', self.__downKeyReleased)
self.__downPressed()
def __leftKeyPressed(self):
self.ignore(self.LEFT_KEY)
self.accept(self.LEFT_KEY + '-up', self.__leftKeyReleased)
self.__leftPressed()
def __rightKeyPressed(self):
self.ignore(self.RIGHT_KEY)
self.accept(self.RIGHT_KEY + '-up', self.__rightKeyReleased)
self.__rightPressed()
def __fireKeyReleased(self):
self.ignore(self.FIRE_KEY + '-up')
self.accept(self.FIRE_KEY, self.__fireKeyPressed)
self.__fireReleased()
def __leftKeyReleased(self):
self.ignore(self.LEFT_KEY + '-up')
self.accept(self.LEFT_KEY, self.__leftKeyPressed)
self.__leftReleased()
def __rightKeyReleased(self):
self.ignore(self.RIGHT_KEY + '-up')
self.accept(self.RIGHT_KEY, self.__rightKeyPressed)
self.__rightReleased()
def __upKeyReleased(self):
self.ignore(self.UP_KEY + '-up')
self.accept(self.UP_KEY, self.__upKeyPressed)
self.__upReleased()
def __downKeyReleased(self):
self.ignore(self.DOWN_KEY + '-up')
self.accept(self.DOWN_KEY, self.__downKeyPressed)
self.__downReleased()
def __firePressed(self):
self.notify.debug('fire pressed')
def __fireReleased(self):
self.gameFSM.request('zoom')
def __upPressed(self):
self.notify.debug('up pressed')
self.upPressed = self.__enterControlActive(self.upPressed)
def __downPressed(self):
self.notify.debug('down pressed')
self.downPressed = self.__enterControlActive(self.downPressed)
def __leftPressed(self):
self.notify.debug('left pressed')
self.leftPressed = self.__enterControlActive(self.leftPressed)
def __rightPressed(self):
self.notify.debug('right pressed')
self.rightPressed = self.__enterControlActive(self.rightPressed)
def __upReleased(self):
self.notify.debug('up released')
self.upPressed = self.__exitControlActive(self.upPressed)
def __downReleased(self):
self.notify.debug('down released')
self.downPressed = self.__exitControlActive(self.downPressed)
def __leftReleased(self):
self.notify.debug('left released')
self.leftPressed = self.__exitControlActive(self.leftPressed)
def __rightReleased(self):
self.notify.debug('right released')
self.rightPressed = self.__exitControlActive(self.rightPressed)
def __handleMouseClick(self):
self.notify.debug('mouse click')
self.__testCollisions()
def __enterControlActive(self, control):
return control + 1
def __exitControlActive(self, control):
return max(0, control - 1)
def __spawnLocalPhotoMoveTask(self):
self.leftPressed = 0
self.rightPressed = 0
self.upPressed = 0
self.downPressed = 0
self.photoMoving = 0
task = Task(self.__localPhotoMoveTask)
task.lastPositionBroadcastTime = 0.0
taskMgr.add(task, self.LOCAL_PHOTO_MOVE_TASK)
def __killLocalPhotoMoveTask(self):
taskMgr.remove(self.LOCAL_PHOTO_MOVE_TASK)
if self.photoMoving:
self.sndPhotoMove.stop()
def __localPhotoMoveTask(self, task):
if not hasattr(self, 'swivel'):
return
pos = [self.swivel.getHpr()[0], self.swivel.getHpr()[1], self.swivel.getHpr()[2]]
oldRot = pos[0]
oldAng = pos[1]
rotVel = 0
if self.leftPressed:
rotVel += PhotoGameGlobals.PHOTO_ROTATION_VEL
if self.rightPressed:
rotVel -= PhotoGameGlobals.PHOTO_ROTATION_VEL
pos[0] += rotVel * globalClock.getDt()
angVel = 0
if self.upPressed:
angVel += PhotoGameGlobals.PHOTO_ANGLE_VEL
if self.downPressed:
angVel -= PhotoGameGlobals.PHOTO_ANGLE_VEL
pos[1] += angVel * globalClock.getDt()
if pos[1] < PhotoGameGlobals.PHOTO_ANGLE_MIN:
pos[1] = PhotoGameGlobals.PHOTO_ANGLE_MIN
elif pos[1] > PhotoGameGlobals.PHOTO_ANGLE_MAX:
pos[1] = PhotoGameGlobals.PHOTO_ANGLE_MAX
if oldRot != pos[0] or oldAng != pos[1]:
if self.photoMoving == 0:
self.photoMoving = 1
base.playSfx(self.sndPhotoMove, looping=1)
posVec = Vec3(pos[0], pos[1], pos[2])
self.swivel.setHpr(posVec)
elif self.photoMoving:
self.photoMoving = 0
self.sndPhotoMove.stop()
return Task.cont
def __putCameraOnTripod(self):
base.camera.setPosHpr(0, 0.0, 0, 0, 0, 0)
base.camera.reparentTo(self.swivel)
self.swivel.setHpr(self.data['START_HPR'])
def __loadToonInTripod(self, avId):
toon = base.cr.doId2do.get(avId)
if toon:
toon.reparentTo(self.swivel)
def __toRadians(self, angle):
return angle * 2.0 * math.pi / 360.0
def __toDegrees(self, angle):
return angle * 360.0 / (2.0 * math.pi)
def __decreaseFilmCount(self):
curTime = self.getCurrentGameTime()
score = self.filmCount - 1
if not hasattr(self, 'curScore'):
self.curScore = score
self.filmPanel['text'] = str(score)
if self.curScore != score:
if hasattr(self, 'jarIval'):
self.jarIval.finish()
s = self.filmPanel.getScale()
self.jarIval = Parallel(Sequence(self.filmPanel.scaleInterval(0.15, s * 3.0 / 4.0, blendType='easeOut'), self.filmPanel.scaleInterval(0.15, s, blendType='easeIn')), Sequence(Wait(0.25), SoundInterval(self.sndFilmTick)), name='photoGameFilmJarThrob')
self.jarIval.start()
self.curScore = score
self.filmCount = score
def __stopIntro(self):
taskMgr.remove(self.INTRO_TASK_NAME)
taskMgr.remove(self.INTRO_TASK_NAME_CAMERA_LERP)
self.__putCameraOnTripod()
if self.introSequence:
self.introSequence.finish()
self.introSequence = None
return
def __startIntro(self):
base.camera.reparentTo(render)
base.camera.setPos(self.data['CAMERA_INTIAL_POSTION'])
base.camera.setHpr(0, 0, 0)
base.camera.lookAt(self.tripod)
lookatHpr = base.camera.getHpr()
self.introSequence = LerpPosHprInterval(base.camera, 4.0, pos=self.tripod.getPos(render), hpr=lookatHpr, startPos=self.data['CAMERA_INTIAL_POSTION'], blendType='easeInOut')
self.introSequence.start()
def construct(self):
zone = self.getSafezoneId()
if zone == ToontownGlobals.ToontownCentral:
self.constructTTC()
elif zone == ToontownGlobals.DonaldsDock:
self.constructDD()
elif zone == ToontownGlobals.DaisyGardens:
self.constructDG()
elif zone == ToontownGlobals.MinniesMelodyland:
self.constructMM()
elif zone == ToontownGlobals.TheBrrrgh:
self.constructBR()
elif zone == ToontownGlobals.DonaldsDreamland:
self.constructDL()
def destruct(self):
zone = self.getSafezoneId()
if zone == ToontownGlobals.ToontownCentral:
self.destructTTC()
elif zone == ToontownGlobals.DonaldsDock:
self.destructDD()
elif zone == ToontownGlobals.DaisyGardens:
self.destructDG()
elif zone == ToontownGlobals.MinniesMelodyland:
self.destructMM()
elif zone == ToontownGlobals.TheBrrrgh:
self.destructBR()
elif zone == ToontownGlobals.DonaldsDreamland:
self.destructDL()
def constructTTC(self):
self.photoRoot = self.scene.find('**/prop_gazebo*')
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.sky.reparentTo(render)
self.sky.setBin('background', -100)
self.sky.find('**/cloud1').setBin('background', -99)
self.sky.find('**/cloud2').setBin('background', -98)
self.scene.reparentTo(render)
self.makeDictionaries(self.dnaStore)
self.createAnimatedProps(self.nodeList)
self.startAnimatedProps()
# Causes a crash; disabling it has seemingly no bad effect.
#self.scene.find('**/door_trigger_22*').hide()
self.scene.find('**/doorFrameHoleRight_0*').hide()
self.scene.find('**/doorFrameHoleLeft_0*').hide()
def destructTTC(self):
self.stopAnimatedProps()
self.deleteAnimatedProps()
def constructDD(self):
self.photoRoot = self.scene.find('**/center_island*')
self.sky = loader.loadModel('phase_3.5/models/props/BR_sky')
self.sky.reparentTo(render)
self.sky.setBin('background', -100)
self.sky.find('**/skypanel1').setBin('background', -98)
self.sky.find('**/skypanel2').setBin('background', -97)
self.sky.find('**/skypanel3').setBin('background', -96)
self.sky.find('**/skypanel4').setBin('background', -95)
self.sky.find('**/skypanel5').setBin('background', -94)
self.sky.find('**/skypanel6').setBin('background', -93)
self.scene.reparentTo(render)
self.makeDictionaries(self.dnaStore)
self.createAnimatedProps(self.nodeList)
self.startAnimatedProps()
boatGeom = self.scene.find('**/donalds_boat')
self.photoRoot.setPos(-22, 0, 0)
self.boat = self.photoRoot.attachNewNode('boat')
boatGeom.reparentTo(self.boat)
boatGeom.setX(45.0)
boatGeom.setY(-5.0)
boatGeom.setR(-0.0)
boatGeom.setH(-8)
self.bg = boatGeom
self.boatTrack = Sequence()
self.boatTrack.append(LerpHprInterval(self.boat, 90.0, Point3(360, 0, 0)))
self.boatTrack.loop()
self.boatTrack2 = Sequence()
self.boatTrack2.append(LerpPosInterval(self.boat, 5.0, Point3(0, 0, 2.0), Point3(0, 0, 1.0), blendType='easeInOut'))
self.boatTrack2.append(LerpPosInterval(self.boat, 5.0, Point3(0, 0, 1.0), Point3(0, 0, 2.0), blendType='easeInOut'))
self.boatTrack2.loop()
ddFog = Fog('DDFog Photo')
ddFog.setColor(Vec4(0.8, 0.8, 0.8, 1.0))
ddFog.setLinearRange(0.0, 400.0)
render.setFog(ddFog)
water = self.scene.find('**/top_surface')
water.setTransparency(TransparencyAttrib.MAlpha)
water.setColorScale(1.0, 1.0, 1.0, 0.8)
water.setDepthWrite(1)
water.setDepthTest(1)
water.setBin('transparent', 0)
def destructDD(self):
self.bg = None
self.boatTrack.finish()
self.boatTrack2.finish()
self.boat.removeNode()
render.clearFog()
self.stopAnimatedProps()
self.deleteAnimatedProps()
return
def constructDG(self):
self.photoRoot = render.attachNewNode('DG PhotoRoot')
self.photoRoot.setPos(1.39, 92.91, 2.0)
self.bigFlower = loader.loadModel('phase_8/models/props/DG_flower-mod.bam')
self.bigFlower.reparentTo(self.photoRoot)
self.bigFlower.setScale(2.5)
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.sky.reparentTo(render)
self.sky.setBin('background', -100)
self.sky.find('**/cloud1').setBin('background', -99)
self.sky.find('**/cloud2').setBin('background', -98)
self.scene.reparentTo(render)
self.scene.find('**/door_trigger_5*').hide()
self.scene.find('**/doorFrameHoleRight_0*').hide()
self.scene.find('**/doorFrameHoleLeft_0*').hide()
for name in ['**/o10_2']:
maze = self.scene.find(name)
maze.reparentTo(self.subjectNode)
maze.setTag('sceneryIndex', '%s' % len(self.scenery))
self.scenery.append(maze)
self.makeDictionaries(self.dnaStore)
self.createAnimatedProps(self.nodeList)
self.startAnimatedProps()
def destructDG(self):
self.bigFlower.removeNode()
self.stopAnimatedProps()
self.deleteAnimatedProps()
def constructMM(self):
self.photoRoot = render.attachNewNode('MM PhotoRoot')
self.photoRoot.setPos(103.6, -61, -4.497)
self.sky = loader.loadModel('phase_6/models/props/MM_sky')
self.sky.reparentTo(render)
self.sky.setBin('background', -100)
self.scene.reparentTo(render)
self.scene.find('**/door_trigger_8*').hide()
self.scene.find('**/door_trigger_6*').hide()
self.scene.find('**/door_trigger_1*').hide()
self.scene.find('**/door_trigger_0*').hide()
self.scene.find('**/door_trigger_3*').hide()
self.scene.find('**/doorFrameHoleRight_0*').hide()
self.scene.find('**/doorFrameHoleLeft_0*').hide()
self.scene.find('**/doorFrameHoleRight_1*').hide()
self.scene.find('**/doorFrameHoleLeft_1*').hide()
self.scene.find('**/doorFrameHoleRight').hide()
self.scene.find('**/doorFrameHoleLeft').hide()
self.makeDictionaries(self.dnaStore)
self.createAnimatedProps(self.nodeList)
self.startAnimatedProps()
lm = self.scene.findAllMatches('**/*landmark*')
blocker = lm[2]
blocker.reparentTo(self.subjectNode)
blocker.setTag('sceneryIndex', '%s' % len(self.scenery))
self.scenery.append(blocker)
def destructMM(self):
self.stopAnimatedProps()
self.deleteAnimatedProps()
def constructBR(self):
self.photoRoot = render.attachNewNode('BR PhotoRoot')
self.photoRoot.setPos(-110, -48, 8.567)
self.sky = loader.loadModel('phase_3.5/models/props/BR_sky')
self.sky.reparentTo(render)
self.sky.setBin('background', -100)
self.scene.reparentTo(render)
self.scene.find('**/door_trigger_11*').hide()
self.scene.find('**/doorFrameHoleRight_0*').hide()
self.scene.find('**/doorFrameHoleLeft_0*').hide()
self.makeDictionaries(self.dnaStore)
self.createAnimatedProps(self.nodeList)
self.startAnimatedProps()
self.snow = BattleParticles.loadParticleFile('snowdisk.ptf')
self.snow.setPos(0, 0, 5)
self.snowRender = self.scene.attachNewNode('snowRender')
self.snowRender.setDepthWrite(0)
self.snowRender.setBin('fixed', 1)
self.snowFade = None
self.snow.start(camera, self.snowRender)
return
def destructBR(self):
self.snow.cleanup()
del self.snow
del self.snowRender
self.stopAnimatedProps()
self.deleteAnimatedProps()
def constructDL(self):
self.photoRoot = render.attachNewNode('DL PhotoRoot')
self.photoRoot.setPos(-70.228, 87.588, 4.397)
self.sky = loader.loadModel('phase_8/models/props/DL_sky')
self.sky.reparentTo(render)
self.sky.setBin('background', -100)
self.scene.reparentTo(render)
self.scene.find('**/door_trigger_8*').hide()
self.scene.find('**/doorFrameHoleRight_0*').hide()
self.scene.find('**/doorFrameHoleLeft_0*').hide()
self.scene.find('**/doorFrameHoleRight_1*').hide()
self.scene.find('**/doorFrameHoleLeft_1*').hide()
self.makeDictionaries(self.dnaStore)
self.createAnimatedProps(self.nodeList)
self.startAnimatedProps()
def destructDL(self):
self.stopAnimatedProps()
self.deleteAnimatedProps()
def makeDictionaries(self, dnaStore):
self.nodeList = []
for i in xrange(dnaStore.getNumDNAVisGroups()):
groupFullName = dnaStore.getDNAVisGroupName(i)
groupName = base.cr.hoodMgr.extractGroupName(groupFullName)
groupNode = self.scene.find('**/' + groupFullName)
if groupNode.isEmpty():
self.notify.error('Could not find visgroup')
self.nodeList.append(groupNode)
def startAnimatedProps(self):
for animPropListKey in self.animPropDict:
animPropList = self.animPropDict[animPropListKey]
for animProp in animPropList:
animProp.enter()
def stopAnimatedProps(self):
for animPropListKey in self.animPropDict:
animPropList = self.animPropDict[animPropListKey]
for animProp in animPropList:
animProp.exit()
def createAnimatedProps(self, nodeList):
self.animPropDict = {}
for i in nodeList:
animPropNodes = i.findAllMatches('**/animated_prop_*')
numAnimPropNodes = animPropNodes.getNumPaths()
for j in xrange(numAnimPropNodes):
animPropNode = animPropNodes.getPath(j)
if animPropNode.getName().startswith('animated_prop_generic'):
className = 'GenericAnimatedProp'
else:
className = animPropNode.getName()[14:-8]
symbols = {}
base.cr.importModule(symbols, 'toontown.hood', [className])
classObj = getattr(symbols[className], className)
animPropObj = classObj(animPropNode)
animPropList = self.animPropDict.setdefault(i, [])
animPropList.append(animPropObj)
interactivePropNodes = i.findAllMatches('**/interactive_prop_*')
numInteractivePropNodes = interactivePropNodes.getNumPaths()
for j in xrange(numInteractivePropNodes):
interactivePropNode = interactivePropNodes.getPath(j)
className = 'GenericAnimatedProp'
symbols = {}
base.cr.importModule(symbols, 'toontown.hood', [className])
classObj = getattr(symbols[className], className)
interactivePropObj = classObj(interactivePropNode)
animPropList = self.animPropDict.get(i)
if animPropList is None:
animPropList = self.animPropDict.setdefault(i, [])
animPropList.append(interactivePropObj)
return
def deleteAnimatedProps(self):
for animPropListKey in self.animPropDict:
animPropList = self.animPropDict[animPropListKey]
for animProp in animPropList:
animProp.delete()
del self.animPropDict
def addSound(self, name, soundName, path = None):
if not hasattr(self, 'soundTable'):
self.soundTable = {}
if path:
self.soundPath = path
soundSource = '%s%s' % (self.soundPath, soundName)
self.soundTable[name] = loader.loadSfx(soundSource)
def playSound(self, name, volume = 1.0):
if hasattr(self, 'soundTable'):
self.soundTable[name].setVolume(volume)
self.soundTable[name].play()
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import types
import itertools
from qiime2.core.util import tuplize
from ..util import ImmutableBase
class _TypeBase(ImmutableBase):
"""Provides reflexive methods."""
def __ne__(self, other):
return not self == other
def __rmod__(self, predicate):
raise TypeError("Predicate must be applied to the right-hand side of"
" a type expression.")
def __ror__(self, other):
return self | other # union should be associative
def __rand__(self, other):
return self & other # intersection should be associative
class CompositeType(_TypeBase):
def __init__(self, name, field_names):
# These classes aren't user-facing, but some light validation avoids
# accidental issues. However, we don't want to waste a lot of time with
# validation here; full validation should happen elsewhere.
if not len(field_names):
raise ValueError("`field_names` cannot be an empty array-like.")
self.name = name
self.field_names = field_names
self._freeze_()
def __mod__(self, predicate):
raise TypeError("Cannot apply predicate %r, %r is missing arguments"
" for its fields." % (predicate, self))
def __or__(self, other):
raise TypeError("Cannot union with %r, %r is missing arguments"
" for its fields." % (other, self))
def __and__(self, other):
raise TypeError("Cannot intersect with %r, %r is missing arguments"
" for its fields." % (other, self))
def __getitem__(self, fields):
fields = tuplize(fields)
if len(fields) != len(self.field_names):
raise TypeError("%r takes %d field(s), %d provided."
% (self, len(self.field_names), len(fields)))
for args in zip(self.field_names, fields):
self._validate_field_(*args)
return self._apply_fields_(fields=fields)
def __repr__(self):
return "%s[%s]" % (self.name,
', '.join('{%s}' % f for f in self.field_names))
def _validate_field_(self, name, value):
"""Called when a field is provided to a `CompositeType`.
This method is designed to be overridden to influence the behavior of
the grammar. It is recommended to call super as the default
implementation includes useful type checks and errors.
Parameters
----------
name : str
The name of the field being set
value : TypeExpression
The value of the field being set
Raises
------
TypeError
Raised when the field is rejected. By default this is when a field
is not provided a `TypeExpression`.
"""
if not isinstance(value, TypeExpression):
if isinstance(value, self.__class__):
raise TypeError("Incomplete type %r provided as a field to %r"
% (value, self))
raise TypeError("%r cannot be used as a field to %r (not a type)."
% (value, self))
def _apply_fields_(self, fields):
"""Called when a `CompositeType` is promoted to a `TypeExpression`.
This method is designed to be overridden to influence the behavior of
the grammar. An overriding method should ensure that `self.name` is
propagated and that the provided `fields` are passed.
Parameters
----------
fields : Tuple[TypeExpression, ...]
The fields which should be provided to the `TypeExpression`
Returns
-------
TypeExpression
Typically this will return a subclass of `TypeExpression`.
"""
return TypeExpression(self.name, fields=fields)
def iter_symbols(self):
yield self.name
def is_concrete(self):
return False
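# A minimal usage sketch (illustrative names, not part of this module): a CompositeType
# is a template whose fields are filled in by subscription, which promotes it to a
# TypeExpression via _apply_fields_:
#
#     Pair = CompositeType('Pair', ('left', 'right'))
#     X = TypeExpression('X')
#     Y = TypeExpression('Y')
#     Pair[X, Y]    # -> TypeExpression('Pair', fields=(X, Y)), repr "Pair[X, Y]"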
class TypeExpression(_TypeBase):
def __init__(self, name, fields=(), predicate=None):
self.name = name
self.predicate = predicate
self.fields = fields
self._freeze_()
def __hash__(self):
return (hash(self.__class__.__name__) ^
hash(self.name) ^
hash(self.predicate) ^
hash(self.fields))
def __eq__(self, other):
# Deep equality, but not semantic equality.
if type(self) is not type(other):
return NotImplemented
return (self.name == other.name and
self.predicate == other.predicate and
self.fields == other.fields)
def equals(self, other):
# Different from __eq__, which has to match hashing but can't
# consider semantic equality.
return self <= other <= self
def __repr__(self):
result = self.name
if self.fields:
result += '[%s]' % ', '.join(repr(f) for f in self.fields)
if self.predicate:
result += ' %% %r' % self.predicate
return result
def __getitem__(self, fields):
raise TypeError("%r has no empty fields (not subscriptable)." % self)
def _apply_fields_(self, fields):
return self.__class__(self.name, fields=fields,
predicate=self.predicate)
def __contains__(self, value):
return (self._is_element_(value) and
((not self.predicate) or value in self.predicate))
def _is_element_(self, value):
return False
def __mod__(self, predicate):
if self.predicate:
raise TypeError("%r already has a predicate." % self)
if predicate is None:
return self
self._validate_predicate_(predicate)
return self._apply_predicate_(predicate=predicate)
def _validate_predicate_(self, predicate):
if not isinstance(predicate, Predicate):
raise TypeError("%r is not a predicate." % predicate)
def _apply_predicate_(self, predicate):
return self.__class__(self.name, fields=self.fields,
predicate=predicate)
def __or__(self, other):
self._validate_union_(other, handshake=False)
if self == other:
return self
return self._build_union_((self, other))
def _validate_union_(self, other, handshake=False):
if not isinstance(other, TypeExpression):
if isinstance(other, CompositeType):
raise TypeError("Cannot union an incomplete type %r with %r."
% (other, self))
else:
raise TypeError("%r is not a type expression." % other)
if not handshake:
other._validate_union_(self, handshake=True)
def _build_union_(self, members):
return UnionTypeExpression(members)
def __and__(self, other):
self._validate_intersection_(other, handshake=False)
if self == other:
return other
return self._build_intersection_((self, other))
def _validate_intersection_(self, other, handshake=False):
if not isinstance(other, TypeExpression):
if isinstance(other, CompositeType):
raise TypeError("Cannot intersect an incomplete type %r with"
" %r." % (other, self))
else:
raise TypeError("%r is not a type expression." % other)
if not handshake:
other._validate_intersection_(self, handshake=True)
def _build_intersection_(self, members):
return IntersectionTypeExpression(members)
def __le__(self, other):
return all(any(s._aug_is_subtype(o) for o in other) for s in self)
def __ge__(self, other):
return all(any(o._aug_is_subtype(s) for s in self) for o in other)
def _aug_is_subtype(self, other):
r = self._is_subtype_(other)
if r is NotImplemented:
return other._is_supertype_(self)
return r
def _is_subtype_(self, other):
if self.name != other.name:
return False
for f1, f2 in itertools.zip_longest(self.fields, other.fields):
if not (f1 <= f2):
return False
if other.predicate and not self.predicate <= other.predicate:
return False
return True
def _is_supertype_(self, other):
# Invoked only when `other`'s `_is_subtype_` returned `NotImplemented`;
# in practice this hook shouldn't be needed most of the time.
raise NotImplementedError
def __iter__(self):
yield from set(self._apply_fields_(fields=fields)
for fields in itertools.product(*self.fields))
def is_concrete(self):
return len(list(self)) == 1
def iter_symbols(self):
yield self.name
for field in self.fields:
yield from field.iter_symbols()
def to_ast(self):
return {
"type": 'expression',
"name": self.name,
"predicate": self.predicate.to_ast() if self.predicate else {},
"fields": [field.to_ast() for field in self.fields]
}
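# Hedged sketch of the set operations below (illustrative X/Y names): `X | Y` builds a
# UnionTypeExpression over a frozenset of its members (nested unions are flattened in
# _SetOperationBase.__init__), `X | X` short-circuits to X, and subtype checks
# distribute over the members via __iter__/__le__, so e.g. X <= (X | Y) holds.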
class _SetOperationBase(TypeExpression):
_operator = '?' # Used for repr only - ? chosen as it is not a Python op.
def __init__(self, members):
m = []
for member in members:
# We can flatten the object a little, which will avoid excessive
# recursion (it would look like a cons-list otherwise)
if type(member) is type(self):
m.extend(member.members)
else:
m.append(member)
self.members = frozenset(m)
super().__init__('') # Unions/intersections do not have a name
def __hash__(self):
return super().__hash__() ^ hash(self.members)
def __eq__(self, other):
super_eq = super().__eq__(other)
if super_eq is NotImplemented:
return NotImplemented
return super_eq and self.members == other.members
def __repr__(self):
return (" %s " % self._operator) \
.join(sorted([repr(m) for m in self.members]))
def _validate_predicate_(self, predicate):
raise TypeError("Cannot apply predicates to union/intersection types.")
def to_ast(self):
return {
'members': [m.to_ast() for m in self.members]
}
def __iter__(self):
yield from set(itertools.chain.from_iterable(self.members))
class UnionTypeExpression(_SetOperationBase):
_operator = '|'
def _validate_intersection_(self, other, handshake=False):
raise TypeError("Cannot intersect %r with %r." % (self, other))
def _build_union_(self, members):
return self.__class__(members)
def to_ast(self):
r = super().to_ast()
r['type'] = 'union'
return r
class Predicate(_TypeBase):
def __init__(self, *args, **kwargs):
self._truthy = any(map(bool, args)) or any(map(bool, kwargs.values()))
self._freeze_()
def __hash__(self):
# This trivially satisfies the property:
# x == x => hash(x) == hash(x)
# Subclasses ought to override this with something less... collision-y.
return 0
def __eq__(self, other):
raise NotImplementedError
def __contains__(self, value):
return self._is_element_(value)
def _is_element_(self, value):
return True
def __bool__(self):
return self._truthy
def __le__(self, other):
if other is None:
other = self.__class__()
return self._is_subtype_(other)
def __ge__(self, other):
if other is None:
other = self.__class__()
return other._is_subtype_(self)
def _aug_is_subtype(self, other):
r = self._is_subtype_(other)
if r is NotImplemented:
return other._is_supertype_(self)
return r
def _is_subtype_(self, other):
raise NotImplementedError
def _is_supertype_(self, other):
raise NotImplementedError
def to_ast(self):
return {
'type': 'predicate',
'name': self.__class__.__name__
}
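# Hedged sketch of a concrete predicate (illustrative, not part of this module): a
# subclass supplies hashing, equality, membership, and subtyping, and is attached to a
# type expression with `%`:
#
#     class MinLength(Predicate):
#         def __init__(self, n=0):
#             self.n = n
#             super().__init__(n)
#         def __hash__(self):
#             return hash(self.n)
#         def __eq__(self, other):
#             return type(self) is type(other) and self.n == other.n
#         def _is_element_(self, value):
#             return len(value) >= self.n
#         def _is_subtype_(self, other):
#             return self.n >= other.n
#
#     SomeType % MinLength(3)   # SomeType is any TypeExpression without a predicate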
# TODO: finish these classes:
class IntersectionTypeExpression(_SetOperationBase):
_operator = '&'
def _validate_union_(self, other, handshake=False):
raise TypeError("Cannot union %r with %r." % (self, other))
def _build_intersection_(self, members):
return self.__class__(members)
def to_ast(self):
r = super().to_ast()
r['type'] = 'intersection'
return r
class MappingTypeExpression(TypeExpression):
def __init__(self, name, mapping):
if type(mapping) is not dict: # we really only want dict literals
raise ValueError()
if type(name) is not str:
raise ValueError()
for key in mapping:
self._validate_member_(key)
for value in mapping.values():
self._validate_member_(value)
# Read-only proxy of `mapping`; mutations to the original `mapping` will still
# be reflected, but there isn't much we can do about that. Good use of this
# object would involve a dict literal anyway.
self.mapping = types.MappingProxyType(mapping)
super().__init__(name)
def __hash__(self):
return super().__hash__() ^ hash(frozenset(self.mapping.items()))
def __eq__(self, other):
super_eq = super().__eq__(other)
if super_eq is NotImplemented:
return NotImplemented
return super_eq and (set(self.mapping.items()) ==
set(other.mapping.items()))
def _validate_predicate_(self, predicate):
raise TypeError("Cannot apply predicates to type variables.")
def _validate_intersection_(self, other, handshake=False):
if type(self) != type(other):
raise TypeError()
if set(self.mapping) != set(other.mapping):
raise TypeError()
super()._validate_intersection_(other, handshake=handshake)
def _validate_union_(self, other, handshake=False):
# This has a reasonable definition (ensure disjoint sets on the left-hand
# side), the opposite of intersection, but there isn't really a good
# use case for it at this time.
raise TypeError("Cannot union type variables.")
def to_ast(self):
return {
"type": "map",
"mapping": [list(item) for item in self.mapping.items()]
}
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Code for model cloning, plus model-related API entries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.util.tf_export import tf_export
# API entries importable from `keras.models`:
Model = training.Model # pylint: disable=invalid-name
Sequential = sequential.Sequential # pylint: disable=invalid-name
save_model = saving.save_model
load_model = saving.load_model
model_from_config = saving.model_from_config
model_from_yaml = saving.model_from_yaml
model_from_json = saving.model_from_json
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: corresponding_tensor}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(
batch_shape=layer._batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for original_input_layer, cloned_input_layer in zip(model._input_layers,
input_layers):
layer_map[original_input_layer] = cloned_input_layer
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
if isinstance(input_tensors, tuple):
input_tensors = list(input_tensors)
input_tensors = generic_utils.to_list(input_tensors)
input_tensors_ = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x, name='input_wrapper_for_' + name)
input_tensors_.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
input_tensors_.append(x)
input_tensors = input_tensors_
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = y
# Iterate over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_tensors = []
for x in reference_input_tensors:
if x in tensor_map:
computed_tensors.append(tensor_map[x])
if len(computed_tensors) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_tensors) == 1:
computed_tensor = computed_tensors[0]
output_tensors = generic_utils.to_list(layer(computed_tensor,
**kwargs))
computed_tensors = [computed_tensor]
else:
computed_tensors = computed_tensors
output_tensors = generic_utils.to_list(layer(computed_tensors,
**kwargs))
for x, y in zip(reference_output_tensors, output_tensors):
tensor_map[x] = y
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
output_tensors.append(tensor_map[x])
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(generic_utils.to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
if isinstance(input_tensors, tuple):
input_tensors = list(input_tensors)
x = generic_utils.to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers, name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
@tf_export('keras.models.clone_model')
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors or InputLayer objects
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
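# Hedged usage sketch (illustrative names): clone_model returns a structurally identical
# model with freshly initialized weights, so copy the weights explicitly if they should
# match, e.g.:
#
#     clone = clone_model(original_model)
#     clone.set_weights(original_model.get_weights())
#     # or build the clone directly on an existing tensor instead of new placeholders:
#     clone = clone_model(original_model, input_tensors=[some_input_tensor])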
# "Clone" a subclassed model by reseting all of the attributes.
def _in_place_subclassed_model_reset(model):
"""Substitute for model cloning that works for subclassed models.
Subclassed models cannot be cloned because their topology is not serializable.
To "instantiate" an identical model in a new TF graph, we reuse the original
model object, but we clear its state.
After calling this function on a model instance, you can use the model
instance as if it were a model clone (in particular you can use it in a new
graph).
This method clears the state of the input model. It is thus destructive.
However the original state can be restored fully by calling
`_in_place_subclassed_model_state_restoration`.
Args:
model: Instance of a Keras model created via subclassing.
Raises:
ValueError: In case the model uses a subclassed model as inner layer.
"""
assert not model._is_graph_network # Only makes sense for subclassed networks
# Retrieve all layers tracked by the model as well as their attribute names
attributes_cache = {}
for name in dir(model):
try:
value = getattr(model, name)
except (AttributeError, ValueError, TypeError):
continue
if isinstance(value, Layer):
attributes_cache[name] = value
assert value in model._layers
elif isinstance(
value, (list, tuple)) and name not in ('layers', '_layers',
'stateful_metric_functions'):
# Handle case: list/tuple of layers (also tracked by the Network API).
if value and all(isinstance(val, Layer) for val in value):
raise ValueError('We do not support the use of list-of-layers '
'attributes in subclassed models used with '
'`model_to_estimator` at this time. Found list '
'model: %s' % name)
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
model._layers = data_structures.NoDependency([])
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
# This would be theoretically possible to support, but would add complexity.
# Only do it if users complain.
if isinstance(layer, Network) and not layer._is_graph_network:
raise ValueError('We do not support the use of nested subclassed models '
'in `model_to_estimator` at this time. Found nested '
'model: %s' % layer)
fresh_layer = layer.__class__.from_config(config)
name = layers_to_names[layer]
setattr(model, name, fresh_layer)
# Cache original model build attributes (in addition to layers)
if (not hasattr(model, '_original_attributes_cache') or
model._original_attributes_cache is None):
if model.built:
attributes_to_cache = [
'inputs',
'outputs',
'_feed_outputs',
'_feed_output_names',
'_feed_output_shapes',
'_feed_loss_fns',
'loss_weights_list',
'targets',
'_feed_targets',
'sample_weight_modes',
'weighted_metrics',
'metrics_names',
'metrics_tensors',
'total_loss',
'sample_weights',
'_feed_sample_weights',
'_fit_function',
'_eval_function',
'train_function',
'test_function',
'predict_function',
'_collected_trainable_weights',
'_feed_inputs',
'_feed_input_names',
'_feed_input_shapes',
'optimizer',
]
for name in attributes_to_cache:
attributes_cache[name] = getattr(model, name)
model._original_attributes_cache = data_structures.NoDependency(
attributes_cache)
# Reset built state
model.built = False
model.inputs = None
model.outputs = None
def in_place_subclassed_model_state_restoration(model):
"""Restores the original state of a model after it was "reset".
  This undoes the action of `_in_place_subclassed_model_reset`, which is called
in `clone_and_build_model` if `in_place_reset` is set to True.
Args:
model: Instance of a Keras model created via subclassing, on which
`_in_place_subclassed_model_reset` was previously called.
"""
assert not model._is_graph_network
# Restore layers and build attributes
if (hasattr(model, '_original_attributes_cache') and
model._original_attributes_cache is not None):
# Models have sticky attribute assignment, so we want to be careful to add
# back the previous attributes and track Layers by their original names
# without adding dependencies on "utility" attributes which Models exempt
# when they're constructed.
model._layers = data_structures.NoDependency([])
for name, value in model._original_attributes_cache.items():
if not isinstance(value, checkpointable.CheckpointableBase):
# If this value is not already checkpointable, it's probably that way
# for a reason; we don't want to start tracking data structures that the
# original Model didn't.
value = data_structures.NoDependency(value)
setattr(model, name, value)
model._original_attributes_cache = None
else:
# Restore to the state of a never-called model.
model.built = False
model.inputs = None
model.outputs = None
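# Illustrative sketch (not part of the original module) of the reset/restore
# round trip described above. `subclassed_model` is assumed to be an instance
# of a user-defined `Model` subclass (i.e. not a graph network).
def _example_reset_and_restore(subclassed_model):
  """Reset a subclassed model in place, use it elsewhere, then restore it."""
  _in_place_subclassed_model_reset(subclassed_model)
  # ... build, compile, and use `subclassed_model` in the new graph here ...
  in_place_subclassed_model_state_restoration(subclassed_model)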
def clone_and_build_model(
model, input_tensors=None, target_tensors=None, custom_objects=None,
compile_clone=True, in_place_reset=False, optimizer_iterations=None):
"""Clone a `Model` and build/compile it with the same settings used before.
  This function can be run in the same graph or in a separate graph from the
model. When using a separate graph, `in_place_reset` must be `False`.
Note that, currently, the clone produced from this function may not work with
TPU DistributionStrategy. Try at your own risk.
Args:
model: `tf.keras.Model` object. Can be Functional, Sequential, or
sub-classed.
input_tensors: Optional list of input tensors to build the model upon. If
not provided, placeholders will be created.
target_tensors: Optional list of target tensors for compiling the model. If
not provided, placeholders will be created.
custom_objects: Optional dictionary mapping string names to custom classes
or functions.
compile_clone: Boolean, whether to compile model clone (default `True`).
in_place_reset: Boolean, whether to reset the model in place. Only used if
the model is not a graph network. If the model is a subclassed model, then
this argument must be set to `True` (default `False`). To restore the
original model, use the function
`in_place_subclassed_model_state_restoration(model)`.
optimizer_iterations: An iterations variable that will be incremented by the
optimizer if the clone is compiled. This argument is used when a Keras
model is cloned into an Estimator model function, because Estimators
create their own global step variable.
Returns:
Clone of the model.
Raises:
ValueError: Cloning fails in the following cases
- cloning a subclassed model with `in_place_reset` set to False.
- compiling the clone when the original model has not been compiled.
"""
if compile_clone and not model.optimizer:
raise ValueError(
'Error when cloning model: compile_clone was set to True, but the '
'original model has not been compiled.')
if model._is_graph_network or isinstance(model, Sequential):
if custom_objects:
with CustomObjectScope(custom_objects):
clone = clone_model(model, input_tensors=input_tensors)
else:
clone = clone_model(model, input_tensors=input_tensors)
if all([isinstance(clone, Sequential),
not clone._is_graph_network,
model.built]):
# Set model inputs to build the model and add input/output properties.
# TODO(kathywu): Add multiple placeholders to handle edge case where
# sequential model has multiple inputs.
clone._set_inputs(
K.placeholder(model._build_input_shape, dtype=model.inputs[0].dtype))
else:
if not in_place_reset:
raise ValueError(
'Model is not a graph network (usually means that it is a subclassed '
'model). The model cannot be cloned, but there is a workaround where '
'the model is reset in-place. To use this, please set the argument '
'`in_place_reset` to `True`. This will reset the attributes in the '
'original model. To restore the attributes, call '
'`in_place_subclassed_model_state_restoration(model)`.')
clone = model
_in_place_subclassed_model_reset(clone)
if input_tensors is not None:
if isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1:
input_tensors = input_tensors[0]
clone._set_inputs(input_tensors)
if compile_clone and model.optimizer:
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = optimizers.TFOptimizer(
model.optimizer.optimizer, optimizer_iterations)
K.track_tf_optimizer(optimizer)
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
if optimizer_iterations is not None:
optimizer.iterations = optimizer_iterations
clone.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model.metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(model.weighted_metrics),
target_tensors=target_tensors)
return clone
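# Illustrative usage sketch (not part of the original module). `compiled_model`
# is assumed to be a functional or Sequential model that has already been
# compiled, which `compile_clone=True` requires.
def _example_clone_and_build(compiled_model):
  return clone_and_build_model(
      compiled_model,
      input_tensors=None,    # placeholders will be created
      compile_clone=True,    # reuse the original loss/metrics/optimizer settings
      in_place_reset=False)  # only needed for subclassed models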
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
from airflow import settings
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from apiclient.discovery import build
from apiclient import errors
from oauth2client.client import GoogleCredentials
logging.getLogger('GoogleCloudML').setLevel(settings.LOGGING_LEVEL)
def _poll_with_exponential_delay(request, max_n, is_done_func, is_error_func):
for i in range(0, max_n):
try:
response = request.execute()
if is_error_func(response):
raise ValueError(
'The response contained an error: {}'.format(response))
elif is_done_func(response):
logging.info('Operation is done: {}'.format(response))
return response
else:
time.sleep((2**i) + (random.randint(0, 1000) / 1000))
except errors.HttpError as e:
if e.resp.status != 429:
logging.info(
'Something went wrong. Not retrying: {}'.format(e))
raise
else:
time.sleep((2**i) + (random.randint(0, 1000) / 1000))
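# Illustrative sketch (not part of the original module): how
# `_poll_with_exponential_delay` is intended to be driven. `get_request` is
# assumed to be an apiclient request whose response is a long-running
# operation resource exposing 'done' and 'error' fields, mirroring the calls
# made in `create_version` and `delete_version` below.
def _example_poll(get_request):
    return _poll_with_exponential_delay(
        request=get_request,
        max_n=9,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)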
class CloudMLHook(GoogleCloudBaseHook):
def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
super(CloudMLHook, self).__init__(gcp_conn_id, delegate_to)
self._cloudml = self.get_conn()
def get_conn(self):
"""
Returns a Google CloudML service object.
"""
credentials = GoogleCredentials.get_application_default()
return build('ml', 'v1', credentials=credentials)
def create_job(self, project_id, job, use_existing_job_fn=None):
"""
        Launches a CloudML job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which CloudML
job will be launched.
:type project_id: string
:param job: CloudML Job object that should be provided to the CloudML
API, such as:
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
        :param use_existing_job_fn: In case a CloudML job with the same
            job_id already exists, this function (if provided) decides
            whether to reuse the existing job: keep waiting for it to
            finish and return its job object. It should accept a CloudML
            job object and return a boolean indicating whether it is OK to
            reuse the existing job. If 'use_existing_job_fn' is not
            provided, the existing CloudML job is reused by default.
:type use_existing_job_fn: function
        :return: The CloudML job object if the job successfully reaches a
            terminal state (which might be FAILED or CANCELLED).
:rtype: dict
"""
request = self._cloudml.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except errors.HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
logging.error(
                            'Job with job_id {} already exists, but it does '
'not match our expectation: {}'.format(
job_id, existing_job))
raise
logging.info(
                    'Job with job_id {} already exists. Waiting for it to '
                    'finish.'.format(job_id))
else:
logging.error('Failed to create CloudML job: {}'.format(e))
raise
return self._wait_for_job_done(project_id, job_id)
def _get_job(self, project_id, job_id):
"""
Gets a CloudML job based on the job name.
        :return: The CloudML job object, if the request succeeds.
:rtype: dict
Raises:
apiclient.errors.HttpError: if HTTP error is returned from server
"""
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = self._cloudml.projects().jobs().get(name=job_name)
while True:
try:
return request.execute()
except errors.HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
logging.error('Failed to get CloudML job: {}'.format(e))
raise
def _wait_for_job_done(self, project_id, job_id, interval=30):
"""
Waits for the Job to reach a terminal state.
        This method will periodically check the job state until the job reaches
a terminal state.
Raises:
apiclient.errors.HttpError: if HTTP error is returned when getting
the job
"""
assert interval > 0
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval)
def create_version(self, project_id, model_name, version_spec):
"""
Creates the Version on Cloud ML.
Returns the operation if the version was created successfully and
raises an error otherwise.
"""
parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
create_request = self._cloudml.projects().models().versions().create(
parent=parent_name, body=version_spec)
response = create_request.execute()
get_request = self._cloudml.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None)
def set_default_version(self, project_id, model_name, version_name):
"""
Sets a version to be the default. Blocks until finished.
"""
full_version_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
request = self._cloudml.projects().models().versions().setDefault(
name=full_version_name, body={})
try:
response = request.execute()
logging.info(
'Successfully set version: {} to default'.format(response))
return response
except errors.HttpError as e:
logging.error('Something went wrong: {}'.format(e))
raise
def list_versions(self, project_id, model_name):
"""
Lists all available versions of a model. Blocks until finished.
"""
result = []
full_parent_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._cloudml.projects().models().versions().list(
parent=full_parent_name, pageSize=100)
response = request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
while next_page_token is not None:
next_request = self._cloudml.projects().models().versions().list(
parent=full_parent_name,
pageToken=next_page_token,
pageSize=100)
response = next_request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
time.sleep(5)
return result
def delete_version(self, project_id, model_name, version_name):
"""
Deletes the given version of a model. Blocks until finished.
"""
full_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
delete_request = self._cloudml.projects().models().versions().delete(
name=full_name)
response = delete_request.execute()
get_request = self._cloudml.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None)
def create_model(self, project_id, model):
"""
Create a Model. Blocks until finished.
"""
        assert model['name'] is not None and model['name'] != ''
project = 'projects/{}'.format(project_id)
request = self._cloudml.projects().models().create(
parent=project, body=model)
return request.execute()
def get_model(self, project_id, model_name):
"""
Gets a Model. Blocks until finished.
"""
        assert model_name is not None and model_name != ''
full_model_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._cloudml.projects().models().get(name=full_model_name)
try:
return request.execute()
except errors.HttpError as e:
if e.resp.status == 404:
logging.error('Model was not found: {}'.format(e))
return None
raise
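# Illustrative usage sketch (not part of the original module): submitting a
# training job through the hook. The project id, job id and trainingInput
# contents are placeholders; the job dict shape follows the `create_job`
# docstring above.
def _example_submit_training_job():
    hook = CloudMLHook(gcp_conn_id='google_cloud_default')
    job = {
        'jobId': 'my_job_id',
        'trainingInput': {
            'scaleTier': 'STANDARD_1',
            # ... remaining training parameters ...
        },
    }
    # Only reuse an existing job with the same id if it is still running.
    return hook.create_job(
        project_id='my-project',
        job=job,
        use_existing_job_fn=lambda existing: existing.get('state') not in
        ['SUCCEEDED', 'FAILED', 'CANCELLED'])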
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2020_11_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
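# Illustrative usage sketch (not part of the generated module): driving these
# operations through a client that exposes them as an attribute. The attribute
# name `inbound_nat_rules`, the client construction, and the rule name are
# assumptions; only the method signatures above come from this file.
async def _example_inbound_nat_rules(client, resource_group_name, load_balancer_name):
    # `list` returns an AsyncItemPaged, so iterate asynchronously.
    async for rule in client.inbound_nat_rules.list(resource_group_name, load_balancer_name):
        print(rule)
    # `begin_delete` is a long-running operation returning an AsyncLROPoller.
    poller = await client.inbound_nat_rules.begin_delete(
        resource_group_name, load_balancer_name, 'example-rule-name')
    await poller.wait()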
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for cros_test_lib (tests for tests? Who'd a thunk it)."""
import os
import sys
import time
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..'))
from chromite.lib import cros_test_lib
from chromite.lib import cros_build_lib_unittest
from chromite.lib import partial_mock
from chromite.lib import timeout_util
# TODO(build): Finish test wrapper (http://crosbug.com/37517).
# Until then, this has to be after the chromite imports.
import mock
# pylint: disable=W0212,W0233
# Convenience alias
Dir = cros_test_lib.Directory
class TruthTableTest(cros_test_lib.TestCase):
"""Test TruthTable functionality."""
def _TestTableSanity(self, tt, lines):
"""Run the given truth table through basic sanity checks.
Args:
tt: A TruthTable object.
      lines: The expected input lines, in order (list of tuples).
"""
    # Check that more than one iterator can be used at once.
iter1 = iter(tt)
iter2 = iter(tt)
self.assertEquals(lines[0], iter1.next())
self.assertEquals(lines[0], iter2.next())
self.assertEquals(lines[1], iter2.next())
    # Check that iterating over the table again works.
for ix, line in enumerate(tt):
self.assertEquals(lines[ix], line)
# Check direct access of input lines.
for i in xrange(len(tt)):
self.assertEquals(lines[i], tt.GetInputs(i))
# Check assertions on bad input to GetInputs.
self.assertRaises(ValueError, tt.GetInputs, -1)
self.assertRaises(ValueError, tt.GetInputs, len(tt))
def testTwoDimensions(self):
"""Test TruthTable behavior for two boolean inputs."""
tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)])
self.assertEquals(len(tt), pow(2, 2))
# Check truth table output.
self.assertFalse(tt.GetOutput((False, False)))
self.assertFalse(tt.GetOutput((False, True)))
self.assertTrue(tt.GetOutput((True, False)))
self.assertTrue(tt.GetOutput((True, True)))
# Check assertions on bad input to GetOutput.
self.assertRaises(TypeError, tt.GetOutput, True)
self.assertRaises(ValueError, tt.GetOutput, (True, True, True))
# Check iteration over input lines.
lines = list(tt)
self.assertEquals((False, False), lines[0])
self.assertEquals((False, True), lines[1])
self.assertEquals((True, False), lines[2])
self.assertEquals((True, True), lines[3])
self._TestTableSanity(tt, lines)
def testFourDimensions(self):
"""Test TruthTable behavior for four boolean inputs."""
false1 = (True, True, True, False)
false2 = (True, False, True, False)
true1 = (False, True, False, True)
true2 = (True, True, False, False)
tt = cros_test_lib.TruthTable(inputs=(false1, false2), input_result=False)
self.assertEquals(len(tt), pow(2, 4))
# Check truth table output.
self.assertFalse(tt.GetOutput(false1))
self.assertFalse(tt.GetOutput(false2))
self.assertTrue(tt.GetOutput(true1))
self.assertTrue(tt.GetOutput(true2))
# Check assertions on bad input to GetOutput.
self.assertRaises(TypeError, tt.GetOutput, True)
self.assertRaises(ValueError, tt.GetOutput, (True, True, True))
# Check iteration over input lines.
lines = list(tt)
self.assertEquals((False, False, False, False), lines[0])
self.assertEquals((False, False, False, True), lines[1])
self.assertEquals((False, True, True, True), lines[7])
self.assertEquals((True, True, True, True), lines[15])
self._TestTableSanity(tt, lines)
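  # Illustrative sketch (not part of the original tests): constructing a truth
  # table directly, mirroring what the tests above exercise. The listed inputs
  # are the combinations that evaluate to True:
  #
  #   tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)])
  #   tt.GetOutput((True, False))   # -> True
  #   tt.GetOutput((False, True))   # -> False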
class VerifyTarballTest(cros_test_lib.MockTempDirTestCase):
"""Test tarball verification functionality."""
TARBALL = 'fake_tarball'
def setUp(self):
self.rc_mock = self.StartPatcher(cros_build_lib_unittest.RunCommandMock())
def _MockTarList(self, files):
"""Mock out tarball content list call.
Args:
files: A list of contents to return.
"""
self.rc_mock.AddCmdResult(
partial_mock.ListRegex('tar -tf'), output='\n'.join(files))
def testNormPath(self):
"""Test path normalization."""
tar_contents = ['./', './foo/', './foo/./a', './foo/./b']
dir_struct = [Dir('.', []), Dir('foo', ['a', 'b'])]
self._MockTarList(tar_contents)
cros_test_lib.VerifyTarball(self.TARBALL, dir_struct)
def testDuplicate(self):
"""Test duplicate detection."""
tar_contents = ['a', 'b', 'a']
dir_struct = ['a', 'b']
self._MockTarList(tar_contents)
self.assertRaises(AssertionError, cros_test_lib.VerifyTarball, self.TARBALL,
dir_struct)
class MockTestCaseTest(cros_test_lib.TestCase):
"""Tests MockTestCase functionality."""
class MyMockTestCase(cros_test_lib.MockTestCase):
"""Helper class for testing MockTestCase."""
def testIt(self):
pass
class Mockable(object):
"""Helper test class intended for having values mocked out."""
TO_BE_MOCKED = 0
TO_BE_MOCKED2 = 10
TO_BE_MOCKED3 = 20
def GetPatcher(self, attr, val):
return mock.patch('__main__.MockTestCaseTest.Mockable.%s' % attr,
new=val)
def testPatchRemovalError(self):
"""Verify that patch removal during tearDown is robust to Exceptions."""
tc = self.MyMockTestCase('testIt')
patcher = self.GetPatcher('TO_BE_MOCKED', -100)
patcher2 = self.GetPatcher('TO_BE_MOCKED2', -200)
patcher3 = self.GetPatcher('TO_BE_MOCKED3', -300)
patcher3.start()
tc.setUp()
tc.StartPatcher(patcher)
tc.StartPatcher(patcher2)
patcher.stop()
self.assertEquals(self.Mockable.TO_BE_MOCKED2, -200)
self.assertEquals(self.Mockable.TO_BE_MOCKED3, -300)
self.assertRaises(RuntimeError, tc.tearDown)
    # Make sure that even though an exception is raised for stopping 'patcher',
    # we continue to stop 'patcher2', and run mock.patch.stopall().
self.assertEquals(self.Mockable.TO_BE_MOCKED2, 10)
self.assertEquals(self.Mockable.TO_BE_MOCKED3, 20)
class TestCaseTest(unittest.TestCase):
"""Tests TestCase functionality."""
def testTimeout(self):
"""Test that test cases are interrupted when they are hanging."""
class TimeoutTestCase(cros_test_lib.TestCase):
"""Test case that raises a TimeoutError because it takes too long."""
TEST_CASE_TIMEOUT = 1
def testSleeping(self):
"""Sleep for 2 minutes. This should raise a TimeoutError."""
time.sleep(2 * 60)
raise AssertionError('Test case should have timed out.')
# Run the test case, verifying it raises a TimeoutError.
test = TimeoutTestCase(methodName='testSleeping')
self.assertRaises(timeout_util.TimeoutError, test.testSleeping)
if __name__ == '__main__':
cros_test_lib.main()
|
|
from galaxy_analysis.plot.plot_styles import *
import numpy as np
from scipy import integrate
import yt
import os, sys
import matplotlib.pyplot as plt
import glob
# AE: Comment out below import unless you feel like
# installing a bunch of stuff:
# from galaxy_analysis.plot.plot_styles import *
SolarAbundances = np.array([0.02, 0.28, 3.26E-3, 1.32E-3, 8.65E-3,
2.22E-3, 9.31E-4, 1.08E-3, 6.44E-4, 1.01E-4, 1.73E-3])
# globals since these were ifdefs in an old version of the code
# here for backwards compatibility, but now these are
# read from the gizmo parameter file if they are there
# (only needed when log-spaced bins are used)
AGE_BIN_START = 1.0 # Myr
AGE_BIN_END = 14000.0 # Myr
SOLAR_METALLICITY = 0.02 # as defined in Gizmo / FIRE defaults
CYTHON_ON = True
if CYTHON_ON:
import pyximport; pyximport.install(setup_args={'include_dirs':[np.get_include()]},
language_level=3)
from galaxy_analysis.gizmo import age_fields
from galaxy_analysis.utilities import cy_convert_abundances as ca
# in Gizmo output, first metal tracer field corresponding to
# the age bins (0-14 are the 11 default species + 4 r-process)
OFFSET = 15
# Hard-coded: list of elements in standard file model (in order)
elements = ['Total','He','C','N','O','Ne','Mg','Si','S','Ca','Fe']
element_num = {}
i = 0
for e in elements:
element_num[e] = i
i = i + 1
def generate_metal_fields(ds, _agebins=None,
_elements=elements,
_yields=None,
ptype='PartType0',
age_is_fraction=False):
"""
Generate derived fields mapping the age tracers to
actual elemental abundances using the given set of
yields. yields must be a NxM array with N = the
number of age bins and M = the number of elements
(M = 11 for default FIRE). Each value here should be the
yield (in solar masses) per solar mass of star formation
in each age bin for each element.
The derived fields computing elemental mass loop through
all the age bin tracer fields.
Derived fields will be of form:
(ptype,"ELEMENTNAME_mass")
(ptype,"ELEMENTNAME_fraction")
(ptype,"ELEMENTNAME_actual_mass")
where 'ptype' is the passed particle type ("PartType0" or
"PartType4" probably) and the "ELEMENTNAME_actual_mass" field
is the actual mass of that element in the yields followed in
the simulation (e.g. something like:
(ptype,"Metallicity_X")*(ptype,"particle_mass"), where X
is the metallicity number corresponding to that given element).
"""
def _metal_mass_test(_ptype, _ei):
# test metals in bin zero
def temp(field,data):
mass_p = np.zeros(np.shape( data[(_ptype,'particle_mass')]))
# do this in cython:
if CYTHON_ON:
for i in np.arange(np.size(_agebins)-1):
fname = 'Metallicity_%02i'%(OFFSET + i)
mass_p += data[(ptype,fname)].value * _yields[i,_ei] #(agebinnum, elementnum)
#
# Both the calculations below *should* work but don't for some reason. I am
# absolutely mystified by this.... it works just fine in tests outside of
# the derived fields routines.... but for some reason this gives wrong answers...
# unfortunate since its slightly faster (and is how I wrote the cython version....
#
# age_vals = np.array([ data[(_ptype ,"Metallicity_%02i"%(OFFSET+i))].value for i in np.arange(np.size(_agebins)-1)])
# mass_p = np.matmul(age_vals.T, _yields[:,ei].T)
# mass_p = np.matmul(_yields[:,ei].T, age_vals)
else:
for i in np.arange(np.size(_agebins)-1):
fname = 'Metallicity_%02i'%(OFFSET + i)
mass_p += data[(_ptype,fname)].value * _yields[i,_ei] #(agebinnum, elementnum)
# age_vals = np.array([ data[(_ptype,"Metallicity_%02i"%(OFFSET+i))] for i in np.arange(np.size(_agebins)-1)])
# mass_p = np.matmul(_yields[:ei].T, age_vals)
#np.sum(np.transpose(age_vals) * _yields[:,ei], axis=1)
if age_is_fraction:
mass_p = (mass_p * data[(_ptype,'particle_mass')].to('code_mass').value) * yt.units.Msun
else:
mass_p = mass_p * yt.units.Msun
return mass_p / data.ds.hubble_constant
return temp
def _metal_fraction_test(_ptype, _e):
def temp(field,data):
Mp = data[(_ptype,'particle_mass')].to('Msun')
abund = data[('all',_ptype + '_' + _e + '_mass')].to('Msun') / Mp
abund[Mp==0.0] = 0.0
return abund
return temp
def _metal_mass_actual_test(_ptype,_ei):
def temp(field,data):
Mp = data[(_ptype,'particle_mass')].to('Msun')
abund = data[(_ptype,"Metallicity_%02i"%(_ei))]
return abund*Mp
return temp
for ei,e in enumerate(_elements):
ds.add_field( ('all', ptype + '_' + e + '_mass'), sampling_type='particle',
function=_metal_mass_test(ptype, ei),
units = 'Msun', force_override=True)
ds.add_field( ('all',ptype + '_' + e + '_fraction'), sampling_type='particle',
function=_metal_fraction_test(ptype,e),
units='', force_override=True)
ds.add_field( ('all',ptype + '_' + e + '_actual_mass'), sampling_type='particle',
function=_metal_mass_actual_test(ptype,ei),
units='Msun', force_override=True)
return
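# Illustrative usage sketch (not part of the original module): wiring the
# derived fields onto a snapshot. It assumes it is run from a simulation
# directory (so `get_bins` can find gizmo.out / the parameter file) and that
# the snapshot path exists; the element name 'Fe' is just an example.
def _example_generate_fields(snapshot_path="./output/snapshot_000.hdf5"):
    bins = get_bins()                                   # age bin edges in Myr
    yields = construct_yields(bins / 1000.0, yieldtype='total')  # bins in Gyr
    ds = yt.load(snapshot_path)
    generate_metal_fields(ds, _agebins=bins, _yields=yields)
    ad = ds.all_data()
    return ad[('all', 'PartType0_Fe_mass')]             # reconstructed Fe mass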
def _generate_star_metal_fields(ds,
_agebins=None,
_elements=elements,
_yields=None,
ptype='PartType4',
age_is_fraction = False):
"""
    See the function above. Computes the surface abundances
    of star particles as derived from the age tracers.
"""
def _star_metal_mass_test(_ptype, _ei):
# test metals in bin zero
def temp(field,data):
mass_p = np.zeros(np.shape( data[(_ptype,'particle_mass')]))
for i in np.arange(np.size(_agebins)-1):
fname = 'Metallicity_%02i'%(OFFSET + i)
mass_p += data[(ptype,fname)].value * _yields[i,_ei] #(agebinnum, elementnum)
if age_is_fraction:
mass_p = (mass_p * data[(ptype,'particle_mass')].to('code_mass').value) * yt.units.Msun
else:
mass_p = mass_p * yt.units.Msun
return mass_p / data.ds.hubble_constant
return temp
for ei,e in enumerate(_elements):
ds.add_field( ('all', ptype + '_' + e + '_mass'), sampling_type='particle',
function=_star_metal_mass_test(ptype, ei),
units = 'Msun', force_override=True)
return
#
# Extracted FIRE yield model:
# - Model is as-is from the code, but does not include any
# metallicity dependence (wind rates are fixed to a chosen
# metallicity, default is solar)
#
#
def sn_rate(t):
"""
CCSNE rate
    SNe / Gyr per solar mass of star formation
Changed output to /Gyr to keep same units as input t
"""
agemin = 0.003401 # Gyr
agebrk = 0.010370 # Gyr
agemax = 0.03753 # Gyr
RSNE = 0.0
if (t>agemin):
if (t <= agebrk):
RSNE = 5.408E-4
elif (t<=agemax):
RSNE=2.516E-4
if (t > agemax):
#RSNE=5.3E-8+1.6*np.exp(-0.5*((t-0.05)/0.01)*((t-0.05)/0.01)) # This is JUST SNIa
RSNE=0.0 # set to zero for CCSNE
return RSNE * 1000.0
def snIa_rate(t):
"""
SNIa rate (SN/Gyr) - t in Gyr
"""
agemin = 0.003401 # Gyr
agebrk = 0.010370 # Gyr
agemax = 0.03753 # Gyr
RSNE = 0.0
if (t > agemax):
RSNE=5.3E-8+1.6E-5*np.exp(-0.5*((t-0.05)/0.01)*((t-0.05)/0.01)) # This is JUST SNIa
return RSNE * 1000.0
def wind_yields(i,element=None, Z = 1.0E-5, FIRE_Z_scaling = False):
"""
Yields (in fraction) per element with winds
"""
Zsolar = Z / SOLAR_METALLICITY
yields = np.array([0.0, 0.36,0.016,0.0041,0.0118] + [0.0]*6)
if (Z < 0.033):
yields[4] *= Zsolar
else:
yields[4] *= 1.65
if FIRE_Z_scaling:
# only first 5 elements
i = 5
yields[:i] = yields[:i]*(1.0-Z)+(Zsolar*SolarAbundances[:i]-SolarAbundances[:i])
yields[0] = np.sum(yields[2:]) # total yield
if yields[4] < 0:
yields[4] = 0.0
print("Total O yield in winds is negative due to Z scaling")
if (np.any(yields < 0.0)):
print(yields)
print("Error in wind yields - negative", Z)
raise RuntimeError
    # if element passed, use that - otherwise use yield indices
    if not (element is None):
        if element == 'all':
            return yields
        i = element_num[element]
    return yields[i]
def wind_rate(t, Z = 1.0E-5, GasReturnFraction = 1.0):
"""
    Mass loss rate from stellar winds. Z is the absolute metal mass fraction (not in solar units).
"""
Zsolar = Z / SOLAR_METALLICITY
p = 0.0
if (t <= 0.001):
p = 11.6846
else:
if (t <=0.0035):
logZ=np.log10(Zsolar)
p=11.6846*Zsolar*10.0**(1.838*(0.79+logZ)*(np.log10(t)-(-3.00)))
else:
if (t<=0.1):
p=72.1215*(t/0.0035)**(-3.25)+0.0103
else:
p=1.03*t**(-1.1) / (12.9-np.log(t)) # bug: this was log10 at first
#if (t < 0.1):
# p = p * 1.0
# assuming wind_rate is in Msun / Myr per solar mass of SF
rate = p * GasReturnFraction * 1.4 * 0.291175
return rate # might already be / Gyr
def snIa_yields(i, element = None, Z = 1.0E-5, FIRE_Z_scaling = False, MSNe = 1.4):
# ['Total','He','C','N','O','Ne','Mg','Si','S','Ca','Fe']
yields = np.array([1.4,0.0,0.049,1.2E-6,0.143,0.0045,0.0086,0.156,0.087,0.012,0.743])
Zsolar = Z / SOLAR_METALLICITY
if FIRE_Z_scaling:
yields = yields / MSNe
yields = yields*(1.0-Z)+(Zsolar*SolarAbundances-SolarAbundances)
yields = yields * MSNe
yields[1] = 0.0
if (np.any(yields < 0.0)):
if yields[3] < 0.0:
print("N yield in SNIA is negative")
yields[3] = 0.0
if np.any(yields<0.0):
print("Error in SNIA yields - negative")
print(yields)
raise RuntimeError
    # if element passed, use that - otherwise use yield indices
    if not (element is None):
        if element == 'all':
            return yields
        i = element_num[element]
    return yields[i]
def snII_yields(i, element = None, Z = 1.0E-5, FIRE_Z_scaling=False, MSNe = 10.5):
    # if element passed, use that - otherwise use yield indices
# ['Total','He','C','N','O','Ne','Mg','Si','S','Ca','Fe']
yields = np.array([2.0,3.87,0.133,0.0479,1.17,0.30,0.0987,0.0933,0.0397,0.00458,0.0741])
Zsolar = Z / SOLAR_METALLICITY
if (Z < 0.033):
yields[3] *= Zsolar
else:
yields[3] *= 1.65
yields[0] = yields[0] + yields[3]-0.0479
if FIRE_Z_scaling:
yields = yields / MSNe
yields = yields*(1.0-Z)+(Zsolar*SolarAbundances-SolarAbundances)
yields = yields * MSNe
if (np.any(yields < 0.0)):
if yields[3] < 0:
print("N yield in SNII is less than zero due to FIRE scaling")
yields[3] = 0.0
if (np.any(yields<0.0)):
print("Error in SNII yields - negative")
print(yields)
raise RuntimeError
if not (element is None):
if element == 'all':
return yields
i = element_num[element]
return yields[i]
def construct_yields(agebins, yieldtype = 'total', Z = 1.0E-5, FIRE_Z_scaling = False):
"""
Z is in fraction, not solar
"""
# Z = Z / SOLAR_METALLICITY
points = np.sort([0.003401, 0.010370, 0.03753, 0.001, 0.05, 0.10, 1.0, 14.0])
yields = np.zeros( (np.size(agebins)-1 , np.size(elements)))
if yieldtype == 'snII' or yieldtype == 'total' or yieldtype == 'sn_only':
numsnII = np.zeros(np.size(agebins)-1)
for i in np.arange(np.size(agebins) - 1):
if i == 0:
mint = 0.0
else:
mint = agebins[i]
maxt = agebins[i+1]
numsnII[i] = integrate.quad( sn_rate, mint, maxt, points = points)[0]
yields += np.outer(numsnII, snII_yields(-1, element = 'all', Z = Z, FIRE_Z_scaling=FIRE_Z_scaling))
if yieldtype == 'snIa' or yieldtype == 'total' or yieldtype == 'sn_only':
numsnIa = np.zeros(np.size(agebins)-1)
for i in np.arange(np.size(agebins) - 1 ):
if i == 0:
mint = 0.0
else:
mint = agebins[i]
maxt = agebins[i+1]
numsnIa[i] = integrate.quad( snIa_rate, mint, maxt, points = points)[0]
yields += np.outer(numsnIa, snIa_yields(-1, element = 'all', Z=Z, FIRE_Z_scaling=FIRE_Z_scaling))
if yieldtype == 'winds' or yieldtype == 'total':
wind_mass = np.zeros(np.size(agebins)-1)
windpoints = [0.001, 0.0035, 0.1, 1.0, 14.0]
for i in np.arange(np.size(agebins) - 1 ):
if i == 0:
mint = 0.0
else:
mint = agebins[i]
maxt = agebins[i+1]
wind_mass[i] = integrate.quad(wind_rate, mint, maxt, points = windpoints, args=(Z,1.0))[0]
yields += np.outer(wind_mass, wind_yields(-1, element = 'all', Z = Z, FIRE_Z_scaling=FIRE_Z_scaling))
return yields
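# Illustrative sketch (not part of the original module): building a yield
# table for log-spaced age bins without reading any simulation output. The
# bin count and metallicity below are arbitrary example values.
def _example_yield_table(n_bins=16, Z=SOLAR_METALLICITY):
    agebins = np.logspace(np.log10(AGE_BIN_START), np.log10(AGE_BIN_END),
                          n_bins + 1) / 1000.0   # Myr -> Gyr; n_bins+1 edges
    yields = construct_yields(agebins, yieldtype='total', Z=Z)
    # yields[i, j] = mass (Msun) of element j produced per Msun of star
    # formation by stars in age bin i; columns are ordered as in `elements`.
    return yields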
def get_bins(config_file = "./gizmo.out", param_file = "./params.txt-usedvalues",
binfile = "age_bins.txt", delimiter = '='):
"""
    Assuming gizmo.out is included in the working directory, generate the
    age bins. Requires an age bin file if custom bins were used.
count = 0
logbins = True
if "GIZMO_config.h" in config_file:
delimiter = " "
for line in open(config_file,'r'):
if "GALSF_FB_FIRE_AGE_TRACERS"+delimiter in line:
num_tracers = int(line.split(delimiter)[1])
if "GALSF_FB_FIRE_AGE_TRACERS_CUSTOM" in line:
logbins = False
if count > 100: # gizmo.out can be huge.....
break
count = count + 1
if logbins:
age_bin_start = AGE_BIN_START
age_bin_end = AGE_BIN_END
if os.path.isfile(param_file):
fname = param_file
elif 'gizmo.out' in config_file:
fname = param_file
for line in open(fname,'r'):
if "AgeTracerBinStart" in line:
                age_bin_start = float(line.split(" ")[-1])
elif "AgeTracerBinEnd" in line:
                age_bin_end = float(line.split(" ")[-1])
NBINS = num_tracers + 1
binstart = np.log10(age_bin_start)
binend = np.log10(age_bin_end)
bins = np.logspace(binstart, binend, NBINS)
else:
# read bins from file
try:
bins = np.genfromtxt(binfile)
except:
print("Custom bins. Problem loading binfile " + binfile)
raise ValueError
return bins
def compute_error(outfile = 'error.dat', overwrite=False,
limit_input=False, final_only = False,
age_is_fraction = False,
FIRE_Z_scaling = False, Z = 1.0E-5):
"""
Compute the error in the given age tracer model, defined
for each element (i) as:
(M_i_age - M_i_sim) / M_i_sim
where M_i_age is the mass of that element as derived from
convolving age tracers with extracted yield model from FIRE
and M_i_sim is the actual mass of that element in the simulation
"""
bins = get_bins()
# need to estimate Z from data a bit
    total_yields = construct_yields(bins/1000.0,  # pass bins as Gyr
                                    Z = Z, yieldtype = 'total', FIRE_Z_scaling=FIRE_Z_scaling)
if final_only:
ds_list = glob.glob('./output/snapshot*.hdf5')
ds_list = [ds_list[0], ds_list[-1]]
outfile = outfile.split('.')[0] + '-final.' + outfile.split('.')[1]
elif limit_input:
ds_list = np.sort(glob.glob('./output/snapshot_*0.hdf5'))
else:
ds_list = np.sort(glob.glob('./output/snapshot_*.hdf5'))
#
nlines = 0
open_mode = 'w'
if os.path.exists(outfile) and (not overwrite):
data = np.genfromtxt(outfile)
nlines = np.size(data[:,0])
if nlines == (np.size(ds_list)):
print("Not overwriting output")
if nlines > np.size(ds_list):
print("This file seems to exist for more data than available. Double check that you want to compute")
return
else:
print("Only computing for files that don't already exist in error output - assuming contiguous")
open_mode = 'a'
f = open(outfile, open_mode)
fcol = open("./final_error.dat",'w')
ds0 = yt.load(ds_list[0])
data0 = ds0.all_data()
generate_metal_fields(ds0, _agebins = bins, _yields = total_yields, age_is_fraction=age_is_fraction)
mtrue_initial = {}
for e in elements:
m = data0[('all','PartType0_'+e+'_actual_mass')]
mtrue_initial[e] = np.sum( m ).to('Msun')
dsstart = 0
if nlines > 0:
ds_list = ds_list[nlines:]
dsstart = nlines
for dsi, dsname in enumerate(ds_list):
finalloop=False
print(dsi, len(ds_list))
if dsi == (len(ds_list) -1):
finalloop = True
dsi = dsi + dsstart
ds = yt.load(dsname)
data = ds.all_data()
fields = ds.field_list
generate_metal_fields(ds, _agebins=bins, _yields=total_yields, age_is_fraction=age_is_fraction)
ptypes = np.unique([x[0] for x in ds.field_list])
metals = np.unique([x[1] for x in ds.field_list if ((x[0] == 'PartType0') and ('Metal' in x[1]))])
M_new_stars = 0.0 * yt.units.Msun
if 'PartType4' in ptypes:
print("File %003i: Number of new stars %00005i"%(dsi, np.size(data[('PartType4','Metallicity_00')])))
M_new_stars = data[('PartType4','particle_mass')].to('Msun')
_generate_star_metal_fields(ds, _agebins = bins, _yields = total_yields, age_is_fraction=age_is_fraction)
else:
print("File %003i: No star particles "%(dsi))
if ds.cosmological_simulation: # might need to turn off if age is fraction
current_redshift = ds.current_redshift
HubbleParam = 0.702
else:
current_redshift = 0.0
HubbleParam = 1.0
f.write("%03i %3.3f "%(dsi,current_redshift))
tolerance = 1.0E-6 * yt.units.Msun
if finalloop:
fcol.write("Final Output Stats: i = %03i z = %3.3f\n"%(dsi,current_redshift))
fcol.write("element total_error gas_error star_error\n")
te_sum = ge_sum = se_sum = 0.0
ne = 1
for ei,e in enumerate(elements):
# if e=='Total':
# continue
m = data[('all','PartType0_' + e + '_mass')]
mstar = 0.0 * yt.units.Msun
mtrue_mstar = 0.0 * yt.units.Msun
if 'PartType4' in ptypes:
mstar = data[('all','PartType4_' + e + '_mass')]
mtrue_mstar = data[('PartType4','Metallicity_%02i'%(ei))].value * data[('PartType4','particle_mass')].to('Msun')
mtrue = data[('all','PartType0_'+e+'_actual_mass')]
mtrue_total = np.max( [(np.sum(mtrue) + np.sum(mtrue_mstar) - mtrue_initial[e]), 0.0])
m_total = np.sum(m)/HubbleParam + np.sum(mstar)
# if ds.cosmological_simulation:
# mstar = mstar / HubbleParam
# m_total = m_total / HubbleParam
# if mtrue_total == 0.0:
# error = 0.00
# else:
# error = (m_total.value - mtrue_total) / mtrue_total
f.write("%5.5E %5.5E %5.5E %5.5E "%(m_total, mtrue_total, np.sum(mstar), np.sum(mtrue_mstar) ))
# final?
if finalloop:
print(np.sum(m), np.sum(m)/HubbleParam, np.sum(mtrue)-mtrue_initial[e], mtrue_initial[e])
total_error = (m_total.value-mtrue_total)/mtrue_total
gas_error = (np.sum(m)/HubbleParam - (np.sum(mtrue)-mtrue_initial[e])) / (np.sum(mtrue)-mtrue_initial[e])
star_error = (np.sum(mstar) - np.sum(mtrue_mstar)) / np.sum(mtrue_mstar)
fcol.write("%10s %5.5E %5.5E %5.5E \n"%(e, total_error, gas_error, star_error))
if (e != 'Total') and (e != 'He') and (e != 'N'):
te_sum += np.abs(total_error)
ge_sum += np.abs(gas_error)
se_sum += np.abs(star_error)
ne = ne + 1
f.write("\n")
f.flush()
os.fsync(f.fileno())
del ds # just in case
del data # just in case
if finalloop:
ne = 1.0 * ne
fcol.write("%10s %5.5E %5.5E %5.5E \n"%("average", te_sum/ne, ge_sum/ne, se_sum/ne))
f.close()
fcol.flush()
os.fsync(fcol.fileno())
fcol.close()
return
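# Each row of error.dat written above is laid out as:
#   snapshot_index  redshift  then, per element: m_total  mtrue_total  m_star  mtrue_star
# which is why plot_error below reads columns 2 + 4*ei and 3 + 4*ei for element ei.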
def plot_error(infile = 'error.dat'):
data = np.genfromtxt(infile)
fig, ax = plt.subplots()
fig.set_size_inches(6,6)
time = data[:,0] # Myr
redshift = data[:,1] # redshift
cosmological = False
if np.size(redshift[redshift>0]) > 0:
plot_x = redshift + 1
cosmological = True
# HubbleParam = 0.702 # correction applied in computation now
HubbleParam = 1.0
else:
plot_x = time
HubbleParam = 1.0
i = 0
for ei, e in enumerate(elements):
if e == 'Total':
continue
if (not(e == 'C')) and (not(e == 'O')) and (not(e == 'N')) and (not(e == 'Fe')) and (not(e=='Mg')):
continue
mass = data[:,2 + 4*ei] / HubbleParam
mtrue = data[:,3 + 4*ei]
error = np.zeros(np.size(mtrue))
error[mtrue>0] = (mass[mtrue>0]-mtrue[mtrue>0])/mtrue[mtrue>0]
ax.plot(plot_x, np.abs(error), lw = 3, ls = '-', label = e, color = 'C%i'%(i))
# ax.plot(plot_x[error<0], np.abs(error[error<0]), lw = 3, ls = ':', color = 'C%i'%(i))
i = i + 1
if cosmological:
ax.set_xlim(15.0+1, 0.0 + 1)
ax.set_xlabel("1 + z")
ax.semilogx()
else:
ax.set_xlim(0.0, np.max([np.max(time),50.0]))
ax.set_xlabel("Time (Myr)")
ax.set_ylim(1.0E-4,1.0)
ax.semilogy()
ax.set_ylabel("Fractional Error")
ax.legend(loc="best",ncol=3)
plt.minorticks_on()
plt.tight_layout()
fig.savefig("error.png")
return
if __name__ == "__main__":
# overwrite = False
# limit_input = False
# final_only = False
kwargs = {}
if len(sys.argv) > 1:
for k in ['overwrite','limit_input','final_only','age_is_fraction', 'FIRE_Z_scaling']:
if k in sys.argv:
kwargs[k] = sys.argv[sys.argv.index(k) + 1] == "True"
for k in ['outfile']:
if k in sys.argv:
kwargs[k] = str(sys.argv[sys.argv.index(k)+1])
for k in ['Z']:
if k in sys.argv:
kwargs[k] = float(sys.argv[sys.argv.index(k)+1])
compute_error(**kwargs)
plot_error()
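# Example invocation (a sketch; the script name and the values are assumptions, while the
# key/value pairs follow the simple "key value" argv parsing above):
#   python compute_error.py overwrite True age_is_fraction True outfile error.dat Z 0.014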
|
|
from django.conf import settings
from protein.models import Protein, ProteinConformation, ProteinAnomaly, ProteinState, ProteinSegment
from residue.models import Residue
from residue.functions import dgn, ggn
from structure.models import *
from structure.functions import HSExposureCB, PdbStateIdentifier, update_template_source, compare_and_update_template_source
from common.alignment import AlignedReferenceTemplate, GProteinAlignment
from common.definitions import *
from common.models import WebLink
from signprot.models import SignprotComplex
import structure.structural_superposition as sp
import structure.assign_generic_numbers_gpcr as as_gn
from structure.homology_modeling_functions import GPCRDBParsingPDB
import Bio.PDB as PDB
from collections import OrderedDict
import os
import logging
import pprint
from io import StringIO, BytesIO
import sys
import re
import math
import yaml
import traceback
import subprocess
from copy import deepcopy
import pprint
gprotein_segments = ProteinSegment.objects.filter(proteinfamily='Alpha')
gprotein_segment_slugs = [i.slug for i in gprotein_segments]
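# Expected heavy-atom count per residue (one-letter code); used below to flag residues
# whose structures have missing atom coordinates (fewer atoms present than expected).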
atom_num_dict = {'E':9, 'S':6, 'Y':12, 'G':4, 'A':5, 'V':7, 'M':8, 'L':8, 'I':8, 'T':7, 'F':11, 'H':10, 'K':9,
'D':8, 'C':6, 'R':11, 'P':7, 'Q':9, 'N':8, 'W':14, '-':0}
class SignprotModeling():
def __init__(self, main_structure, signprot, template_source, trimmed_residues, alignment, main_pdb_array):
self.main_structure = main_structure
self.signprot = signprot
self.template_source = template_source
self.trimmed_residues = trimmed_residues
self.a = alignment
self.main_pdb_array = main_pdb_array
self.target_signprot = None
def run(self):
parse = GPCRDBParsingPDB()
self.signprot_complex = SignprotComplex.objects.get(structure=self.main_structure)
structure_signprot= self.signprot_complex.protein
if self.signprot!=False:
self.target_signprot = Protein.objects.get(entry_name=self.signprot)
else:
self.target_signprot = self.signprot_complex.protein
self.signprot_protconf = ProteinConformation.objects.get(protein=self.target_signprot)
sign_a = GProteinAlignment()
sign_a.run_alignment(self.target_signprot, structure_signprot)
io = StringIO(self.main_structure.pdb_data.pdb)
assign_cgn = as_gn.GenericNumbering(pdb_file=io, pdb_code=self.main_structure.pdb_code.index, sequence_parser=True, signprot=structure_signprot)
signprot_pdb_array = assign_cgn.assign_cgn_with_sequence_parser(self.signprot_complex.alpha)
# Alignment exception in HN for 6OIJ, shifting alignment by 6 residues
if self.main_structure.pdb_code.index=='6OIJ':
keys = list(signprot_pdb_array['HN'].keys())
new_HN = OrderedDict()
for i, k in enumerate(signprot_pdb_array['HN']):
if i<8:
new_HN[k] = 'x'
else:
new_HN[k] = signprot_pdb_array['HN'][keys[i-6]]
signprot_pdb_array['HN'] = new_HN
new_array = OrderedDict()
# Initiate complex part of template source
source_resis = Residue.objects.filter(protein_conformation__protein=self.target_signprot)
for res in source_resis:
if res.protein_segment.slug not in self.template_source:
self.template_source[res.protein_segment.slug] = OrderedDict()
if res.protein_segment.category=='loop':
self.template_source[res.protein_segment.slug][str(res.sequence_number)] = [None, None]
else:
self.template_source[res.protein_segment.slug][res.display_generic_number.label] = [self.main_structure, self.main_structure]
# Superimpose missing regions H1 - hfs2
alt_complex_struct = None
segs_for_alt_complex_struct = []
if self.main_structure.pdb_code.index!='3SN6':
segs_for_alt_complex_struct = ['H1', 'h1ha', 'HA', 'hahb', 'HB', 'hbhc', 'HC', 'hchd', 'HD', 'hdhe', 'HE', 'hehf', 'HF', 'hfs2']
alt_complex_struct = Structure.objects.get(pdb_code__index='3SN6')
io = StringIO(alt_complex_struct.pdb_data.pdb)
alt_signprot_complex = SignprotComplex.objects.get(structure__pdb_code__index='3SN6')
alt_assign_cgn = as_gn.GenericNumbering(pdb_file=io, pdb_code='3SN6', sequence_parser=True, signprot=alt_signprot_complex.protein)
alt_signprot_pdb_array = alt_assign_cgn.assign_cgn_with_sequence_parser(alt_signprot_complex.alpha)
before_cgns = ['G.HN.50', 'G.HN.51', 'G.HN.52', 'G.HN.53']
after_cgns = ['G.H5.03', 'G.H5.04', 'G.H5.05', 'G.H5.06']
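# Four CGN anchors on each side (end of HN, start of H5) frame the H1-hfs2 stretch taken
# from 3SN6; LoopSuperpose is presumably fitted on these anchors, and they are dropped
# again after superposition (key_list[4:-4] below).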
orig_residues1 = parse.fetch_residues_from_array(signprot_pdb_array['HN'], before_cgns)
orig_residues2 = parse.fetch_residues_from_array(signprot_pdb_array['H5'], after_cgns)
orig_residues = parse.add_two_ordereddict(orig_residues1, orig_residues2)
alt_residues1 = parse.fetch_residues_from_array(alt_signprot_pdb_array['HN'], before_cgns)
alt_residues2 = parse.fetch_residues_from_array(alt_signprot_pdb_array['H5'], after_cgns)
# for i,j in orig_residues.items():
# print(i, j, j[0].get_parent())
# print('ALTERNATIVES')
# for i,j in alt_residues1.items():
# print(i, j, j[0].get_parent())
# for i,j in alt_residues2.items():
# print(i, j, j[0].get_parent())
alt_middle = OrderedDict()
for s in segs_for_alt_complex_struct:
alt_middle = parse.add_two_ordereddict(alt_middle, alt_signprot_pdb_array[s])
self.template_source = update_template_source(self.template_source, list(self.template_source[s].keys()), alt_complex_struct, s)
alt_residues = parse.add_two_ordereddict(parse.add_two_ordereddict(alt_residues1, alt_middle), alt_residues2)
del_list = []
for r, t in alt_middle.items():
if t=='x':
del_list.append(r)
for r in del_list:
del alt_residues[r]
superpose = sp.LoopSuperpose(orig_residues, alt_residues)
new_residues = superpose.run()
key_list = list(new_residues.keys())[4:-4]
for key in key_list:
seg = key.split('.')[1]
signprot_pdb_array[seg][key] = new_residues[key]
# alt local loop alignment
alt_sign_a = GProteinAlignment()
alt_sign_a.run_alignment(self.target_signprot, alt_signprot_complex.protein, segments=segs_for_alt_complex_struct)
for alt_seg in segs_for_alt_complex_struct:
sign_a.reference_dict[alt_seg] = alt_sign_a.reference_dict[alt_seg]
sign_a.template_dict[alt_seg] = alt_sign_a.template_dict[alt_seg]
sign_a.alignment_dict[alt_seg] = alt_sign_a.alignment_dict[alt_seg]
# fix h1ha and hahb and hbhc
if self.target_signprot.entry_name!='gnas2_human':
h1ha = Residue.objects.filter(protein_conformation__protein=alt_signprot_complex.protein, protein_segment__slug='h1ha')
h1ha_dict, hahb_dict = OrderedDict(), OrderedDict()
for h in h1ha:
h1ha_dict[h.generic_number.label] = 'x'
signprot_pdb_array['h1ha'] = h1ha_dict
right_order = sorted(list(signprot_pdb_array['hahb'].keys()), key=lambda x: (x))
for r in right_order:
hahb_dict[r] = signprot_pdb_array['hahb'][r]
signprot_pdb_array['hahb'] = hahb_dict
# Let Modeller model buffer regions
self.trimmed_residues.append('s1h1_6')
self.trimmed_residues.append('hfs2_1')
self.trimmed_residues.append('hfs2_2')
self.trimmed_residues.append('hfs2_3')
self.trimmed_residues.append('hfs2_4')
self.trimmed_residues.append('hfs2_5')
self.trimmed_residues.append('hfs2_6')
self.trimmed_residues.append('hfs2_7')
self.trimmed_residues.append('G.S2.01')
self.trimmed_residues.append('G.S2.02')
self.trimmed_residues.append('s4h3_4')
self.trimmed_residues.append('s4h3_5')
# New loop alignments for signprot. If length differs between ref and temp, buffer is created in the middle of the loop
loops = [i.slug for i in ProteinSegment.objects.filter(proteinfamily='Alpha', category='loop')]
loops_to_model = []
for r_seg, t_seg, a_seg in zip(sign_a.reference_dict, sign_a.template_dict, sign_a.alignment_dict):
if r_seg in loops:
loop_length = len(sign_a.reference_dict[r_seg])
ref_loop = [i for i in list(sign_a.reference_dict[r_seg].values()) if i not in ['x','-']]
ref_keys = [i for i in list(sign_a.reference_dict[r_seg].keys()) if i not in ['x','-']]
ref_loop_residues = Residue.objects.filter(protein_conformation__protein=self.target_signprot, protein_segment__slug=r_seg)
temp_loop = [i for i in list(sign_a.template_dict[t_seg].values()) if i not in ['x','-']]
temp_keys = [i for i in list(sign_a.template_dict[t_seg].keys()) if i not in ['x','-']]
if alt_complex_struct and r_seg in segs_for_alt_complex_struct:
temp_loop_residues = Residue.objects.filter(protein_conformation__protein=alt_signprot_complex.protein, protein_segment__slug=r_seg)
else:
temp_loop_residues = Residue.objects.filter(protein_conformation__protein=structure_signprot, protein_segment__slug=r_seg)
ref_out, temp_out, align_out = OrderedDict(), OrderedDict(), OrderedDict()
# ref is longer
if len(ref_loop)>len(temp_loop):
mid_temp = math.ceil(len(temp_loop)/2)
j = 0
for i in range(0, loop_length):
key = r_seg+'_'+str(i+1)
if i+1<=mid_temp:
temp_out[key] = temp_loop[i]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, i, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
elif mid_temp<i+1<=loop_length-mid_temp+1:
if i+1==loop_length-mid_temp+1 and len(temp_loop)%2==0:
temp_out[key] = temp_loop[mid_temp+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_temp+j, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
else:
temp_out[key.replace('_','?')] = '-'
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_temp+j, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
else:
temp_out[key] = temp_loop[mid_temp+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_temp+j, ref_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
for i, j in enumerate(list(sign_a.reference_dict[r_seg].values())):
key = r_seg+'_'+str(i+1)
try:
temp_out[key]
ref_out[key] = j
except:
ref_out[key.replace('_','?')] = j
i+=1
# temp is longer
elif len(ref_loop)<len(temp_loop):
mid_ref = math.ceil(len(ref_loop)/2)
j = 0
for i in range(0, loop_length):
key = r_seg+'_'+str(i+1)
if i+1<=mid_ref:
ref_out[key] = ref_loop[i]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, i, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[i].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
elif mid_ref<i+1<=loop_length-mid_ref+1:
if i+1==loop_length-mid_ref+1 and len(ref_loop)%2==0:
ref_out[key] = ref_loop[mid_ref+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_ref+j, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[mid_ref+j].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
else:
ref_out[key.replace('_','?')] = '-'
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_ref+j, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[mid_ref+j].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
else:
ref_out[key] = ref_loop[mid_ref+j]
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, mid_ref+j, temp_loop_residues[i].display_generic_number.label,
ref_loop_residues[mid_ref+j].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
j+=1
for i, j in enumerate(list(sign_a.template_dict[t_seg].values())):
key = r_seg+'_'+str(i+1)
try:
ref_out[key]
temp_out[key] = j
except:
temp_out[key.replace('_','?')] = j
i+=1
loops_to_model.append(r_seg)
# ref and temp length equal
else:
cr, ct = 1,1
for i, j in zip(list(sign_a.reference_dict[r_seg].values()), list(sign_a.template_dict[t_seg].values())):
ref_out[r_seg+'_'+str(cr)] = i
temp_out[r_seg+'_'+str(ct)] = j
self.template_source = compare_and_update_template_source(self.template_source, r_seg, signprot_pdb_array, ct-1, temp_loop_residues[ct-1].display_generic_number.label,
ref_loop_residues[cr-1].sequence_number, segs_for_alt_complex_struct, alt_complex_struct, self.main_structure)
if i!='-':
cr+=1
if j!='-':
ct+=1
c = 1
# update alignment dict
for i, j in zip(list(ref_out.values()), list(temp_out.values())):
key = r_seg+'_'+str(c)
if i=='-' or j=='-':
align_out[key.replace('_','?')] = '-'
elif i!=j:
align_out[key] = '.'
elif i==j:
align_out[key] = i
c+=1
# update pdb array
new_pdb_array = OrderedDict()
atoms_list = list(signprot_pdb_array[t_seg].values())
j = 0
for t_c, t in temp_out.items():
jplus1 = False
if t!='-':
for i in range(j, len(atoms_list)):
if atoms_list[j]!='-':
new_pdb_array[t_c] = atoms_list[j]
jplus1 = True
break
if jplus1:
j+=1
else:
new_pdb_array[t_c] = 'x'
# j+=1
# pprint.pprint(new_pdb_array)
# for i,j in new_pdb_array.items():
# try:
# print(i, PDB.Polypeptide.three_to_one(j[0].get_parent().get_resname()))
# except:
# print(i, j)
# update dictionary keys with '?' if no backbone template
ref_out_final, temp_out_final, align_out_final, new_pdb_array_final = OrderedDict(), OrderedDict(), OrderedDict(), OrderedDict()
# self.template_source[r_seg] = OrderedDict()
for i,j in new_pdb_array.items():
if '?' not in i and j=='x':
ref_out_final[i.replace('_','?').replace('.','?')] = ref_out[i]
temp_out_final[i.replace('_','?').replace('.','?')] = temp_out[i]
align_out_final[i.replace('_','?').replace('.','?')] = align_out[i]
new_pdb_array_final[i.replace('_','?').replace('.','?')] = new_pdb_array[i]
else:
ref_out_final[i] = ref_out[i]
temp_out_final[i] = temp_out[i]
align_out_final[i] = align_out[i]
new_pdb_array_final[i] = new_pdb_array[i]
sign_a.reference_dict[r_seg] = ref_out_final
sign_a.template_dict[t_seg] = temp_out_final
sign_a.alignment_dict[a_seg] = align_out_final
signprot_pdb_array[r_seg] = new_pdb_array_final
align_loop = list(sign_a.alignment_dict[a_seg].values())
self.a.reference_dict = deepcopy(self.a.reference_dict)
self.a.template_dict = deepcopy(self.a.template_dict)
self.a.alignment_dict = deepcopy(self.a.alignment_dict)
for seg, values in sign_a.reference_dict.items():
new_array[seg] = OrderedDict()
# self.template_source[seg] = OrderedDict()
final_values = deepcopy(values)
for key, res in values.items():
try:
if signprot_pdb_array[seg][key]=='x':
new_array[seg][key] = 'x'
self.template_source = update_template_source(self.template_source, [key], None, seg)
else:
new_array[seg][key] = signprot_pdb_array[seg][key]
except:
if res!='-':
new_array[seg][key] = '-'
self.template_source = update_template_source(self.template_source, [key], None, seg)
self.a.reference_dict[seg] = final_values
for seg, values in sign_a.template_dict.items():
for key, res in values.items():
if new_array[seg][key]=='x':
sign_a.template_dict[seg][key] = 'x'
else:
if new_array[seg][key]=='-':
sign_a.template_dict[seg][key] = '-'
else:
pdb_res = PDB.Polypeptide.three_to_one(new_array[seg][key][0].get_parent().get_resname())
if pdb_res!=sign_a.template_dict[seg][key]:
sign_a.template_dict[seg][key] = pdb_res
self.a.template_dict[seg] = sign_a.template_dict[seg]
for seg, values in sign_a.alignment_dict.items():
for key, res in values.items():
if new_array[seg][key]=='x':
values[key] = 'x'
self.a.alignment_dict[seg] = values
signprot_pdb_array = new_array
for seg, values in signprot_pdb_array.items():
self.main_pdb_array[seg] = values
delete_HN_begin = []
for i in self.a.reference_dict['HN']:
if i=='G.HN.30':
break
delete_HN_begin.append(i)
for d in delete_HN_begin:
del self.a.reference_dict['HN'][d]
try:
del self.a.template_dict['HN'][d]
except:
pass
try:
del self.a.alignment_dict['HN'][d]
except:
pass
del self.main_pdb_array['HN'][d]
try:
del self.template_source['HN'][d]
except:
pass
# add residues to model to self.trimmed_residues
gprot_segments = [i.slug for i in ProteinSegment.objects.filter(proteinfamily='Alpha')]
for i,j in self.a.reference_dict.items():
if i in gprot_segments:
for k,l in j.items():
if '?' in k or self.main_pdb_array[i][k] in ['-','x']:
self.trimmed_residues.append(k)
if i in loops_to_model:
self.trimmed_residues.append(k)
# custom mods
long_HG_prots = Protein.objects.filter(family__name='Gs')
if structure_signprot in long_HG_prots and self.target_signprot not in long_HG_prots:
self.trimmed_residues.append('G.HG.08')
self.trimmed_residues.append('G.HG.09')
self.trimmed_residues.append('G.HG.12')
self.trimmed_residues.append('G.HG.13')
self.trimmed_residues.append('G.HG.14')
self.trimmed_residues.append('G.HG.16')
self.trimmed_residues.append('G.HG.17')
if structure_signprot!=self.target_signprot or alt_signprot_complex.protein not in [None, self.target_signprot]:
# hbhc
hbhc_keys = list(self.a.reference_dict['hbhc'].keys())
self.trimmed_residues.append(hbhc_keys[2])
self.trimmed_residues.append(hbhc_keys[3])
self.trimmed_residues.append(hbhc_keys[-3])
self.trimmed_residues.append(hbhc_keys[-2])
# H1
self.trimmed_residues.append('G.H1.07')
self.trimmed_residues.append('G.H1.08')
if 'hgh4' in loops_to_model:
self.trimmed_residues.append('G.H4.01')
self.trimmed_residues.append('G.H4.02')
self.trimmed_residues.append('G.H4.03')
# Add mismatching residues to trimmed residues for modeling
for seg, val in self.a.alignment_dict.items():
if seg in gprotein_segment_slugs:
for key, res in val.items():
if res=='.':
self.trimmed_residues.append(key)
# Add residues with missing atom coordinates to trimmed residues for modeling
for seg, val in self.main_pdb_array.items():
if seg in gprotein_segment_slugs:
for key, atoms in val.items():
if atoms not in ['-','x']:
if atom_num_dict[PDB.Polypeptide.three_to_one(atoms[0].get_parent().get_resname())]>len(atoms):
self.trimmed_residues.append(key)
# Add Beta and Gamma chains
p = PDB.PDBParser(QUIET=True).get_structure('structure', StringIO(self.main_structure.pdb_data.pdb))[0]
beta = p[self.signprot_complex.beta_chain]
gamma = p[self.signprot_complex.gamma_chain]
self.a.reference_dict['Beta'] = OrderedDict()
self.a.template_dict['Beta'] = OrderedDict()
self.a.alignment_dict['Beta'] = OrderedDict()
self.main_pdb_array['Beta'] = OrderedDict()
self.template_source['Beta'] = OrderedDict()
self.a.reference_dict['Gamma'] = OrderedDict()
self.a.template_dict['Gamma'] = OrderedDict()
self.a.alignment_dict['Gamma'] = OrderedDict()
self.main_pdb_array['Gamma'] = OrderedDict()
self.template_source['Gamma'] = OrderedDict()
for b_res in beta:
key = str(b_res.get_id()[1])
self.a.reference_dict['Beta'][key] = PDB.Polypeptide.three_to_one(b_res.get_resname())
self.a.template_dict['Beta'][key] = PDB.Polypeptide.three_to_one(b_res.get_resname())
self.a.alignment_dict['Beta'][key] = PDB.Polypeptide.three_to_one(b_res.get_resname())
atoms = [atom for atom in b_res]
self.main_pdb_array['Beta'][key] = atoms
self.template_source['Beta'][key] = [self.main_structure, self.main_structure]
for g_res in gamma:
key = str(g_res.get_id()[1])
self.a.reference_dict['Gamma'][key] = PDB.Polypeptide.three_to_one(g_res.get_resname())
self.a.template_dict['Gamma'][key] = PDB.Polypeptide.three_to_one(g_res.get_resname())
self.a.alignment_dict['Gamma'][key] = PDB.Polypeptide.three_to_one(g_res.get_resname())
atoms = [atom for atom in g_res]
self.main_pdb_array['Gamma'][key] = atoms
self.template_source['Gamma'][key] = [self.main_structure, self.main_structure]
# raise AssertionError
# for i,j,k,l in zip(sign_a.reference_dict, sign_a.template_dict, sign_a.alignment_dict, signprot_pdb_array):
# pprint.pprint(self.template_source[i])
# for v,b,n,m in zip(sign_a.reference_dict[i], sign_a.template_dict[j], sign_a.alignment_dict[k], signprot_pdb_array[l]):
# print(v, b, n, m, sign_a.reference_dict[i][v], sign_a.template_dict[j][b], sign_a.alignment_dict[k][n], signprot_pdb_array[l][m])
|
|
# coding: UTF-8
"""Layer for packing data into one tunnel.
Packet Structure:
A packet in this layer can be divided into two parts: a header and a
body. The body of a packet is the plain data which is received from,
or being sent to, the counterpart of an outside
connection. The following figure illustrates the format of the header:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| | |R|F|D|S| |
| Version | |S|I|A|Y| Connection ID |
| | |T|N|T|N| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Version 8-bit version number = 1.
Control bits 4-bit flags. See Connection Procedure.
Reserved 4 bits reserved for future use. Must be zero.
Connection ID 16-bit Connection ID which is used for identifying
connections. This ID must be unique for each
connection during its lifetime, but may be reused
after a connection has been closed.
Connection Procedure:
When a new connection is established, the client half allocates an
unused Connection ID to this connection. It can then send a packet
with SYN set, either immediately or with the first data packet. All
following packets of this connection must have the SYN flag cleared.
After receiving a packet with SYN set, the server half creates a new
frontend to process this connection and binds the Connection ID to
this frontend. If a frontend is already using the same Connection ID
(which the client half should prevent), the server half should close
the old frontend first.
When either side closes the connection, the corresponding half sends
a packet with FIN set. The half that did not send a FIN packet must
reply with FIN set as well, and then close its connection. The
initial sender can recycle the resources of the connection only when
it receives the reply. In short, resources are released as soon as a
packet with FIN is received.
All packets carrying data have the DAT flag set. Since a packet
should be meaningful, no packet should be sent with none of the
flags set. The three flags mentioned above are not exclusive; one
packet may have any combination of the three flags set (except none
of them). If more than one flag is set, the SYN flag is the first to
be processed, followed by DAT, and FIN is the last.
If a critical error occurs in the frontend, or the outside
connection is reset, a packet with the RST flag alone should be
sent. On receiving this packet, the client should reset the
connection, or the server should reset the frontend, respectively.
This flag also indicates that the connection can be released and
that the Connection ID is available for allocation again. But like
the FIN flag, the receiver must reply with an RST packet, and all
connection resources are released only after a packet with RST is
received.
"""
VERSION_CODE = 1
import struct
class UnsupportVersionError(Exception): pass
class NoIDAvailableError(Exception): pass
header_format = "!BBH"
header_size = struct.calcsize(header_format)
max_conn_id = 65535
class StatusControl(object):
syn = 1 # first packet, means connection is started
dat = 2 # data transmission
fin = 4 # connection is closed
rst = 8 # connection is reset
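# Illustrative sketch (not part of the original module): how one header from the figure
# in the module docstring maps onto header_format. The connection id 42 is an arbitrary
# value chosen for demonstration; a first data packet carries SYN|DAT.
def _example_header_roundtrip():
    header = struct.pack(header_format, VERSION_CODE,
                         StatusControl.syn | StatusControl.dat, 42)
    ver, control, conn_id = struct.unpack(header_format, header)
    assert (ver, control, conn_id) == (VERSION_CODE, StatusControl.syn | StatusControl.dat, 42)
    return header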
class ConnectionStatus(object):
new = 0 # connection is created, but SYN has not been sent
connected = 1 # connection has been established
closing = 2 # FIN has been sent, waiting for reply
resetting = 3 # RST has been sent, waiting for reply
closed = 4 # connection has been closed
class IDAllocator(object):
def __init__(self, min_id, max_id):
self.next_id = min_id
self.max_id = max_id
self.recycled = set()
def allocate(self):
if self.recycled:
return self.recycled.pop()
if self.next_id >= self.max_id:
raise NoIDAvailableError()
next_id = self.next_id
self.next_id += 1
return next_id
def recycle(self, conn_id):
self.recycled.add(conn_id)
while (self.next_id - 1) in self.recycled:
self.next_id -= 1
self.recycled.remove(self.next_id)
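# Illustrative sketch (not part of the original module): IDAllocator hands out ids from
# min_id up to max_id - 1, reuses recycled ids first, and shrinks next_id back down when
# the most recently allocated ids are recycled.
def _example_id_allocator():
    alloc = IDAllocator(1, max_conn_id)
    a, b, c = alloc.allocate(), alloc.allocate(), alloc.allocate()  # 1, 2, 3
    alloc.recycle(b)                 # 2 goes into the recycled set
    assert alloc.allocate() == b     # ...and is handed out again first
    alloc.recycle(c)                 # 3 was the highest id in use, so next_id drops to 3
    assert alloc.next_id == 3
    return a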
class TunnelConnection(object):
def __init__(self, record_conn):
self.record_conn = record_conn
self.id_allocator = IDAllocator(1, max_conn_id)
self.conn_states = {}
# is tunnel available for writing?
self.available = True
def new_connection(self):
conn_id = self.id_allocator.allocate()
self.conn_states[conn_id] = ConnectionStatus.new
return conn_id
def reset_connection(self, conn_id):
if self.conn_states[conn_id] == ConnectionStatus.connected:
self._send_packet(conn_id, StatusControl.rst)
self.conn_states[conn_id] = ConnectionStatus.resetting
def close_connection(self, conn_id):
if self.conn_states[conn_id] == ConnectionStatus.connected:
self._send_packet(conn_id, StatusControl.fin)
self.conn_states[conn_id] = ConnectionStatus.closing
def send_packet(self, conn_id, data):
if not data:
return
control = StatusControl.dat
if self.conn_states[conn_id] == ConnectionStatus.new:
control |= StatusControl.syn
self.conn_states[conn_id] = ConnectionStatus.connected
self._send_packet(conn_id, control, data)
def receive_packets(self):
for packet in self.record_conn.receive_packets():
packet = self._process_packet(packet)
if packet:
yield packet
def _process_packet(self, packet):
ver, control, conn_id = \
struct.unpack(header_format, packet[:header_size])
if ver != VERSION_CODE:
raise UnsupportVersionError()
data = packet[header_size:]
# RST flag is set
if control & StatusControl.rst:
old_state = self.conn_states[conn_id]
self.reset_connection(conn_id)
self.conn_states[conn_id] = ConnectionStatus.closed
self.id_allocator.recycle(conn_id)
if old_state != ConnectionStatus.connected:
return None
return conn_id, StatusControl.rst, b""
# SYN flag is set
if control & StatusControl.syn:
self.conn_states[conn_id] = ConnectionStatus.connected
# clear DAT flag if status is not connected
if self.conn_states[conn_id] != ConnectionStatus.connected:
control &= ~StatusControl.dat
# if DAT flag is not set, no data should be returned
if not (control & StatusControl.dat):
data = b""
# FIN flag is set
if control & StatusControl.fin:
old_state = self.conn_states[conn_id]
self.close_connection(conn_id)
self.conn_states[conn_id] = ConnectionStatus.closed
self.id_allocator.recycle(conn_id)
if old_state != ConnectionStatus.connected:
return None
if not control:
return None
return conn_id, control, data
def _send_packet(self, conn_id, control, data=b""):
header = struct.pack(header_format, VERSION_CODE, control, conn_id)
self.record_conn.send_packet(header + data)
def continue_sending(self):
self.available = self.record_conn.continue_sending()
def get_rlist(self):
return self.record_conn.get_rlist()
def get_wlist(self):
return self.record_conn.get_wlist()
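# Minimal usage sketch (illustration only; _LoopbackRecordConnection is an invented
# stand-in that assumes the record layer only needs send_packet()/receive_packets()
# here, and it is not part of the real record layer).
class _LoopbackRecordConnection(object):
    def __init__(self):
        self.sent = []
    def send_packet(self, packet):
        self.sent.append(packet)
    def receive_packets(self):
        while self.sent:
            yield self.sent.pop(0)

def _example_tunnel_roundtrip():
    tunnel = TunnelConnection(_LoopbackRecordConnection())
    conn_id = tunnel.new_connection()
    tunnel.send_packet(conn_id, b"hello")      # first data packet carries SYN|DAT
    received = list(tunnel.receive_packets())  # loopback: parse back what was just sent
    assert received == [(conn_id, StatusControl.syn | StatusControl.dat, b"hello")]
    return received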
|
|
"""
Spatial Discretizor
-------------------
Module containing the classes to 'discretize' a topological space.
By discretization we mean creating a non-bijective mathematical
mapping between two topological spaces.
The main task of a spatial discretization class is to transform the
spatial information of an element in one topological space into spatial
information in another topological space, in which each topological
element contains a group of topological elements of the original space.
If a point does not belong to the discretized space, the function has to
return -1.
The classes also implement some useful functions related to this task.
Conventions
-----------
- Region_id as int, regionslocs as numpy.ndarray (even if it is an int)
TODO
----
- Complete irregular discretizer.
- Assign regions to points.
- Multiple regions
- nd-grid discretization
- Retrieve only populated regions. (Renumber populated regions)
- Multiple discretization types aggregated
- Compute contiguity using correlation measure
"""
import numpy as np
import warnings
from utils import check_discretizors, check_flag_multi
class BaseSpatialDiscretizor:
"""
Spatial Discretizor object. This object performs a discretization of the
spatial domain and is able to:
- Assign a static, predefined region to each point.
- Retrieve the neighbourhood defined by static regions.
This class acts as a base for all possible discretizers.
"""
def __len__(self):
"""Returns the number of regions or discretization units."""
return len(np.unique(self.regions_id))
def __getitem__(self, key):
"""Get the regions_id which match with the input."""
if type(key) == int:
return self.regions_id[key]
else:
return self.discretize(key)
def _initialization(self):
"""Function to initialize useful class parameters for discretizers."""
## Conditionals initialization
if 'limits' not in dir(self):
self.limits = None
if 'borders' not in dir(self):
self.borders = None
if 'regionlocs' not in dir(self):
self.regionlocs = None
if 'regions_id' not in dir(self):
self.regions_id = None
check_discretizors(self)
def retrieve_region(self, element_i, info_i, ifdistance=False):
"""Retrieve the region to which the points given belong to in this
discretization.
**warning** it is in format retriever to be used in that way if the
user consider in that way.
Parameters
----------
element_i: numpy.ndarray, shape(n, m) or shape (n,)
the point or points we want to retrieve their regions.
info_i: optional [ignored]
the special information in order to retrieve neighs and regions.
ifdistance: bool
True if we want the distance.
Returns
-------
region: numpy.ndarray or int
the region id of the given points.
See also
--------
pySpatialTools.BaseRetriever
"""
region = self.discretize(element_i)
return region
def retrieve_neigh(self, element_i, elements):
"""Retrieve the neighs given a point using this discretization. Could
be an internal retrieve if element_i is an index or an external
retrieve if element_i it is not a point in elements (element_i is a
coordinates).
Parameters
----------
element_i: numpy.ndarray
the point location for which we want its neighbours using the given
discretization.
elements: optional
the spatial information of the elements from which we want to get
the neighs of element_i.
Returns
-------
logi: numpy.ndarray boolean
the boolean array of which elements are neighs (are in the same
region) of element_i.
"""
region = self.discretize(element_i)
regions = self.discretize(elements)
logi = self.check_neighbors(region, regions)
return logi
def discretize(self, elements):
"""Discretize elements given their region_id.
Parameters
----------
elements: optional
the spatial information of the elements for which we want to obtain
their region given that discretization.
Returns
-------
regions: numpy.ndarray of int
the region_id of each elements for this discretization.
"""
regions = self._map_loc2regionid(elements)
return regions
def belong_region(self, elements, region_id=None):
"""Function to compute the belonging of some elements to the regions
selected.
Parameters
----------
elements: optional
the coordinates of the elements whose belonging to the selected
region we want to check.
region_id: int or None
the region we want to check. If it is None we will check the whole
region defined by the discretization.
Returns
-------
boolean: bool
the belonging to the selected region.
"""
if region_id is None:
regions = self.discretize(elements)
boolean = np.logical_not(self.check_neighbors(regions, -1))
else:
regions = self.discretize(elements)
boolean = self.check_neighbors(regions, region_id)
return boolean
def get_contiguity(self, region_id=None, *params):
"""Get the whole contiguity or the contiguos regions of a given region.
Parameters
----------
region_id: int or None
the regions we want to get their contiguous regions. If it is None
it is retrieved the whole map of contiguity.
params: list or tuple
the instructions of which considerations we need to compute the
contiguity we want.
Returns
-------
contiguity: list or list of lists
the contiguous regions.
"""
contiguity = self._compute_contiguity_geom(region_id, *params)
return contiguity
def get_limits(self, region_id=None):
"""Function to compute the limits of the region.
Parameters
----------
region_id: numpy.ndarray or int
the region ids of the regions whose limits we want to get. If it is
None, the limits of the whole discretized space are retrieved.
Returns
-------
limits: numpy.ndarray
the limits with a specific ordering.
"""
## Check limits function
def check_limits(limits):
try:
assert len(limits.shape) == 1
assert len(limits) == self.n_dim * 2
return True
except:
try:
assert len(limits.shape) == self.n_dim
assert limits.shape == tuple([2]*self.n_dim)
return True
except:
return False
## Compute limits
if region_id is None:
limits = self.limits
else:
limits = self._compute_limits(region_id)
## Check output
if not check_limits(limits):
raise TypeError("Incorrect computation of limits.")
return limits
def get_activated_regions(self, elements, geom=True):
"""Get the regions that have at least one of the elements input in
them.
Parameters
----------
elements: optional
the spatial information of the elements for which we want to obtain
their region given that discretization.
Returns
-------
regions: numpy.ndarray
the regions which have some elements in them.
"""
if self.multiple:
discretized = self.discretize(elements)
## for weighted
if type(discretized) == tuple:
discretized = discretized[0]
regions = []
for e in discretized:
regions += list(e)
regions = np.unique(regions)
else:
regions = np.unique(self.discretize(elements))
return regions
def check_neighbors(self, regions, region):
"""Check if the region is in each of the regions of pre-discretized
list of elements.
Parameters
----------
regions: int or numpy.ndarray
the regions id we want to check if there are similar to region.
region: list or numpy.array
the assignation of regions.
Returns
-------
logi: numpy.ndarray boolean
the boolean array of which have region coincidence (are in the same
region).
"""
## Check if there is flag multi
flag_multi = check_flag_multi(regions)
if self.multiple:
logi = self._check_neighbors_multiple(region, regions)
else:
msg = "Regions multi-assigned in a not multi-assign discretizor."
if flag_multi:
warnings.warn(msg)
logi = self._check_neighbors_multiple(region, regions)
else:
logi = self._check_neighbors_individual(region, regions)
return logi
###########################################################################
########################### Auxiliar functions ############################
###########################################################################
def _check_neighbors_individual(self, region, regions):
"""Check if there is equal regions in a uni-assigned regions. Returns
a boolean array."""
logi = regions == region
return logi
def _check_neighbors_multiple(self, region, regions):
"""Check if there is equal regions in a multi-assigned regions. Returns
a boolean array."""
N_r = len(regions)
logi = np.zeros(N_r).astype(bool)
for i in xrange(N_r):
logi[i] = region in regions[i]
return logi
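# Minimal sketch of a concrete discretizer (an illustration only, not one of the real
# pySpatialTools discretizers): a 1-D regular binning that maps each coordinate to a bin
# index and returns -1 outside the discretized space, as required by the module docstring.
# Real subclasses would presumably also go through _initialization()/check_discretizors.
class _ExampleBinDiscretizor(BaseSpatialDiscretizor):
    def __init__(self, low, high, n_bins):
        self.n_dim = 1
        self.multiple = False
        self.limits = np.array([low, high])
        self.borders = np.linspace(low, high, n_bins + 1)
        self.regions_id = np.arange(n_bins)
        self.regionlocs = 0.5 * (self.borders[:-1] + self.borders[1:])
    def _map_loc2regionid(self, elements):
        # Bin each coordinate against the inner borders; anything outside [low, high] -> -1
        elements = np.asarray(elements, dtype=float).ravel()
        regions = np.digitize(elements, self.borders[1:-1])
        regions[(elements < self.borders[0]) | (elements > self.borders[-1])] = -1
        return regions

# e.g. _ExampleBinDiscretizor(0., 10., 5).discretize([0.5, 9.9, 12.0]) -> array([0, 4, -1])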
|
|
import logging
import unittest
from unittest import mock
from . import setup_test_files
import bigchaindb_driver
from prov2bigchaindb.core import clients, accounts, utils, exceptions, local_stores
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class BaseAccountTest(unittest.TestCase):
def setUp(self):
self.account_id = 'Base_Account_Test'
self.public_key = 'public'
self.private_key = 'private'
self.store = mock.Mock(spec=local_stores.SqliteStore,
**{'get_account.return_value': (self.account_id,
self.public_key,
self.private_key,
None)}
)
def tearDown(self):
del self.account_id
del self.public_key
del self.private_key
del self.store
def test_positive_init(self):
account = accounts.BaseAccount(self.account_id, self.store)
self.assertIsInstance(account, accounts.BaseAccount)
self.assertIsInstance(account.public_key, str)
self.assertIsInstance(account.private_key, str)
self.assertEqual(account.get_public_key(), self.public_key)
def test_positive_init_without_Account(self):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.BaseAccount(self.account_id, self.store)
self.assertNotEqual(account.get_public_key(), self.public_key)
def test_negative_init_BaseAccount(self):
with self.assertRaises(AssertionError):
accounts.BaseAccount(self.account_id, None)
def test__str__(self):
account = accounts.BaseAccount(self.account_id, self.store)
self.assertEqual(str(account), self.account_id + " : " + self.public_key)
def test_positive_get_id(self):
account = accounts.BaseAccount(self.account_id, self.store)
self.assertIsInstance(account.get_id(), str)
self.assertEqual(account.get_id(), self.account_id)
def test_positive_get_public_Key(self):
account = accounts.BaseAccount(self.account_id, self.store)
self.assertIsInstance(account.get_public_key(), str)
self.assertEqual(account.get_public_key(), self.public_key)
@unittest.skip("testing skipping")
def test__create_asset(self):
raise NotImplementedError()
@unittest.skip("testing skipping")
def test__transfer_asset(self):
raise NotImplementedError()
class DocumentConceptAccountTest(unittest.TestCase):
def setUp(self):
self.account_id = 'Document_Concept_Account_Test'
self.public_key = 'public'
self.private_key = 'private'
self.store = mock.Mock(spec=local_stores.SqliteStore,
**{'get_account.return_value': (
self.account_id,
self.public_key,
self.private_key,
None)})
self.bdb_returned_transaction = {"operation": "CREATE",
"outputs": [{"amount": 1,
"condition": {
"details": {
"bitmask": 32,
"public_key": "4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD",
"signature": None,
"type": "fulfillment",
"type_id": 4
},
"uri": "cc:4:20:MTmLrdyfhfxPw3WxnaYaQkPmU1GcEzg9mAj_O_Nuv5w:96"
},
"public_keys": [
"4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD"
]
}
],
'id': '1',
'asset': {'data': {'prov': '1'}}}
self.bdb_connection = mock.Mock(spec=bigchaindb_driver.BigchainDB,
**{'transactions.retrieve.return_value': self.bdb_returned_transaction,
'transactions.prepare.return_value': self.bdb_returned_transaction,
'transactions.fulfill.return_value': self.bdb_returned_transaction,
'transactions.send.return_value': self.bdb_returned_transaction,
}
)
def tearDown(self):
del self.account_id
del self.public_key
del self.private_key
del self.store
del self.bdb_connection
del self.bdb_returned_transaction
@mock.patch('prov2bigchaindb.core.utils.wait_until_valid')
def test_positive_save_asset(self, mock_wait):
asset = {'data': {'prov': ''}}
account = accounts.DocumentConceptAccount(self.account_id, self.store)
tx_id = account.save_asset(asset, self.bdb_connection)
# self.bdb_connection.transactions.prepare.assert_called_with(operation='CREATE', signers=self.public_key, asset={'data':asset}, metadata={'account_id':self.account_id})
self.bdb_connection.transactions.fulfill.assert_called_with(self.bdb_returned_transaction,
private_keys=self.private_key)
self.bdb_connection.transactions.send.assert_called_with(self.bdb_returned_transaction)
self.assertEqual(tx_id, '1')
@mock.patch('prov2bigchaindb.core.utils.wait_until_valid')
def test_negative_save_asset(self, mock_wait):
asset = {'data': {'prov': ''}}
account = accounts.DocumentConceptAccount(self.account_id, self.store)
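# Chained assignment: negative_return and the mocked transaction's 'id' both become '2',
# so transactions.send() returns just '2' instead of the full transaction dict, which
# presumably is what makes save_asset() raise CreateRecordException below.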
negative_return = self.bdb_returned_transaction['id'] = '2'
self.bdb_connection.configure_mock(**{'transactions.retrieve.return_value': self.bdb_returned_transaction,
'transactions.prepare.return_value': self.bdb_returned_transaction,
'transactions.fulfill.return_value': self.bdb_returned_transaction,
'transactions.send.return_value': negative_return,
})
with self.assertRaises(exceptions.CreateRecordException):
account.save_asset(asset, self.bdb_connection)
# self.bdb_connection.transactions.prepare.assert_called_with(operation='CREATE', signers=self.public_key, asset={'data':asset}, metadata={'account_id':self.account_id})
self.bdb_connection.transactions.fulfill.assert_called_with(self.bdb_returned_transaction,
private_keys=self.private_key)
self.bdb_connection.transactions.send.assert_called_with(self.bdb_returned_transaction)
class GraphConceptAccountTest(unittest.TestCase):
def setUp(self):
self.test_prov_files = setup_test_files()
self.prov_document = utils.to_prov_document(content=self.test_prov_files["simple"])
self.prov_element, self.prov_relations, self.prov_namespaces = \
clients.GraphConceptClient.calculate_account_data(self.prov_document)[
0]
self.id_mapping = {}
for rel in self.prov_relations['with_id']:
self.id_mapping[rel.identifier] = ''
self.public_key = 'public'
self.private_key = 'private'
self.store = mock.Mock(spec=local_stores.SqliteStore,
**{'get_account.return_value': (
str(self.prov_element.identifier),
self.public_key,
self.private_key,
None)})
self.bdb_returned_transaction = {"operation": "CREATE",
"outputs": [{"amount": 1,
"condition": {
"details": {
"bitmask": 32,
"public_key": "4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD",
"signature": None,
"type": "fulfillment",
"type_id": 4
},
"uri": "cc:4:20:MTmLrdyfhfxPw3WxnaYaQkPmU1GcEzg9mAj_O_Nuv5w:96"
},
"public_keys": [
"4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD"
]
}
],
'id': '1',
'asset': {'data': {'prov': '1'}}
}
self.bdb_connection = mock.Mock(spec=bigchaindb_driver.BigchainDB,
**{'transactions.retrieve.return_value': self.bdb_returned_transaction,
'transactions.prepare.return_value': self.bdb_returned_transaction,
'transactions.fulfill.return_value': self.bdb_returned_transaction,
'transactions.send.return_value': self.bdb_returned_transaction})
def tearDown(self):
del self.prov_document
del self.prov_element
del self.prov_relations
del self.prov_namespaces
del self.public_key
del self.private_key
del self.store
del self.bdb_connection
del self.bdb_returned_transaction
del self.test_prov_files
def test_positive_init_without_account(self):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.GraphConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
self.assertNotEqual(account.get_public_key(), self.public_key)
self.assertEqual(account.tx_id, '')
self.assertEqual(account.prov_namespaces, self.prov_namespaces)
self.assertEqual(account.prov_relations_without_id, self.prov_relations['without_id'])
self.assertEqual(account.prov_relations_with_id, self.prov_relations['with_id'])
def test__str__(self):
account = accounts.GraphConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
self.assertEqual(str(account), str(self.prov_element.identifier) + " : " + self.public_key +
"\n\t" + str(self.prov_relations['with_id']) +
"\n\t" + str(self.prov_relations['without_id'])
)
def test_get_tx_id(self):
account = accounts.GraphConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
account.tx_id = '1'
self.assertEqual(account.get_tx_id(), '1')
@mock.patch('prov2bigchaindb.core.utils.wait_until_valid')
def test_positiv_save_instance_asset(self, mock_wait):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.GraphConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
tx_id = account.save_instance_asset(self.bdb_connection)
# asset = {'data': {'prov': account._create_instance_document().serialize(format='json')}}
# self.bdb_connection.transactions.prepare.assert_called_with(operation='CREATE', signers=account.public_key, asset=asset, metadata={'account_id':str(self.prov_element.identifier)})
self.bdb_connection.transactions.fulfill.assert_called_with(self.bdb_returned_transaction,
private_keys=account.private_key)
self.bdb_connection.transactions.send.assert_called_with(self.bdb_returned_transaction)
self.assertEqual(tx_id, '1')
self.assertEqual(account.get_tx_id(), '1')
@mock.patch('prov2bigchaindb.core.utils.wait_until_valid')
def test_negativ_save_instance_asset(self, mock_wait):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.GraphConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
negative_return = self.bdb_returned_transaction['id'] = '2'
self.bdb_connection.configure_mock(**{'transactions.retrieve.return_value': self.bdb_returned_transaction,
'transactions.prepare.return_value': self.bdb_returned_transaction,
'transactions.fulfill.return_value': self.bdb_returned_transaction,
'transactions.send.return_value': negative_return,
})
# asset = {'data': {'prov': account._create_instance_document().serialize(format='json')}}
with self.assertRaises(exceptions.CreateRecordException):
account.save_instance_asset(self.bdb_connection)
# self.bdb_connection.transactions.prepare.assert_called_with(operation='CREATE', signers=account.public_key, asset=asset, metadata={'account_id':str(self.prov_element.identifier)})
self.bdb_connection.transactions.fulfill.assert_called_with(self.bdb_returned_transaction,
private_keys=account.private_key)
self.bdb_connection.transactions.send.assert_called_with(self.bdb_returned_transaction)
self.assertEqual(account.tx_id, '')
@unittest.skip("testing skipping")
def test_save_relations_assets(self):
raise NotImplementedError()
@unittest.skip("testing skipping")
def test__create_instance_document(self):
raise NotImplementedError()
@unittest.skip("testing skipping")
def test__create_relations_document(self):
raise NotImplementedError()
class RoleConceptAccountTest(unittest.TestCase):
def setUp(self):
self.test_prov_files = setup_test_files()
self.prov_document = utils.to_prov_document(content=self.test_prov_files["simple"])
self.prov_element, self.prov_relations, self.prov_namespaces = \
clients.RoleConceptClient.calculate_account_data(self.prov_document)[
0]
self.id_mapping = {}
for rel in self.prov_relations['with_id']:
self.id_mapping[rel.identifier] = ''
self.public_key = 'public'
self.private_key = 'private'
self.store = mock.Mock(spec=local_stores.SqliteStore,
**{'get_account.return_value': (
str(self.prov_element.identifier),
self.public_key,
self.private_key,
None)})
self.bdb_returned_transaction = {"operation": "CREATE",
"outputs": [{"amount": 1,
"condition": {
"details": {
"bitmask": 32,
"public_key": "4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD",
"signature": None,
"type": "fulfillment",
"type_id": 4
},
"uri": "cc:4:20:MTmLrdyfhfxPw3WxnaYaQkPmU1GcEzg9mAj_O_Nuv5w:96"
},
"public_keys": [
"4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD"
]
}
],
'id': '1',
'asset': {'data': {'prov': '1'}}
}
self.bdb_connection = mock.Mock(spec=bigchaindb_driver.BigchainDB,
**{'transactions.retrieve.return_value': self.bdb_returned_transaction,
'transactions.prepare.return_value': self.bdb_returned_transaction,
'transactions.fulfill.return_value': self.bdb_returned_transaction,
'transactions.send.return_value': self.bdb_returned_transaction})
def tearDown(self):
del self.prov_document
del self.prov_element
del self.prov_relations
del self.prov_namespaces
del self.public_key
del self.private_key
del self.store
del self.bdb_connection
del self.bdb_returned_transaction
del self.test_prov_files
@unittest.skip("testing skipping")
def test_positive_init_without_account(self):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.RoleConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
self.assertNotEqual(account.get_public_key(), self.public_key)
self.assertEqual(account.tx_id, '')
self.assertEqual(account.prov_namespaces, self.prov_namespaces)
self.assertEqual(account.prov_relations_without_id, self.prov_relations['without_id'])
self.assertEqual(account.prov_relations_with_id, self.prov_relations['with_id'])
@unittest.skip("testing skipping")
def test__str__(self):
account = accounts.RoleConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
self.assertEqual(str(account), str(self.prov_element.identifier) + " : " + self.public_key +
"\n\t" + str(self.prov_relations['with_id']) +
"\n\t" + str(self.prov_relations['without_id'])
)
@unittest.skip("testing skipping")
def test_get_tx_id(self):
account = accounts.RoleConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
account.tx_id = '1'
self.assertEqual(account.get_tx_id(), '1')
@unittest.skip("testing skipping")
@mock.patch('prov2bigchaindb.core.utils.wait_until_valid')
def test_positiv_save_instance_asset(self, mock_wait):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.RoleConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
tx_id = account.save_instance_asset(self.bdb_connection)
# asset = {'data': {'prov': account._create_instance_document().serialize(format='json')}}
# self.bdb_connection.transactions.prepare.assert_called_with(operation='CREATE', signers=account.public_key, asset=asset, metadata={'account_id':str(self.prov_element.identifier)})
self.bdb_connection.transactions.fulfill.assert_called_with(self.bdb_returned_transaction,
private_keys=account.private_key)
self.bdb_connection.transactions.send.assert_called_with(self.bdb_returned_transaction)
self.assertEqual(tx_id, '1')
self.assertEqual(account.get_tx_id(), '1')
@unittest.skip("testing skipping")
@mock.patch('prov2bigchaindb.core.utils.wait_until_valid')
def test_negativ_save_instance_asset(self, mock_wait):
self.store.configure_mock(**{'get_account.side_effect': exceptions.NoAccountFoundException()})
account = accounts.RoleConceptAccount(self.prov_element, self.prov_relations, self.id_mapping,
self.prov_namespaces, self.store)
negative_return = self.bdb_returned_transaction['id'] = '2'
self.bdb_connection.configure_mock(**{'transactions.retrieve.return_value': self.bdb_returned_transaction,
'transactions.prepare.return_value': self.bdb_returned_transaction,
'transactions.fulfill.return_value': self.bdb_returned_transaction,
'transactions.send.return_value': negative_return,
})
# asset = {'data': {'prov': account._create_instance_document().serialize(format='json')}}
with self.assertRaises(exceptions.CreateRecordException):
account.save_instance_asset(self.bdb_connection)
# self.bdb_connection.transactions.prepare.assert_called_with(operation='CREATE', signers=account.public_key, asset=asset, metadata={'account_id':str(self.prov_element.identifier)})
self.bdb_connection.transactions.fulfill.assert_called_with(self.bdb_returned_transaction,
private_keys=account.private_key)
self.bdb_connection.transactions.send.assert_called_with(self.bdb_returned_transaction)
self.assertEqual(account.tx_id, '')
@unittest.skip("testing skipping")
def test_save_relations_assets(self):
raise NotImplementedError()
@unittest.skip("testing skipping")
def test__create_instance_document(self):
raise NotImplementedError()
@unittest.skip("testing skipping")
def test__create_relations_document(self):
raise NotImplementedError()
|
|
import tempfile
import shutil
import os
import inspect
from lib import BaseTest
class AddRepo1Test(BaseTest):
"""
add package to local repo: .deb file
"""
fixtureCmds = [
"aptly repo create -comment=Repo1 -distribution=squeeze repo1",
]
runCmd = "aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo1", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo2Test(BaseTest):
"""
add package to local repo: .dsc file
"""
fixtureCmds = [
"aptly repo create -comment=Repo2 -distribution=squeeze repo2",
]
runCmd = "aptly repo add repo2 ${files}/pyspi_0.6.1-1.3.dsc ${files}/pyspi-0.6.1-1.3.stripped.dsc"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo2", "repo_show")
# check pool
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/2f/5b/pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo3Test(BaseTest):
"""
add package to local repo: directory
"""
fixtureCmds = [
"aptly repo create -comment=Repo3 -distribution=squeeze repo3",
]
runCmd = "aptly repo add repo3 ${files}"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo3", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/2f/5b/pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo4Test(BaseTest):
"""
add package to local repo: complex directory + remove
"""
fixtureCmds = [
"aptly repo create -comment=Repo4 -distribution=squeeze repo4",
]
runCmd = "aptly repo add -remove-files repo4 "
def prepare(self):
super(AddRepo4Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "01"), 0755)
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "libboost-program-options-dev_1.49.0.1_i386.deb"),
os.path.join(self.tempSrcDir, "01"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.diff.gz"),
os.path.join(self.tempSrcDir, "02", "03", "other.file"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo4", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
path = os.path.join(self.tempSrcDir, "01", "libboost-program-options-dev_1.49.0.1_i386.deb")
if os.path.exists(path):
raise Exception("path %s shouldn't exist" % (path, ))
path = os.path.join(self.tempSrcDir, "02", "03", "pyspi_0.6.1.orig.tar.gz")
if os.path.exists(path):
raise Exception("path %s shouldn't exist" % (path, ))
path = os.path.join(self.tempSrcDir, "02", "03", "other.file")
if not os.path.exists(path):
raise Exception("path %s doesn't exist" % (path, ))
shutil.rmtree(self.tempSrcDir)
class AddRepo5Test(BaseTest):
"""
add package to local repo: some source files missing
"""
fixtureCmds = [
"aptly repo create -comment=Repo5 -distribution=squeeze repo5",
]
runCmd = "aptly repo add repo5 "
outputMatchPrepare = lambda self, s: s.replace(self.tempSrcDir, "")
expectedCode = 1
def prepare(self):
super(AddRepo5Test, self).prepare()
self.tempSrcDir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0755)
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
os.path.join(self.tempSrcDir, "02", "03"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
os.path.join(self.tempSrcDir, "02", "03"))
self.runCmd += self.tempSrcDir
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo5", "repo_show")
shutil.rmtree(self.tempSrcDir)
class AddRepo6Test(BaseTest):
"""
add package to local repo: missing file
"""
fixtureCmds = [
"aptly repo create -comment=Repo6 -distribution=squeeze repo6",
]
runCmd = "aptly repo add repo6 no-such-file"
expectedCode = 1
class AddRepo7Test(BaseTest):
"""
add package to local repo: missing repo
"""
runCmd = "aptly repo add repo7 ${files}"
expectedCode = 1
class AddRepo8Test(BaseTest):
"""
add package to local repo: conflict in packages
"""
fixtureCmds = [
"aptly repo create -comment=Repo8 -distribution=squeeze repo8",
"aptly repo add repo8 ${files}/pyspi_0.6.1-1.3.dsc",
]
runCmd = "aptly repo add repo8 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
outputMatchPrepare = lambda self, s: s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), "").replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
expectedCode = 1
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo8", "repo_show")
class AddRepo9Test(BaseTest):
"""
add package to local repo: conflict in files
"""
fixtureCmds = [
"aptly repo create -comment=Repo9 -distribution=squeeze repo9",
]
runCmd = "aptly repo add repo9 ${files}/pyspi_0.6.1-1.3.dsc"
gold_processor = BaseTest.expand_environ
outputMatchPrepare = lambda self, s: s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
expectedCode = 1
def prepare(self):
super(AddRepo9Test, self).prepare()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool/de/f3/"))
with open(os.path.join(os.environ["HOME"], ".aptly", "pool/de/f3/pyspi_0.6.1.orig.tar.gz"), "w") as f:
f.write("abcd")
class AddRepo10Test(BaseTest):
"""
add package to local repo: double import
"""
fixtureCmds = [
"aptly repo create -comment=Repo10 -distribution=squeeze repo10",
"aptly repo add repo10 ${files}",
]
runCmd = "aptly repo add repo10 ${files}/pyspi_0.6.1-1.3.dsc"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo10", "repo_show")
class AddRepo11Test(BaseTest):
"""
add package to local repo: conflict in packages + -force-replace
"""
fixtureCmds = [
"aptly repo create -comment=Repo11 -distribution=squeeze repo11",
"aptly repo add repo11 ${files}/pyspi_0.6.1-1.3.dsc",
]
runCmd = "aptly repo add -force-replace repo11 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
outputMatchPrepare = lambda self, s: s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), "").replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show -with-packages repo11", "repo_show")
|
|
class MoveDirection(object):
ROTATE = 0x00
LEFT = 0x01
RIGHT = 0x02
UP = 0x03
DOWN = 0x04
POSITION = 0x05
class Deck(object):
unit = None
cell = None
def __init__(self, unit):
self.unit = unit
def bind(self, cell):
self.cell = cell
self.cell.bind(self)
def unbind(self):
if self.cell:
self.cell.unbind(self)
self.cell = None
def is_hit(self):
hit = False
if self.cell:
hit = self.cell.is_marked()
return hit
class BaseUnit(object):
size = None
name = None
decks = None
grid = None
placed = None
sunked = None
def __init__(self):
self.decks = []
for i in range(self.size):
self.decks.append(Deck(self))
self.placed = False
self.sunked = False
def is_sunked(self):
if not self.sunked:
sunked = True
for d in self.decks:
if not d.is_hit():
sunked = False
self.sunked = sunked
return self.sunked
def is_placed(self):
return self.placed
def get_position(self):
position = []
for d in self.decks:
if d.cell:
                position.append(list(d.cell.position))  # copy, so move() never mutates a cell's own coordinates
else:
position.clear()
break
return position
def get_default_position(self):
position = []
for i in range(self.size):
position.append([0, i])
return position
def move(self, direction, position=None):
result = False
if not position:
position = self.get_position()
if not position:
position = self.get_default_position()
if MoveDirection.ROTATE == direction:
for p in position:
p[0], p[1] = p[1], p[0]
elif MoveDirection.LEFT == direction:
for p in position:
p[1] -= 1
elif MoveDirection.RIGHT == direction:
for p in position:
p[1] += 1
elif MoveDirection.UP == direction:
for p in position:
p[0] -= 1
elif MoveDirection.DOWN == direction:
for p in position:
p[0] += 1
if not self.grid.check(position):
position = self.get_position()
if len(position) == len(self.decks):
for i in range(len(self.decks)):
self.decks[i].unbind()
cell = self.grid.cell([position[i][0], position[i][1]])
self.decks[i].bind(cell)
result = True
return result
def rotate(self):
return self.move(MoveDirection.ROTATE)
def move_left(self):
return self.move(MoveDirection.LEFT)
def move_right(self):
return self.move(MoveDirection.RIGHT)
def move_up(self):
return self.move(MoveDirection.UP)
def move_down(self):
return self.move(MoveDirection.DOWN)
def move_to(self, position=None):
return self.move(MoveDirection.POSITION, position)
def place(self):
if self.grid.place(self.get_position(), self):
self.placed = True
return self.placed
class Fleet(object):
config = None
units = None
ready = None
defeated = None
def __init__(self, config):
self.config = config
self.units = []
self.ready = False
self.defeated = False
def create(self, grid):
self.units.clear()
for u in self.config.units:
for i in range(u.ammount):
unit = type(u.name, (BaseUnit,), {'name': u.name, 'size': u.size, 'grid': grid})()
self.units.append(unit)
def current(self):
unit = None
for u in self.units:
if not u.placed:
unit = u
break
return unit
def is_ready(self):
if not self.ready:
ready = True
for u in self.units:
if not u.is_placed():
ready = False
break
self.ready = ready
return self.ready
def is_defeated(self):
if not self.defeated:
defeated = True
for u in self.units:
if not u.is_sunked():
defeated = False
break
self.defeated = defeated
return self.defeated
class FleetConfig(object):
units = None
def __init__(self, content):
self.units = []
for u in content.units:
self.units.append(type('unit', (object,), u)())
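# Hedged usage sketch (not part of the original module). The classes above rely
# on an external grid API (grid.check/cell/place) and cell API
# (bind/unbind/is_marked/position) that is not defined in this file; the
# MiniCell/MiniGrid stand-ins below are assumptions made only to show how a
# fleet is created, moved and placed.
if __name__ == "__main__":
    class MiniCell(object):
        def __init__(self, position):
            self.position = list(position)
            self.deck = None
            self.marked = False

        def bind(self, deck):
            self.deck = deck

        def unbind(self, deck):
            self.deck = None

        def is_marked(self):
            return self.marked

    class MiniGrid(object):
        def __init__(self, size=10):
            self.cells = [[MiniCell([r, c]) for c in range(size)]
                          for r in range(size)]

        def check(self, position):
            # every coordinate must stay on the board
            return all(0 <= r < len(self.cells) and 0 <= c < len(self.cells)
                       for r, c in position)

        def cell(self, position):
            return self.cells[position[0]][position[1]]

        def place(self, position, unit):
            return bool(position)

    # one two-deck ship; 'ammount' matches the attribute name read by Fleet.create()
    content = type('content', (object,), {
        'units': [{'name': 'Destroyer', 'size': 2, 'ammount': 1}],
    })()
    fleet = Fleet(FleetConfig(content))
    fleet.create(MiniGrid())
    ship = fleet.current()
    ship.move_to()      # bind to the default position [[0, 0], [0, 1]]
    ship.move_down()    # shift one row down
    ship.place()
    print(ship.get_position(), fleet.is_ready())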
|
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X : array_like
An array with shape (n_samples, n_features)
Returns
-------
D : array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
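# Illustrative example (not part of the original module): for three 2-D samples
# the function returns the pairwise componentwise distances and the index pairs
# they belong to (doctest-style; exact array formatting depends on numpy):
#
#   >>> D, ij = l1_cross_distances(np.array([[0., 0.], [1., 2.], [3., 1.]]))
#   >>> D          # rows correspond to the pairs (0, 1), (0, 2), (1, 2)
#   array([[1., 2.],
#          [3., 1.],
#          [2., 1.]])
#   >>> ij
#   array([[0, 1],
#          [0, 2],
#          [1, 2]])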
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
.. deprecated:: 0.18
This class will be removed in 0.20.
Use the :class:`GaussianProcessRegressor` instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
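# Hedged usage sketch (not part of the original module): mirrors the example in
# the class docstring and adds a predict() call with eval_MSE=True. It assumes a
# scikit-learn release (< 0.20) that still ships this deprecated class.
#
#   >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
#   >>> y = (X * np.sin(X)).ravel()
#   >>> gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
#   >>> gp.fit(X, y)                                  # doctest: +ELLIPSIS
#   GaussianProcess(beta0=None...
#   >>> x_new = np.atleast_2d(np.linspace(1., 8., 5)).T
#   >>> y_pred, mse = gp.predict(x_new, eval_MSE=True)
#   >>> y_pred.shape, mse.shape
#   ((5,), (5,))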
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_request(
resource_group_name: str,
account_name: str,
application_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
"applicationName": _SERIALIZER.url("application_name", application_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
account_name: str,
application_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
"applicationName": _SERIALIZER.url("application_name", application_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
account_name: str,
application_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
"applicationName": _SERIALIZER.url("application_name", application_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
account_name: str,
application_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2022-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
"applicationName": _SERIALIZER.url("application_name", application_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
*,
maxresults: Optional[int] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2022-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = _SERIALIZER.query("maxresults", maxresults, 'int')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ApplicationOperations(object):
"""ApplicationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.batch.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create(
self,
resource_group_name: str,
account_name: str,
application_name: str,
parameters: Optional["_models.Application"] = None,
**kwargs: Any
) -> "_models.Application":
"""Adds an application to the specified Batch account.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_name: The name of the application. This must be unique within the account.
:type application_name: str
:param parameters: The parameters for the request.
:type parameters: ~azure.mgmt.batch.models.Application
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Application, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Application
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Application"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'Application')
else:
_json = None
request = build_create_request(
resource_group_name=resource_group_name,
account_name=account_name,
application_name=application_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Application', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
account_name: str,
application_name: str,
**kwargs: Any
) -> None:
"""Deletes an application.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_name: The name of the application. This must be unique within the account.
:type application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
application_name=application_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
application_name: str,
**kwargs: Any
) -> "_models.Application":
"""Gets information about the specified application.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_name: The name of the application. This must be unique within the account.
:type application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Application, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Application
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Application"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
application_name=application_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Application', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
account_name: str,
application_name: str,
parameters: "_models.Application",
**kwargs: Any
) -> "_models.Application":
"""Updates settings for the specified application.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_name: The name of the application. This must be unique within the account.
:type application_name: str
:param parameters: The parameters for the request.
:type parameters: ~azure.mgmt.batch.models.Application
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Application, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Application
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Application"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Application')
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
application_name=application_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Application', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> Iterable["_models.ListApplicationsResult"]:
"""Lists all of the applications in the specified account.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param maxresults: The maximum number of items to return in the response.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListApplicationsResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.batch.models.ListApplicationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListApplicationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxresults=maxresults,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxresults=maxresults,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ListApplicationsResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications'} # type: ignore
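# Hedged usage sketch (not part of the generated module): this operation group
# is normally reached through the generated management client rather than by
# building requests directly. BatchManagementClient / DefaultAzureCredential
# come from azure-mgmt-batch / azure-identity; the resource names below are
# placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.batch import BatchManagementClient
#
#   client = BatchManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   app = client.application.get(
#       resource_group_name="<resource-group>",
#       account_name="<batch-account>",
#       application_name="<application>",
#   )
#   for app in client.application.list("<resource-group>", "<batch-account>"):
#       print(app.name)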
|
|
import operator
import ipaddr
import re
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.exceptions import ValidationError
from django.db.models import Q
from cyder.core.system.models import System
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.interface.dynamic_intr.models import DynamicInterface
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.range.models import Range
from cyder.cydhcp.site.models import Site
from cyder.cydhcp.utils import IPFilter
from cyder.cydhcp.utils import start_end_filter
from cyder.cydhcp.vlan.models import Vlan
from cyder.cydhcp.workgroup.models import Workgroup
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.cname.models import CNAME
from cyder.cydns.domain.models import Domain
from cyder.cydns.mx.models import MX
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.ptr.models import PTR
from cyder.cydns.srv.models import SRV
from cyder.cydns.soa.models import SOA
from cyder.cydns.sshfp.models import SSHFP
from cyder.cydns.txt.models import TXT
from cyder.cydns.view.models import View
searchables = (
('A', AddressRecord),
('CNAME', CNAME),
('DOMAIN', Domain),
('STATIC', StaticInterface),
('DYNAMIC', DynamicInterface),
('MX', MX),
('NETWORK', Network),
('NS', Nameserver),
('PTR', PTR),
('RANGE', Range),
('SOA', SOA),
('SRV', SRV),
('SSHFP', SSHFP),
('SYSTEM', System),
('TXT', TXT),
('WORKGROUP', Workgroup),
)
def get_managers():
managers = []
for name, Klass in searchables:
managers.append(Klass.objects)
return managers
class _Filter(object):
"""
    The base class for the different filters. Subclasses implement these methods.
"""
ntype = "FILTER"
def __str__(self):
return self.value
    def compile_Q(self):
pass
def build_filter(filter_, fields, filter_type):
final_filter = Q()
for t in fields:
final_filter = final_filter | Q(**{"{0}__{1}".format(
t, filter_type): filter_})
return final_filter
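# Illustrative sketch (assumption, not from the original module): for a model
# whose ``search_fields`` were ("fqdn", "ip_str"), the helper ORs one lookup per
# field together:
#
#   build_filter("web", ("fqdn", "ip_str"), "icontains")
#   == Q(fqdn__icontains="web") | Q(ip_str__icontains="web")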
class MacAddressFilter(_Filter):
def __init__(self, addr):
self.addr = addr
def compile_Q(self):
result = []
for name, Klass in searchables:
result.append(
Q(mac=self.addr) if name in ('STATIC', 'DYNAMIC') else None)
return result
class TextFilter(_Filter):
def __init__(self, rvalue):
self.value = rvalue
def compile_Q(self):
# Value is the search term
result = []
for name, Klass in searchables:
result.append(
build_filter(self.value, Klass.search_fields, 'icontains'))
return result
class REFilter(TextFilter):
    num_match = re.compile(r"\{(\d+)-(\d+)\}")
def _expand_number_regex(self, value):
"""
We want to turn something like /hp-node{31-40}.phx1 into
'/hp-node(31|32|33|34|35|36|37|38|39|40).phx1'
"""
matches = self.num_match.findall(value)
for low, high in matches:
padding = min(len(low), len(high))
if int(low) >= int(high):
continue
new_value = ""
for i in xrange(int(low), int(high) + 1):
new_value += "{0}|".format(str(i).rjust(padding, '0'))
new_value = '(' + new_value.strip('|') + ')'
value = value.replace('{{{0}-{1}}}'.format(low, high), new_value)
return value
def compile_Q(self):
result = []
value = self._expand_number_regex(self.value)
for name, Klass in searchables:
result.append(build_filter(value, Klass.search_fields, 'regex'))
return result
class DirectiveFilter(_Filter):
def __init__(self, directive, dvalue):
self.directive = directive
self.dvalue = dvalue
def compile_Q(self):
if self.directive == 'view':
return build_view_qsets(self.dvalue)
elif self.directive == 'network':
return build_network_qsets(self.dvalue)
elif self.directive == 'vlan':
return build_vlan_qsets(self.dvalue)
elif self.directive == 'zone':
return build_zone_qsets(self.dvalue)
elif self.directive == 'range':
return build_range_qsets(self.dvalue)
elif self.directive == 'type':
return build_rdtype_qsets(self.dvalue)
elif self.directive == 'site':
return build_site_qsets(self.dvalue)
else:
raise BadDirective(
"Unknown Directive '{0}'".format(self.directive)
)
# TODO: move this into its own file
##############################################################################
########################## Directive Filters ###############################
##############################################################################
class BadDirective(Exception):
pass
def build_rdtype_qsets(rdtype):
"""This function needs to filter out all records of a certain rdtype (like
A or CNAME). Any filter produced here has to be able to be negated. We use
the fact that every object has a pk > -1. When a qset is negated the query
becomes pk <= -1.
"""
rdtype = rdtype.upper() # Let's get consistent
select = Q(pk__gt=-1)
no_select = Q(pk__lte=-1)
result = []
for name, Klass in searchables:
if name == rdtype:
result.append(select)
else:
result.append(no_select)
return result
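# Hedged illustration of the pk trick documented above (the rdtype value is
# only an example, and this helper is not called anywhere): for a 'type'
# directive of CNAME, the CNAME entry gets Q(pk__gt=-1) (matches every row)
# while every other entry gets Q(pk__lte=-1) (matches nothing), so negating
# the directive simply swaps which managers match.
def _example_rdtype_qsets():
    return build_rdtype_qsets('cname')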
def build_view_qsets(view_name):
"""Filter based on DNS views."""
view_name = view_name.lower() # Let's get consistent
try:
view = View.objects.get(name=view_name)
except ObjectDoesNotExist:
raise BadDirective("'{0}' isn't a valid view.".format(view_name))
view_filter = Q(views=view) # This will slow queries down due to joins
q_sets = []
select = Q(pk__gt=-1)
for name, Klass in searchables:
if name == 'SOA':
q_sets.append(select) # SOA's are always public and private
elif hasattr(Klass, 'views'):
q_sets.append(view_filter)
else:
q_sets.append(None)
return q_sets
def build_ipf_qsets(q):
"""Filter based on IP address views.
:param q: A filter for a certain IP or IP range
:type q: Q
"""
q_sets = []
for name, Klass in searchables:
if name == 'A' or name == 'STATIC' or name == 'PTR':
q_sets.append(q)
else:
q_sets.append(None)
return q_sets
def build_range_qsets(range_):
try:
start, end = range_.split(',')
except ValueError:
raise BadDirective("Specify a range using the format: start,end")
if start.find(':') > -1:
ip_type = '6'
if end.find('.') > -1:
ip_type = '4'
try:
istart, iend, ipf_q = start_end_filter(start, end, ip_type)
    except (ValidationError, ipaddr.AddressValueError) as e:
raise BadDirective(str(e))
return build_ipf_qsets(ipf_q)
def build_network_qsets(network_str):
# TODO: move these directive processors into functions.
if network_str.find(':') > -1:
Klass = ipaddr.IPv6Network
ip_type = '6'
if network_str.find('.') > -1:
Klass = ipaddr.IPv4Network
ip_type = '4'
try:
network = Klass(network_str)
ipf = IPFilter(network.network, network.broadcast, ip_type)
except (ipaddr.AddressValueError, ipaddr.NetmaskValueError):
raise BadDirective("{0} isn't a valid "
"network.".format(network_str))
return build_ipf_qsets(ipf.Q)
def build_site_qsets(site_name):
try:
site = Site.objects.get(name=site_name)
except ObjectDoesNotExist:
raise BadDirective("{0} isn't a valid "
"site.".format(site_name))
return build_ipf_qsets(site.compile_Q())
def build_vlan_qsets(vlan_name):
try:
if vlan_name.isdigit():
vlan = Vlan.objects.get(number=vlan_name)
else:
vlan = Vlan.objects.get(name=vlan_name)
except ObjectDoesNotExist:
raise BadDirective("{0} isn't a valid "
"vlan identifier.".format(vlan_name))
except MultipleObjectsReturned:
raise BadDirective("{0} doesn't uniquely identify"
"a vlan.".format(vlan_name))
return build_ipf_qsets(vlan.compile_Q())
def build_zone_qsets(zone):
"""The point of this filter is to first find the root of a dns zone
specified by zone and then build a query to return all records in this
zone.
"""
try:
root_domain = Domain.objects.get(name=zone)
# This might not actually be the root of a zone, but functionally we
# don't really care.
except ObjectDoesNotExist:
raise BadDirective("'{0}' part of a valid zone.".format(zone))
if not root_domain.soa:
raise BadDirective("'{0}' part of a valid zone.".format(zone))
def _get_zone_domains(domain):
domains = [domain]
for sub_domain in domain.domain_set.filter(soa=domain.soa):
domains += _get_zone_domains(sub_domain)
return domains
zone_domains = _get_zone_domains(root_domain)
domains = [Q(domain=domain) for domain in zone_domains]
reverse_domains = [Q(reverse_domain=domain) for domain in zone_domains]
zone_query = reduce(operator.or_, domains, Q())
reverse_zone_query = reduce(operator.or_, reverse_domains, Q())
result = []
for name, Klass in searchables:
if hasattr(Klass, 'domain'):
result.append(zone_query)
elif hasattr(Klass, 'reverse_domain'):
result.append(reverse_zone_query)
elif name == 'SOA':
result.append(Q(pk=root_domain.soa.pk))
else:
result.append(None)
return result
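# Hedged sketch only (not part of this module's API): the lists returned by
# compile_Q()/build_*_qsets() line up positionally with `searchables`, so a
# caller could plausibly evaluate them like this, skipping the None entries.
def _example_run_qsets(q_sets):
    for (name, Klass), q in zip(searchables, q_sets):
        if q is not None:
            yield name, Klass.objects.filter(q)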
|
|
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder import objects
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import volume_types
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.IntOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center'),
cfg.BoolOpt('dell_sc_verify_cert',
default=False,
help='Enable HTTPS SC certificate verification.')
]
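# Hedged example only: in a deployment these options (plus the generic SAN
# options appended below) would typically be set in a backend section of
# cinder.conf. Every value here is a placeholder.
#
#     [dell_sc_backend]
#     san_ip = 192.0.2.10
#     san_login = admin
#     san_password = secret
#     dell_sc_ssn = 64702
#     dell_sc_api_port = 3033
#     dell_sc_server_folder = openstack
#     dell_sc_volume_folder = openstack
#     dell_sc_verify_cert = False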
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
driver.ExtendVD, driver.CloneableVD, driver.SnapshotVD,
driver.BaseVD):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common_opts)
self.configuration.append_config_values(san_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ...
7.38197504E8 Bytes
Need to split that apart and convert to GB.
:returns: gbs in int form
"""
try:
n = spacestring.split(' ', 1)
fgbs = float(n[0]) / 1073741824.0
igbs = int(fgbs)
return igbs
except Exception:
# If any of that blew up it isn't in the format we
# thought so eat our error and return None
return None
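    # Hedged worked example for the helper above (inputs are hypothetical):
    #   '7.38197504E8 Bytes'   -> 738197504.0 / 1073741824.0 == 0.6875 -> 0 GB
    #   '2.147483648E10 Bytes' -> 21474836480.0 / 1073741824.0 == 20.0 -> 20 GB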
def do_setup(self, context):
"""One time driver setup.
Called once by the manager after the driver is loaded.
        Sets up clients, checks licenses, and sets up protocol-specific
        helpers.
"""
self._client = dell_storagecenter_api.StorageCenterApiHelper(
self.configuration)
def check_for_setup_error(self):
"""Validates the configuration information."""
with self._client.open_connection() as api:
api.find_sc()
def _get_volume_extra_specs(self, volume):
"""Gets extra specs for the given volume."""
type_id = volume.get('volume_type_id')
if type_id:
return volume_types.get_volume_type_extra_specs(type_id)
return {}
def _add_volume_to_consistency_group(self, api, scvolume, volume):
"""Just a helper to add a volume to a consistency group.
        :param api: Dell SC API object.
:param scvolume: Dell SC Volume object.
:param volume: Cinder Volume object.
:return: Nothing.
"""
if scvolume and volume.get('consistencygroup_id'):
profile = api.find_replay_profile(
volume.get('consistencygroup_id'))
if profile:
api.update_cg_volumes(profile, [volume])
def create_volume(self, volume):
"""Create a volume."""
# We use id as our name as it is unique.
volume_name = volume.get('id')
volume_size = volume.get('size')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
storage_profile = specs.get('storagetype:storageprofile')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name,
'size': volume_size})
scvolume = None
with self._client.open_connection() as api:
try:
if api.find_sc():
scvolume = api.create_volume(volume_name,
volume_size,
storage_profile)
# Update Consistency Group
self._add_volume_to_consistency_group(api, scvolume, volume)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume['name'])
if scvolume is None:
raise exception.VolumeBackendAPIException(
_('Unable to create volume'))
def delete_volume(self, volume):
deleted = False
# We use id as our name as it is unique.
volume_name = volume.get('id')
LOG.debug('Deleting volume %s', volume_name)
with self._client.open_connection() as api:
try:
if api.find_sc():
deleted = api.delete_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
volume_name)
# if there was an error we will have raised an
# exception. If it failed to delete it is because
# the conditions to delete a volume were not met.
if deleted is False:
raise exception.VolumeIsBusy(volume_name=volume_name)
def create_snapshot(self, snapshot):
"""Create snapshot"""
# our volume name is the volume id
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if scvolume is not None:
if api.create_replay(scvolume,
snapshot_id,
0) is not None:
snapshot['status'] = 'available'
return
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
snapshot['status'] = 'error_creating'
raise exception.VolumeBackendAPIException(
_('Failed to create snapshot %s') %
snapshot_id)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
scvolume = None
src_volume_name = snapshot.get('volume_id')
# This snapshot could have been created on its own or as part of a
# cgsnapshot. If it was a cgsnapshot it will be identified on the Dell
# backend under cgsnapshot_id. Given the volume ID and the
# cgsnapshot_id we can find the appropriate snapshot.
# So first we look for cgsnapshot_id. If that is blank then it must
# have been a normal snapshot which will be found under snapshot_id.
snapshot_id = snapshot.get('cgsnapshot_id')
if not snapshot_id:
snapshot_id = snapshot.get('id')
volume_name = volume.get('id')
LOG.debug(
'Creating new volume %(vol)s from snapshot %(snap)s '
'from vol %(src)s',
{'vol': volume_name,
'snap': snapshot_id,
'src': src_volume_name})
with self._client.open_connection() as api:
try:
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
replay = api.find_replay(srcvol,
snapshot_id)
if replay is not None:
volume_name = volume.get('id')
scvolume = api.create_view_volume(volume_name,
replay)
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s created from %(snap)s',
{'vol': volume_name,
'snap': snapshot_id})
else:
raise exception.VolumeBackendAPIException(
_('Failed to create volume %s') % volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
scvolume = None
src_volume_name = src_vref.get('id')
volume_name = volume.get('id')
LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
{'clone': volume_name,
'vol': src_volume_name})
with self._client.open_connection() as api:
try:
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
scvolume = api.create_cloned_volume(volume_name,
srcvol)
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s cloned from %(src)s',
{'vol': volume_name,
'src': src_volume_name})
else:
raise exception.VolumeBackendAPIException(
_('Failed to create volume %s') % volume_name)
def delete_snapshot(self, snapshot):
"""delete_snapshot"""
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if scvolume is not None:
if api.delete_replay(scvolume,
snapshot_id):
return
# if we are here things went poorly.
snapshot['status'] = 'error_deleting'
raise exception.VolumeBackendAPIException(
_('Failed to delete snapshot %s') % snapshot_id)
def create_export(self, context, volume):
"""Create an export of a volume.
The volume exists on creation and will be visible on
initialize connection. So nothing to do here.
"""
pass
def ensure_export(self, context, volume):
"""Ensure an export of a volume.
Per the eqlx driver we just make sure that the volume actually
exists where we think it does.
"""
scvolume = None
volume_name = volume.get('id')
LOG.debug('Checking existence of volume %s', volume_name)
with self._client.open_connection() as api:
try:
if api.find_sc():
scvolume = api.find_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
_('Unable to find volume %s') % volume_name)
def remove_export(self, context, volume):
"""Remove an export of a volume.
We do nothing here to match the nothing we do in create export. Again
we do everything in initialize and terminate connection.
"""
pass
def extend_volume(self, volume, new_size):
"""Extend the size of the volume."""
volume_name = volume.get('id')
LOG.debug('Extending volume %(vol)s to %(size)s',
{'vol': volume_name,
'size': new_size})
if volume is not None:
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
raise exception.VolumeBackendAPIException(
_('Unable to extend volume %s') % volume_name)
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
with self._client.open_connection() as api:
storageusage = api.get_storage_usage() if api.find_sc() else None
# all of this is basically static for now
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['reserved_percentage'] = 0
data['free_capacity_gb'] = 'unavailable'
data['total_capacity_gb'] = 'unavailable'
data['consistencygroup_support'] = True
# In theory if storageusage is None then we should have
# blown up getting it. If not just report unavailable.
if storageusage is not None:
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
data['QoS_support'] = False
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
'free': data['free_capacity_gb']})
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
# We use id as our volume name so we need to rename the backend
# volume to the original volume name.
original_volume_name = volume.get('id')
current_name = new_volume.get('id')
LOG.debug('update_migrated_volume: %(current)s to %(original)s',
{'current': current_name,
'original': original_volume_name})
if original_volume_name:
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(current_name)
if (scvolume and
api.rename_volume(scvolume, original_volume_name)):
model_update = {'_name_id': None}
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
original_volume_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
def create_consistencygroup(self, context, group):
"""This creates a replay profile on the storage backend.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:return: Nothing on success.
:raises: VolumeBackendAPIException
"""
gid = group['id']
with self._client.open_connection() as api:
cgroup = api.create_replay_profile(gid)
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
return
raise exception.VolumeBackendAPIException(
_('Unable to create consistency group %s') % gid)
def delete_consistencygroup(self, context, group):
"""Delete the Dell SC profile associated with this consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:return: Updated model_update, volumes.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if profile:
api.delete_replay_profile(profile)
# If we are here because we found no profile that should be fine
# as we are trying to delete it anyway.
# Now whack the volumes. So get our list.
volumes = self.db.volume_get_all_by_group(context, gid)
# Trundle through the list deleting the volumes.
for volume in volumes:
self.delete_volume(volume)
volume['status'] = 'deleted'
model_update = {'status': group['status']}
return model_update, volumes
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:return model_update, add_volumes_update, remove_volumes_update
model_update is a dictionary that the driver wants the manager
to update upon a successful return. If None is returned, the manager
will set the status to 'available'.
add_volumes_update and remove_volumes_update are lists of dictionaries
that the driver wants the manager to update upon a successful return.
Note that each entry requires a {'id': xxx} so that the correct
volume entry can be updated. If None is returned, the volume will
remain its original status. Also note that you cannot directly
assign add_volumes to add_volumes_update as add_volumes is a list of
cinder.db.sqlalchemy.models.Volume objects and cannot be used for
db update directly. Same with remove_volumes.
If the driver throws an exception, the status of the group as well as
those of the volumes to be added/removed will be set to 'error'.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if not profile:
LOG.error(_LE('Cannot find Consistency Group %s'), gid)
elif api.update_cg_volumes(profile,
add_volumes,
remove_volumes):
LOG.info(_LI('Updated Consistency Group %s'), gid)
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
raise exception.VolumeBackendAPIException(
_('Unable to update consistency group %s') % gid)
def create_cgsnapshot(self, context, cgsnapshot):
"""Takes a snapshot of the consistency group.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to take.
:return: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.debug('profile %s replayid %s', profile, snapshotid)
if api.snap_cg_replay(profile, snapshotid, 0):
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
for snapshot in snapshots:
snapshot.status = 'available'
model_update = {'status': 'available'}
return model_update, snapshots
# That didn't go well. Tell them why. Then bomb out.
LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
raise exception.VolumeBackendAPIException(
_('Unable to snap Consistency Group %s') % cgid)
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot.
If profile isn't found return success. If failed to delete the
replay (the snapshot) then raise an exception.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to delete.
:return: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
raise exception.VolumeBackendAPIException(
_('Unable to delete Consistency Group snapshot %s') %
snapshotid)
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
for snapshot in snapshots:
snapshot.status = 'deleted'
model_update = {'status': 'deleted'}
return model_update, snapshots
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
        1. Rename the backend storage object so that it matches the
           volume['name'], which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
api.manage_existing(volume['id'], existing_ref)
else:
raise exception.ManageExistingInvalidReference(
_('Must specify source-name or source-id. (%s)') %
existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
return api.get_unmanaged_volume_size(existing_ref)
else:
raise exception.ManageExistingInvalidReference(
_('Must specify source-name or source-id. (%s)') %
existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param volume: Cinder volume to unmanage
"""
with self._client.open_connection() as api:
scvolume = api.find_volume(volume['id'])
if scvolume:
api.unmanage(scvolume)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
# We currently only support retyping for the Storage Profile extra spec
if diff['extra_specs']:
storage_profiles = diff['extra_specs'].get(
'storagetype:storageprofile')
if storage_profiles:
if len(storage_profiles) != 2:
LOG.warning(_LW('Unable to retype Storage Profile, '
'expected to receive current and '
'requested storagetype:storageprofile '
'values. Value received: %s'),
storage_profiles)
return False
requested = storage_profiles[1]
volume_name = volume.get('id')
LOG.debug('Retyping volume %(vol)s to use storage '
'profile %(profile)s',
{'vol': volume_name,
'profile': requested})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
return api.update_storage_profile(
scvolume, requested)
return False
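# Hedged illustration of the retype() contract above (profile names are made
# up): the Storage Profile extra spec arrives as an (old, new) pair, roughly
#
#     diff = {'extra_specs': {'storagetype:storageprofile': ('Low', 'High')}}
#
# so storage_profiles[1] ('High') is the profile the volume is retyped to.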
|
|
#!/usr/bin/python
# Copyright (c) 2009, Andrew McNabb
# Copyright (c) 2003-2008, Brent N. Chun
import os
import sys
import shutil
import tempfile
import time
import unittest
basedir, bin = os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))
sys.path.append("%s" % basedir)
if os.getenv("TEST_HOSTS") is None:
raise Exception("Must define TEST_HOSTS")
g_hosts = os.getenv("TEST_HOSTS").split()
if os.getenv("TEST_USER") is None:
raise Exception("Must define TEST_USER")
g_user = os.getenv("TEST_USER")
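# Hedged usage sketch (host names and user are placeholders): run the suite
# with something like
#
#     TEST_HOSTS="host1.example.com host2.example.com" TEST_USER=testuser \
#         python <this test file>
#
# after setting up passwordless ssh for that user on each host.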
class PsshTest(unittest.TestCase):
def setUp(self):
self.outDir = tempfile.mkdtemp()
self.errDir = tempfile.mkdtemp()
    def tearDown(self):
shutil.rmtree(self.errDir)
shutil.rmtree(self.outDir)
def testShortOpts(self):
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pssh -h %s -l %s -p 64 -o %s -e %s -t 60 -v -P -i uptime < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
stdout = open("%s/%s" % (self.outDir, host)).read()
self.assert_(stdout.find("load average") != -1)
def testLongOpts(self):
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pssh --hosts=%s --user=%s --par=64 --outdir=%s --errdir=%s --timeout=60 --verbose --print --inline uptime < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
stdout = open("%s/%s" % (self.outDir, host)).read()
self.assert_(stdout.find("load average") != -1)
def testStderr(self):
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pssh -h %s -l %s -p 64 -o %s -e %s -t 60 -v -P -i ls /foobarbaz < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
stdout = open("%s/%s" % (self.outDir, host)).read()
self.assertEqual(stdout, "")
stderr = open("%s/%s" % (self.errDir, host)).read()
self.assert_(stderr.find("No such file or directory") != -1)
class PscpTest(unittest.TestCase):
def setUp(self):
self.outDir = tempfile.mkdtemp()
self.errDir = tempfile.mkdtemp()
    def tearDown(self):
shutil.rmtree(self.errDir)
shutil.rmtree(self.outDir)
try:
os.remove("/tmp/pssh.test")
except OSError:
pass
def testShortOpts(self):
for host in g_hosts:
cmd = "ssh %s@%s rm -rf /tmp/pssh.test" % (g_user, host)
rv = os.system(cmd)
self.assertEqual(rv, 0)
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pscp -h %s -l %s -p 64 -o %s -e %s -t 60 /etc/hosts /tmp/pssh.test < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s cat /tmp/pssh.test" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, open("/etc/hosts").read())
def testLongOpts(self):
for host in g_hosts:
cmd = "ssh %s@%s rm -rf /tmp/pssh.test" % (g_user, host)
rv = os.system(cmd)
self.assertEqual(rv, 0)
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pscp --hosts=%s --user=%s --par=64 --outdir=%s --errdir=%s --timeout=60 /etc/hosts /tmp/pssh.test < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s cat /tmp/pssh.test" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, open("/etc/hosts").read())
def testRecursive(self):
for host in g_hosts:
cmd = "ssh %s@%s rm -rf /tmp/pssh.test" % (g_user, host)
rv = os.system(cmd)
self.assertEqual(rv, 0)
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pscp -r -h %s -l %s -p 64 -o %s -e %s -t 60 /etc/init.d /tmp/pssh.test < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
files = os.popen("ls -R /etc/init.d | sed 1d | sort").read().strip()
for host in g_hosts:
cmd = "ssh %s@%s ls -R /tmp/pssh.test | sed 1d | sort" % (g_user, host)
data = os.popen(cmd).read().strip()
self.assertEqual(data, files)
class PslurpTest(unittest.TestCase):
def setUp(self):
self.outDir = tempfile.mkdtemp()
self.errDir = tempfile.mkdtemp()
    def tearDown(self):
shutil.rmtree(self.errDir)
shutil.rmtree(self.outDir)
def testShortOpts(self):
if os.path.exists("/tmp/pssh.test"):
try:
os.remove("/tmp/pssh.test")
except OSError:
shutil.rmtree("/tmp/pssh.test")
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pslurp -L /tmp/pssh.test -h %s -l %s -p 64 -o %s -e %s -t 60 /etc/hosts hosts < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s cat /etc/hosts" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, open("/tmp/pssh.test/%s/hosts" % host).read())
def testLongOpts(self):
if os.path.exists("/tmp/pssh.test"):
try:
os.remove("/tmp/pssh.test")
except OSError:
shutil.rmtree("/tmp/pssh.test")
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pslurp --localdir=/tmp/pssh.test --hosts=%s --user=%s --par=64 --outdir=%s --errdir=%s --timeout=60 /etc/hosts hosts < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s cat /etc/hosts" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, open("/tmp/pssh.test/%s/hosts" % host).read())
def testRecursive(self):
if os.path.exists("/tmp/pssh.test"):
try:
os.remove("/tmp/pssh.test")
except OSError:
shutil.rmtree("/tmp/pssh.test")
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pslurp -r -L /tmp/pssh.test -h %s -l %s -p 64 -o %s -e %s -t 60 /etc/init.d init.d < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s ls -R /etc/init.d | sed 1d | sort" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, os.popen("ls -R /tmp/pssh.test/%s/init.d | sed 1d | sort" % host).read())
class PrsyncTest(unittest.TestCase):
def setUp(self):
self.outDir = tempfile.mkdtemp()
self.errDir = tempfile.mkdtemp()
    def tearDown(self):
shutil.rmtree(self.errDir)
shutil.rmtree(self.outDir)
def testShortOpts(self):
for host in g_hosts:
cmd = "ssh %s@%s rm -rf /tmp/pssh.test" % (g_user, host)
rv = os.system(cmd)
self.assertEqual(rv, 0)
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/prsync -h %s -l %s -p 64 -o %s -e %s -t 60 -a -z /etc/hosts /tmp/pssh.test < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s cat /tmp/pssh.test" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, open("/etc/hosts").read())
def testLongOpts(self):
for host in g_hosts:
cmd = "ssh %s@%s rm -rf /tmp/pssh.test" % (g_user, host)
rv = os.system(cmd)
self.assertEqual(rv, 0)
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/prsync --hosts=%s --user=%s --par=64 --outdir=%s --errdir=%s --timeout=60 --archive --compress /etc/hosts /tmp/pssh.test < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
for host in g_hosts:
cmd = "ssh %s@%s cat /tmp/pssh.test" % (g_user, host)
data = os.popen(cmd).read()
self.assertEqual(data, open("/etc/hosts").read())
def testRecursive(self):
for host in g_hosts:
cmd = "ssh %s@%s rm -rf /tmp/pssh.test" % (g_user, host)
rv = os.system(cmd)
self.assertEqual(rv, 0)
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/prsync -r -h %s -l %s -p 64 -o %s -e %s -t 60 -a -z /etc/init.d/ /tmp/pssh.test < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
rv = os.system(cmd)
self.assertEqual(rv, 0)
files = os.popen("ls -R /etc/init.d | sed 1d | sort").read().strip()
for host in g_hosts:
cmd = "ssh %s@%s ls -R /tmp/pssh.test | sed 1d | sort" % (g_user, host)
data = os.popen(cmd).read().strip()
self.assertEqual(data, files)
class PnukeTest(unittest.TestCase):
def setUp(self):
self.outDir = tempfile.mkdtemp()
self.errDir = tempfile.mkdtemp()
    def tearDown(self):
shutil.rmtree(self.errDir)
shutil.rmtree(self.outDir)
def testShortOpts(self):
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pssh -h %s -l %s -p 64 -o %s -e %s -t 60 -v sleep 60 < /dev/null &" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
os.system(cmd)
time.sleep(5)
cmd = "%s/bin/pnuke -h %s -l %s -p 64 -o %s -e %s -t 60 -v sleep < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
        print(cmd)
rv = os.system(cmd)
self.assertEqual(rv, 0)
def testLongOpts(self):
hostsFile = tempfile.NamedTemporaryFile()
hostsFile.write("".join(map(lambda x: "%s\n" % x, g_hosts)))
hostsFile.flush()
cmd = "%s/bin/pssh --hosts=%s --user=%s --par=64 --outdir=%s --errdir=%s --timeout=60 --verbose sleep 60 < /dev/null &" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
os.system(cmd)
time.sleep(5)
cmd = "%s/bin/pnuke --hosts=%s --user=%s --par=64 --outdir=%s --errdir=%s --timeout=60 --verbose sleep < /dev/null" % (basedir, hostsFile.name, g_user, self.outDir, self.errDir)
        print(cmd)
rv = os.system(cmd)
self.assertEqual(rv, 0)
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PsshTest, "test"))
suite.addTest(unittest.makeSuite(PscpTest, "test"))
suite.addTest(unittest.makeSuite(PslurpTest, "test"))
suite.addTest(unittest.makeSuite(PrsyncTest, "test"))
suite.addTest(unittest.makeSuite(PnukeTest, "test"))
unittest.TextTestRunner().run(suite)
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from oslo_serialization import jsonutils
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.neutron.v2_0 import dns
from neutronclient.neutron.v2_0.qos import policy as qos_policy
def _format_fixed_ips(port):
try:
return '\n'.join([jsonutils.dumps(ip) for ip in port['fixed_ips']])
except (TypeError, KeyError):
return ''
def _add_updatable_args(parser):
parser.add_argument(
'--name',
help=_('Name of this port.'))
parser.add_argument(
'--description',
help=_('Description of this port.'))
parser.add_argument(
'--fixed-ip', metavar='subnet_id=SUBNET,ip_address=IP_ADDR',
action='append',
type=utils.str2dict_type(optional_keys=['subnet_id', 'ip_address']),
help=_('Desired IP and/or subnet for this port: '
'subnet_id=<name_or_id>,ip_address=<ip>. '
'You can repeat this option.'))
parser.add_argument(
'--fixed_ip',
action='append',
help=argparse.SUPPRESS)
parser.add_argument(
'--device-id',
help=_('Device ID of this port.'))
parser.add_argument(
'--device_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--device-owner',
help=_('Device owner of this port.'))
parser.add_argument(
'--device_owner',
help=argparse.SUPPRESS)
def _updatable_args2body(parsed_args, body, client):
neutronV20.update_dict(parsed_args, body,
['device_id', 'device_owner', 'name',
'description'])
ips = []
if parsed_args.fixed_ip:
for ip_spec in parsed_args.fixed_ip:
if 'subnet_id' in ip_spec:
subnet_name_id = ip_spec['subnet_id']
_subnet_id = neutronV20.find_resourceid_by_name_or_id(
client, 'subnet', subnet_name_id)
ip_spec['subnet_id'] = _subnet_id
ips.append(ip_spec)
if ips:
body['fixed_ips'] = ips
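# Hedged illustration of the fixed-ip handling above (network, subnet and
# address are placeholders): a CLI invocation such as
#
#     neutron port-create mynet --fixed-ip subnet_id=mysubnet,ip_address=192.0.2.5
#
# resolves 'mysubnet' to its UUID and sends a body containing
#     {'fixed_ips': [{'subnet_id': '<resolved uuid>', 'ip_address': '192.0.2.5'}]}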
class ListPort(neutronV20.ListCommand):
"""List ports that belong to a given tenant."""
resource = 'port'
_formatters = {'fixed_ips': _format_fixed_ips, }
list_columns = ['id', 'name', 'mac_address', 'fixed_ips']
pagination_support = True
sorting_support = True
class ListRouterPort(neutronV20.ListCommand):
"""List ports that belong to a given tenant, with specified router."""
resource = 'port'
_formatters = {'fixed_ips': _format_fixed_ips, }
list_columns = ['id', 'name', 'mac_address', 'fixed_ips']
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListRouterPort, self).get_parser(prog_name)
parser.add_argument(
'id', metavar='ROUTER',
help=_('ID or name of the router to look up.'))
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.id)
self.values_specs.append('--device_id=%s' % _id)
return super(ListRouterPort, self).take_action(parsed_args)
class ShowPort(neutronV20.ShowCommand):
"""Show information of a given port."""
resource = 'port'
class UpdatePortSecGroupMixin(object):
def add_arguments_secgroup(self, parser):
group_sg = parser.add_mutually_exclusive_group()
group_sg.add_argument(
'--security-group', metavar='SECURITY_GROUP',
default=[], action='append', dest='security_groups',
help=_('Security group associated with the port. You can '
'repeat this option.'))
group_sg.add_argument(
'--no-security-groups',
action='store_true',
help=_('Associate no security groups with the port.'))
def _resolv_sgid(self, secgroup):
return neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group', secgroup)
def args2body_secgroup(self, parsed_args, port):
if parsed_args.security_groups:
port['security_groups'] = [self._resolv_sgid(sg) for sg
in parsed_args.security_groups]
elif parsed_args.no_security_groups:
port['security_groups'] = []
class UpdateExtraDhcpOptMixin(object):
def add_arguments_extradhcpopt(self, parser):
group_sg = parser.add_mutually_exclusive_group()
group_sg.add_argument(
'--extra-dhcp-opt',
default=[],
action='append',
dest='extra_dhcp_opts',
type=utils.str2dict_type(
required_keys=['opt_name'],
optional_keys=['opt_value', 'ip_version']),
help=_('Extra dhcp options to be assigned to this port: '
'opt_name=<dhcp_option_name>,opt_value=<value>,'
'ip_version={4,6}. You can repeat this option.'))
def args2body_extradhcpopt(self, parsed_args, port):
ops = []
if parsed_args.extra_dhcp_opts:
# the extra_dhcp_opt params (opt_name & opt_value)
# must come in pairs, if there is a parm error
# both must be thrown out.
opt_ele = {}
edo_err_msg = _("Invalid --extra-dhcp-opt option, can only be: "
"opt_name=<dhcp_option_name>,opt_value=<value>,"
"ip_version={4,6}. "
"You can repeat this option.")
for opt in parsed_args.extra_dhcp_opts:
opt_ele.update(opt)
if ('opt_name' in opt_ele and
('opt_value' in opt_ele or 'ip_version' in opt_ele)):
if opt_ele.get('opt_value') == 'null':
opt_ele['opt_value'] = None
ops.append(opt_ele)
opt_ele = {}
else:
raise exceptions.CommandError(edo_err_msg)
if ops:
port['extra_dhcp_opts'] = ops
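# Hedged example of the opt-pairing rule enforced above (option names are
# illustrative only): repeating the flag as
#
#     --extra-dhcp-opt opt_name=bootfile-name,opt_value=pxelinux.0
#     --extra-dhcp-opt opt_name=server-ip-address,opt_value=null
#
# yields two extra_dhcp_opts entries, with the literal string 'null'
# converted to None before being sent.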
class UpdatePortAllowedAddressPair(object):
"""Update Port for allowed_address_pairs"""
def add_arguments_allowedaddresspairs(self, parser):
group_aap = parser.add_mutually_exclusive_group()
group_aap.add_argument(
'--allowed-address-pair',
metavar='ip_address=IP_ADDR|CIDR[,mac_address=MAC_ADDR]',
default=[],
action='append',
dest='allowed_address_pairs',
type=utils.str2dict_type(
required_keys=['ip_address'],
optional_keys=['mac_address']),
help=_('Allowed address pair associated with the port. '
'"ip_address" parameter is required. IP address or '
'CIDR can be specified for "ip_address". '
'"mac_address" parameter is optional. '
'You can repeat this option.'))
group_aap.add_argument(
'--no-allowed-address-pairs',
action='store_true',
help=_('Associate no allowed address pairs with the port.'))
def args2body_allowedaddresspairs(self, parsed_args, port):
if parsed_args.allowed_address_pairs:
port['allowed_address_pairs'] = parsed_args.allowed_address_pairs
elif parsed_args.no_allowed_address_pairs:
port['allowed_address_pairs'] = []
class CreatePort(neutronV20.CreateCommand, UpdatePortSecGroupMixin,
UpdateExtraDhcpOptMixin, qos_policy.CreateQosPolicyMixin,
UpdatePortAllowedAddressPair):
"""Create a port for a given tenant."""
resource = 'port'
def add_known_arguments(self, parser):
_add_updatable_args(parser)
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help=_('Set admin state up to false.'))
parser.add_argument(
'--admin_state_down',
dest='admin_state', action='store_false',
help=argparse.SUPPRESS)
parser.add_argument(
'--mac-address',
help=_('MAC address of this port.'))
parser.add_argument(
'--mac_address',
help=argparse.SUPPRESS)
parser.add_argument(
'--vnic-type',
metavar='<direct | direct-physical | macvtap '
'| normal | baremetal>',
choices=['direct', 'direct-physical', 'macvtap',
'normal', 'baremetal'],
type=utils.convert_to_lowercase,
help=_('VNIC type for this port.'))
parser.add_argument(
'--vnic_type',
choices=['direct', 'direct-physical', 'macvtap',
'normal', 'baremetal'],
type=utils.convert_to_lowercase,
help=argparse.SUPPRESS)
parser.add_argument(
'--binding-profile',
help=_('Custom data to be passed as binding:profile.'))
parser.add_argument(
'--binding_profile',
help=argparse.SUPPRESS)
self.add_arguments_secgroup(parser)
self.add_arguments_extradhcpopt(parser)
self.add_arguments_qos_policy(parser)
self.add_arguments_allowedaddresspairs(parser)
parser.add_argument(
'network_id', metavar='NETWORK',
help=_('ID or name of the network this port belongs to.'))
dns.add_dns_argument_create(parser, self.resource, 'name')
def args2body(self, parsed_args):
client = self.get_client()
_network_id = neutronV20.find_resourceid_by_name_or_id(
client, 'network', parsed_args.network_id)
body = {'admin_state_up': parsed_args.admin_state,
'network_id': _network_id, }
_updatable_args2body(parsed_args, body, client)
neutronV20.update_dict(parsed_args, body,
['mac_address', 'tenant_id'])
if parsed_args.vnic_type:
body['binding:vnic_type'] = parsed_args.vnic_type
if parsed_args.binding_profile:
body['binding:profile'] = jsonutils.loads(
parsed_args.binding_profile)
self.args2body_secgroup(parsed_args, body)
self.args2body_extradhcpopt(parsed_args, body)
self.args2body_qos_policy(parsed_args, body)
self.args2body_allowedaddresspairs(parsed_args, body)
dns.args2body_dns_create(parsed_args, body, 'name')
return {'port': body}
class DeletePort(neutronV20.DeleteCommand):
"""Delete a given port."""
resource = 'port'
class UpdatePort(neutronV20.UpdateCommand, UpdatePortSecGroupMixin,
UpdateExtraDhcpOptMixin, qos_policy.UpdateQosPolicyMixin,
UpdatePortAllowedAddressPair):
"""Update port's information."""
resource = 'port'
def add_known_arguments(self, parser):
_add_updatable_args(parser)
parser.add_argument(
'--admin-state-up',
choices=['True', 'False'],
help=_('Set admin state up for the port.'))
parser.add_argument(
'--admin_state_up',
choices=['True', 'False'],
help=argparse.SUPPRESS)
self.add_arguments_secgroup(parser)
self.add_arguments_extradhcpopt(parser)
self.add_arguments_qos_policy(parser)
self.add_arguments_allowedaddresspairs(parser)
dns.add_dns_argument_update(parser, self.resource, 'name')
def args2body(self, parsed_args):
body = {}
client = self.get_client()
_updatable_args2body(parsed_args, body, client)
if parsed_args.admin_state_up:
body['admin_state_up'] = parsed_args.admin_state_up
self.args2body_secgroup(parsed_args, body)
self.args2body_extradhcpopt(parsed_args, body)
self.args2body_qos_policy(parsed_args, body)
self.args2body_allowedaddresspairs(parsed_args, body)
dns.args2body_dns_update(parsed_args, body, 'name')
return {'port': body}
|
|
'''
Consumption-saving models with aggregate productivity shocks as well as
idiosyncratic income shocks. Currently only contains one microeconomic model
with a basic solver. Also includes a subclass of Market called
CobbDouglasEconomy, used for solving "macroeconomic" models with aggregate
shocks.
'''
import os
os.environ["R_HOME"] = "/Library/Frameworks/R.framework/Resources"
os.chdir("/Users/ganong/repo/HARK-comments-and-cleanup/gn")
import settings
settings.init()
import sys
sys.path.insert(0,'../')
sys.path.insert(0,'../ConsumptionSaving')
sys.path.insert(0,'../SolvingMicroDSOPs')
import numpy as np
import scipy.stats as stats
from HARKinterpolation import LinearInterp, LinearInterpOnInterp1D
from HARKutilities import CRRAutility, CRRAutilityP, CRRAutilityPP, CRRAutilityP_inv,\
CRRAutility_invP, CRRAutility_inv, combineIndepDstns,\
approxMeanOneLognormal
from HARKutilities import plotFuncs
from HARKsimulation import drawDiscrete, drawBernoulli
from ConsIndShockModel import ConsumerSolution, IndShockConsumerType
from HARKcore import HARKobject, Market, AgentType
from copy import deepcopy
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
class MargValueFunc2D():
'''
A class for representing a marginal value function in models where the
standard envelope condition of v'(m,k) = u'(c(m,k)) holds (with CRRA utility).
'''
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources: uP_inv(vPfunc(m,k)). Called cFunc because when standard
envelope condition applies, uP_inv(vPfunc(m,k)) = cFunc(m,k).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
        new instance of MargValueFunc2D
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m,k):
return utilityP(self.cFunc(m,k),gam=self.CRRA)
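# Hedged illustration of the envelope condition used above (toy numbers only,
# not part of the model): with cFunc(m,k) = 0.5*m and CRRA = 2, the marginal
# value at (m,k) = (4,1) is u'(c(4,1)) = 2.0**(-2.0) = 0.25.
def _example_marg_value_func_2d():
    vPfunc = MargValueFunc2D(lambda m, k: 0.5*m, CRRA=2.0)
    return vPfunc(4.0, 1.0)  # == 0.25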
###############################################################################
class AggShockConsumerType(IndShockConsumerType):
'''
A class to represent consumers who face idiosyncratic (transitory and per-
manent) shocks to their income and live in an economy that has aggregate
(transitory and permanent) shocks to labor productivity. As the capital-
to-labor ratio varies in the economy, so does the wage rate and interest
rate. "Aggregate shock consumers" have beliefs about how the capital ratio
evolves over time and take aggregate shocks into account when making their
decision about how much to consume.
'''
def __init__(self,time_flow=True,**kwds):
'''
Make a new instance of AggShockConsumerType, an extension of
IndShockConsumerType. Sets appropriate solver and input lists.
'''
AgentType.__init__(self,solution_terminal=deepcopy(IndShockConsumerType.solution_terminal_),
time_flow=time_flow,pseudo_terminal=False,**kwds)
# Add consumer-type specific objects, copying to create independent versions
self.time_vary = deepcopy(IndShockConsumerType.time_vary_)
self.time_inv = deepcopy(IndShockConsumerType.time_inv_)
self.delFromTimeInv('Rfree','BoroCnstArt','vFuncBool','CubicBool')
self.solveOnePeriod = solveConsAggShock
self.p_init = np.ones(self.Nagents)
self.update()
def reset(self):
'''
Initialize this type for a new simulated history of K/L ratio.
Parameters
----------
None
Returns
-------
None
'''
self.initializeSim()
self.t_agg_sim = 0
def updateSolutionTerminal(self):
'''
Updates the terminal period solution for an aggregate shock consumer.
Only fills in the consumption function and marginal value function.
Parameters
----------
None
Returns
-------
None
'''
vPfunc_terminal = lambda m,k : m**(-self.CRRA)
cFunc_terminal = lambda m,k : m
self.solution_terminal = ConsumerSolution(cFunc=cFunc_terminal,vPfunc=vPfunc_terminal)
def getEconomyData(self,Economy):
'''
Imports economy-determined objects into self from a Market.
Instances of AggShockConsumerType "live" in some macroeconomy that has
attributes relevant to their microeconomic model, like the relationship
between the capital-to-labor ratio and the interest and wage rates; this
method imports those attributes from an "economy" object and makes them
attributes of the ConsumerType.
Parameters
----------
Economy : Market
The "macroeconomy" in which this instance "lives". Might be of the
subclass CobbDouglasEconomy, which has methods to generate the
relevant attributes.
Returns
-------
None
'''
self.a_init = Economy.KtoYSS*np.ones(self.Nagents) # Initialize assets to steady state
self.kGrid = Economy.kSS*self.kGridBase # Capital ratio grid adjusted around SS ratio
self.kNextFunc = Economy.kNextFunc # Next period's capital ratio as function of current ratio
self.Rfunc = Economy.Rfunc # Interest factor as function of capital ratio
self.wFunc = Economy.wFunc # (Normalized) wage rate as function of capital ratio
IncomeDstnWithAggShks = combineIndepDstns(self.PermShkDstn,self.TranShkDstn,Economy.PermShkAggDstn) #Economy.TranShkAggDstn
self.IncomeDstn = [IncomeDstnWithAggShks] # Discrete income distribution with aggregate and idiosyncratic shocks
self.DiePrb = 1.0 - self.LivPrb[0] # Only relevant for simulating with mortality
self.addToTimeInv('kGrid','kNextFunc','Rfunc', 'wFunc')
def simOnePrd(self):
'''
Simulate a single period of a consumption-saving model with permanent
and transitory income shocks at both the idiosyncratic and aggregate level.
Parameters
----------
None
Returns
-------
None
'''
# Unpack objects from self for convenience
aPrev = self.aNow
pPrev = self.pNow
TranShkNow = self.TranShkNow
PermShkNow = self.PermShkNow
RfreeNow = self.RfreeNow
cFuncNow = self.cFuncNow
KtoLnow = self.KtoLnow*np.ones_like(aPrev)
# Simulate the period
pNow = pPrev*PermShkNow # Updated permanent income level
ReffNow = RfreeNow/PermShkNow # "effective" interest factor on normalized assets
bNow = ReffNow*aPrev # Bank balances before labor income
mNow = bNow + TranShkNow # Market resources after income
cNow = cFuncNow(mNow,KtoLnow) # Consumption (normalized by permanent income)
MPCnow = cFuncNow.derivativeX(mNow,KtoLnow) # Marginal propensity to consume
aNow = mNow - cNow # Assets after all actions are accomplished
# Store the new state and control variables
self.pNow = pNow
self.bNow = bNow
self.mNow = mNow
self.cNow = cNow
self.MPCnow = MPCnow
self.aNow = aNow
# def simMortality(self):
# '''
# Simulates the mortality process, killing off some percentage of agents
# and replacing them with newborn agents.
#
# Parameters
# ----------
# none
#
# Returns
# -------
# none
# '''
# if hasattr(self,'DiePrb'):
# if self.DiePrb > 0:
# who_dies = drawBernoulli(N=self.Nagents,p=self.DiePrb,seed=self.RNG.randint(low=1, high=2**31-1))
# wealth_all = self.aNow*self.pNow
# who_lives = np.logical_not(who_dies)
# wealth_of_dead = np.sum(wealth_all[who_dies])
# wealth_of_live = np.sum(wealth_all[who_lives])
# R_actuarial = 1.0 + wealth_of_dead/wealth_of_live
# self.aNow[who_dies] = 0.0
# self.pNow[who_dies] = 1.0
# self.aNow = self.aNow*R_actuarial
#
# def marketAction(self):
# '''
# In the aggregate shocks model, the "market action" is to simulate one
# period of receiving income and choosing how much to consume.
#
# Parameters
# ----------
# none
#
# Returns
# -------
# none
# '''
# # Simulate the period
# self.advanceIncShks()
# self.advancecFunc()
# #self.simMortality()
# self.TranShkNow = self.TranShkNow*self.wRteNow
# self.PermShkNow = self.PermShkNow*self.PermShkAggNow
# self.simOnePrd()
#
# # Record the results of the period
# self.pHist[self.t_agg_sim,:] = self.pNow
# self.bHist[self.t_agg_sim,:] = self.bNow
# self.mHist[self.t_agg_sim,:] = self.mNow
# self.cHist[self.t_agg_sim,:] = self.cNow
# self.MPChist[self.t_agg_sim,:] = self.MPCnow
# self.aHist[self.t_agg_sim,:] = self.aNow
# self.t_agg_sim += 1
def calcBoundingValues(self):
'''
Calculate human wealth plus minimum and maximum MPC in an infinite
horizon model with only one period repeated indefinitely. Store results
as attributes of self. Human wealth is the present discounted value of
expected future income after receiving income this period, ignoring mort-
ality. The maximum MPC is the limit of the MPC as m --> mNrmMin. The
minimum MPC is the limit of the MPC as m --> infty.
NOT YET IMPLEMENTED FOR THIS CLASS
Parameters
----------
None
Returns
-------
None
'''
raise NotImplementedError()
def makeEulerErrorFunc(self,mMax=100,approx_inc_dstn=True):
'''
Creates a "normalized Euler error" function for this instance, mapping
from market resources to "consumption error per dollar of consumption."
Stores result in attribute eulerErrorFunc as an interpolated function.
Has option to use approximate income distribution stored in self.IncomeDstn
or to use a (temporary) very dense approximation.
NOT YET IMPLEMENTED FOR THIS CLASS
Parameters
----------
mMax : float
Maximum normalized market resources for the Euler error function.
approx_inc_dstn : Boolean
Indicator for whether to use the approximate discrete income distri-
bution stored in self.IncomeDstn[0], or to use a very accurate
discrete approximation instead. When True, uses approximation in
IncomeDstn; when False, makes and uses a very dense approximation.
Returns
-------
None
'''
raise NotImplementedError()
###############################################################################
def solveConsAggShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,PermGroFac,aXtraGrid,kGrid,kNextFunc,Rfunc,wFunc):
'''
Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). This is a basic solver that
can't handle borrowing (assumes liquidity constraint) or cubic splines, nor
can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing five arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
        probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
    PermGroFac : float
Expected permanent income growth factor at the end of this period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
kGrid : np.array
A grid of capital-to-labor ratios in the economy.
kNextFunc : function
Next period's capital-to-labor ratio as a function of this period's ratio.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear interpola-
tions) and marginal value function vPfunc.
'''
# Unpack next period's solution
vPfuncNext = solution_next.vPfunc
# Unpack the income shocks
ShkPrbsNext = IncomeDstn[0]
PermShkValsNext = IncomeDstn[1]
TranShkValsNext = IncomeDstn[2]
PermShkAggValsNext = IncomeDstn[3]
# TranShkAggValsNext = IncomeDstn[4]
ShkCount = ShkPrbsNext.size
# Make the grid of end-of-period asset values, and a tiled version
aNrmNow = np.insert(aXtraGrid,0,0.0)
aNrmNow_tiled = np.tile(aNrmNow,(ShkCount,1))
aCount = aNrmNow.size
# Make tiled versions of the income shocks
ShkPrbsNext_tiled = (np.tile(ShkPrbsNext,(aCount,1))).transpose()
PermShkValsNext_tiled = (np.tile(PermShkValsNext,(aCount,1))).transpose()
TranShkValsNext_tiled = (np.tile(TranShkValsNext,(aCount,1))).transpose()
PermShkAggValsNext_tiled = (np.tile(PermShkAggValsNext,(aCount,1))).transpose()
# TranShkAggValsNext_tiled = (np.tile(TranShkAggValsNext,(aCount,1))).transpose()
# Loop through the values in kGrid and calculate a linear consumption function for each
cFuncByK_list = []
for j in range(kGrid.size):
kNow = kGrid[j]
kNext = kNextFunc(kNow)
# Calculate returns to capital and labor in the next period
kNextEff_array = kNext #/TranShkAggValsNext_tiled
Reff_array = Rfunc(kNextEff_array)/LivPrb # Effective interest rate
wEff_array = wFunc(kNextEff_array) #*TranShkAggValsNext_tiled # Effective wage rate (accounts for labor supply)
# Calculate market resources next period (and a constant array of capital-to-labor ratio)
PermShkTotal_array = PermGroFac*PermShkValsNext_tiled*PermShkAggValsNext_tiled # total / combined permanent shock
mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array
        if settings.t_curr == settings.t_rebate:
            if settings.verbose:
                print(mNrmNext_array[0])
                print(str(settings.t_rebate) + " years before death, I just gave a rebate of " + str(settings.rebate_size))
                print(mNrmNext_array[0] + settings.rebate_size)
mNrmNext_array = mNrmNext_array + settings.rebate_size
kNext_array = kNext*np.ones_like(mNrmNext_array)
# Find marginal value next period at every income shock realization and every asset gridpoint
vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,kNext_array)
# Calculate expectated marginal value at the end of the period at every asset gridpoint
EndOfPrdvP = DiscFac*LivPrb*PermGroFac**(-CRRA)*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=0)
# Calculate optimal consumption from each asset gridpoint, and construct a linear interpolation
cNrmNow = EndOfPrdvP**(-1.0/CRRA)
mNrmNow = aNrmNow + cNrmNow
c_for_interpolation = np.insert(cNrmNow,0,0.0) # Add liquidity constrained portion
m_for_interpolation = np.insert(mNrmNow,0,0.0)
cFuncNow_j = LinearInterp(m_for_interpolation,c_for_interpolation)
# Add the k-specific consumption function to the list
cFuncByK_list.append(cFuncNow_j)
# Construct the overall consumption function by combining the k-specific functions
cFuncNow = LinearInterpOnInterp1D(cFuncByK_list,kGrid)
# Construct the marginal value function using the envelope condition
vPfuncNow = MargValueFunc2D(cFuncNow,CRRA)
# Pack up and return the solution
solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow)
return solution_now
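# An illustrative, commented-out sketch (not part of the original source) of how
# this one-period solver might be called on its own.  IncomeDstn is assumed to be
# the five-array discrete approximation described in the docstring above, and
# solution_next a previously obtained ConsumerSolution whose vPfunc takes (m,k);
# the factor-price and capital-evolution lambdas are simplified versions of the
# ones built in CobbDouglasEconomy.update() below.
#
# CapShare = 0.36
# DeprFac = 0.025
# Rfunc = lambda k: 1.0 + CapShare*k**(CapShare-1.0) - DeprFac
# wFunc = lambda k: (1.0-CapShare)*k**CapShare
# kNextFunc = lambda k: 1.01*k  # placeholder capital evolution rule
# solution_now = solveConsAggShock(solution_next,IncomeDstn,LivPrb=0.98,DiscFac=0.96,
#                                  CRRA=2.0,PermGroFac=1.0,
#                                  aXtraGrid=np.linspace(0.001,20.0,48),
#                                  kGrid=np.linspace(2.0,16.0,12),
#                                  kNextFunc=kNextFunc,Rfunc=Rfunc,wFunc=wFunc)
# solution_now.cFunc(m,k) then evaluates consumption at market resources m and
# capital-to-labor ratio k.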
###############################################################################
class CobbDouglasEconomy(Market):
'''
A class to represent an economy with a Cobb-Douglas aggregate production
function over labor and capital, extending HARKcore.Market. The "aggregate
market process" for this market combines all individuals' asset holdings
into aggregate capital, yielding the interest factor on assets and the wage
rate for the upcoming period.
Note: The current implementation assumes a constant labor supply, but
this will be generalized in the future.
'''
def __init__(self,agents=[],tolerance=0.0001,act_T=1000,**kwds):
'''
Make a new instance of CobbDouglasEconomy by filling in attributes
specific to this kind of market.
Parameters
----------
agents : [ConsumerType]
List of types of consumers that live in this economy.
tolerance: float
Minimum acceptable distance between "dynamic rules" to consider the
solution process converged. Distance depends on intercept and slope
of the log-linear "next capital ratio" function.
act_T : int
            Number of periods to simulate when making a history of the market.
Returns
-------
None
'''
Market.__init__(self,agents=agents,
sow_vars=['KtoLnow','RfreeNow','wRteNow','PermShkAggNow','TranShkAggNow'],
reap_vars=['pNow','aNow'],
track_vars=['KtoLnow'],
dyn_vars=['kNextFunc'],
tolerance=tolerance,
act_T=act_T)
self.assignParameters(**kwds)
self.update()
# def millRule(self,pNow,aNow):
# '''
# Function to calculate the capital to labor ratio, interest factor, and
# wage rate based on each agent's current state. Just calls calcRandW().
#
# See documentation for calcRandW for more information.
# '''
# return self.calcRandW(pNow,aNow)
#
# def calcDynamics(self,KtoLnow):
# '''
# Calculates a new dynamic rule for the economy: next period's capital
# ratio as a function of this period's. Just calls calcCapitalEvoRule().
#
# See documentation for calcCapitalEvoRule for more information.
# '''
# return self.calcCapitalEvoRule(KtoLnow)
def update(self):
'''
Use primitive parameters (and perfect foresight calibrations) to make
interest factor and wage rate functions (of capital to labor ratio),
as well as discrete approximations to the aggregate shock distributions.
Parameters
----------
none
Returns
-------
none
'''
self.kSS = ((self.CRRA/self.DiscFac - (1.0-self.DeprFac))/self.CapShare)**(1.0/(self.CapShare-1.0))
self.KtoYSS = self.kSS**(1.0-self.CapShare)
self.wRteSS = (1.0-self.CapShare)*self.kSS**(self.CapShare)
self.convertKtoY = lambda KtoY : KtoY**(1.0/(1.0 - self.CapShare)) # converts K/Y to K/L
self.Rfunc = lambda k : (1.0 + self.CapShare*k**(self.CapShare-1.0) - self.DeprFac)
self.wFunc = lambda k : ((1.0-self.CapShare)*k**(self.CapShare))/self.wRteSS
self.KtoLnow_init = self.kSS
self.RfreeNow_init = self.Rfunc(self.kSS)
self.wRteNow_init = self.wFunc(self.kSS)
self.PermShkAggNow_init = 1.0
self.TranShkAggNow_init = 1.0
# self.TranShkAggDstn = approxMeanOneLognormal(sigma=self.TranShkAggStd,N=self.TranShkAggCount)
self.PermShkAggDstn = approxMeanOneLognormal(sigma=self.PermShkAggStd,N=self.PermShkAggCount)
# self.AggShkDstn = combineIndepDstns(self.PermShkAggDstn,self.TranShkAggDstn)
self.AggShkDstn = self.PermShkAggDstn
#self.kNextFunc = CapitalEvoRule(self.intercept_prev,self.slope_prev)
self.kNextFunc = lambda x: 1.01 * x #1% annual growth
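        # Hedged note (an assumption, not original code): if the log-linear capital
        # evolution rule were used instead of the placeholder 1% growth lambda above,
        # kNextFunc would be rebuilt from the previous intercept and slope guesses,
        # e.g.
        #   self.kNextFunc = lambda k: np.exp(self.intercept_prev
        #                                     + self.slope_prev*np.log(k))
        # which is what the commented-out CapitalEvoRule.__call__ below computes.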
def reset(self):
'''
Reset the economy to prepare for a new simulation. Sets the time index
of aggregate shocks to zero and runs Market.reset().
Parameters
----------
none
Returns
-------
none
'''
self.Shk_idx = 0
Market.reset(self)
def makeAggShkHist(self):
'''
        Make a simulated history of aggregate permanent shocks (the aggregate
        transitory shock history is disabled in this version). The history is
        of length self.act_T, for use in the general equilibrium simulation.
Parameters
----------
none
Returns
-------
none
'''
sim_periods = self.act_T
Events = np.arange(self.AggShkDstn[0].size) # just a list of integers
EventDraws = drawDiscrete(N=sim_periods,P=self.AggShkDstn[0],X=Events,seed=0)
PermShkAggHist = self.AggShkDstn[1][EventDraws]
# TranShkAggHist = self.AggShkDstn[2][EventDraws]
# Store the histories
self.PermShkAggHist = PermShkAggHist
# self.TranShkAggHist = TranShkAggHist
# def calcRandW(self,pNow,aNow):
# '''
# Calculates the interest factor and wage rate this period using each agent's
# capital stock to get the aggregate capital ratio.
#
# Parameters
# ----------
# pNow : [np.array]
# Agents' current permanent income levels. Elements of the list corr-
# espond to types in the economy, entries within arrays to agents of
# that type.
# aNow : [np.array]
# Agents' current end-of-period assets (normalized). Elements of the
# list correspond to types in the economy, entries within arrays to
# agents of that type.
#
# Returns
# -------
# AggVarsNow : CobbDouglasAggVars
# An object containing the aggregate variables for the upcoming period:
# capital-to-labor ratio, interest factor, (normalized) wage rate,
# aggregate permanent and transitory shocks.
# '''
# # Calculate aggregate capital this period
# type_count = len(aNow)
# aAll = np.zeros((type_count,aNow[0].size))
# pAll = np.zeros((type_count,pNow[0].size))
# for j in range(type_count):
# aAll[j,:] = aNow[j]
# pAll[j,:] = pNow[j]
# KtoYnow = np.mean(aAll*pAll) # This version uses end-of-period assets and
# # permanent income to calculate aggregate capital, unlike the Mathematica
# # version, which first applies the idiosyncratic permanent income shocks
# # and then aggregates. Obviously this is mathematically equivalent.
#
# # Get this period's aggregate shocks
# PermShkAggNow = self.PermShkAggHist[self.Shk_idx]
## TranShkAggNow = self.TranShkAggHist[self.Shk_idx]
# self.Shk_idx += 1
#
# # Calculate the interest factor and wage rate this period
# KtoLnow = self.convertKtoY(KtoYnow)
## RfreeNow = self.Rfunc(KtoLnow/TranShkAggNow)
## wRteNow = self.wFunc(KtoLnow/TranShkAggNow)*TranShkAggNow # "effective" wage accounts for labor supply
# RfreeNow = self.Rfunc(KtoLnow)
# wRteNow = self.wFunc(KtoLnow) # "effective" wage accounts for labor supply
#
# # Package the results into an object and return it
# AggVarsNow = CobbDouglasAggVars(KtoLnow,RfreeNow,wRteNow,PermShkAggNow) #,TranShkAggNow
# return AggVarsNow
# def calcCapitalEvoRule(self,KtoLnow):
# '''
# Calculate a new capital evolution rule as an AR1 process based on the history
# of the capital-to-labor ratio from a simulation.
#
# Parameters
# ----------
# KtoLnow : [float]
# List of the history of the simulated capital-to-labor ratio for an economy.
#
# Returns
# -------
# (unnamed) : CapDynamicRule
# Object containing a new capital evolution rule, calculated from the
# history of the capital-to-labor ratio.
# '''
# verbose = True
# discard_periods = 200 # Throw out the first T periods to allow the simulation to approach the SS
# update_weight = 0.5 # Proportional weight to put on new function vs old function parameters
# total_periods = len(KtoLnow)
#
# # Auto-regress the log capital-to-labor ratio, one period lag only
# logKtoL_t = np.log(KtoLnow[discard_periods:(total_periods-1)])
# logKtoL_tp1 = np.log(KtoLnow[(discard_periods+1):total_periods])
# slope, intercept, r_value, p_value, std_err = stats.linregress(logKtoL_t,logKtoL_tp1)
#
# # Make a new capital evolution rule by combining the new regression parameters
# # with the previous guess
# intercept = update_weight*intercept + (1.0-update_weight)*self.intercept_prev
# slope = update_weight*slope + (1.0-update_weight)*self.slope_prev
# kNextFunc = CapitalEvoRule(intercept,slope) # Make a new
#
# # Save the new values as "previous" values for the next iteration
# self.intercept_prev = intercept
# self.slope_prev = slope
#
# # Plot the history of the capital ratio for this run and print the new parameters
# if verbose:
# print('intercept=' + str(intercept) + ', slope=' + str(slope) + ', r-sq=' + str(r_value**2))
# plt.plot(KtoLnow[discard_periods:])
# plt.show()
#
# return CapDynamicRule(kNextFunc)
#class CobbDouglasAggVars():
# '''
# A simple class for holding the relevant aggregate variables that should be
# passed from the market to each type. Includes the capital-to-labor ratio,
# the interest factor, the wage rate, and the aggregate permanent and tran-
# sitory shocks.
# '''
# def __init__(self,KtoLnow,RfreeNow,wRteNow,PermShkAggNow): #,TranShkAggNow
# '''
# Make a new instance of CobbDouglasAggVars.
#
# Parameters
# ----------
# KtoLnow : float
# Capital-to-labor ratio in the economy this period.
# RfreeNow : float
# Interest factor on assets in the economy this period.
# wRteNow : float
# Wage rate for labor in the economy this period (normalized by the
# steady state wage rate).
# PermShkAggNow : float
# Permanent shock to aggregate labor productivity this period.
# TranShkAggNow : float
# Transitory shock to aggregate labor productivity this period.
#
# Returns
# -------
# None
# '''
# self.KtoLnow = KtoLnow
# self.RfreeNow = RfreeNow
# self.wRteNow = wRteNow
# self.PermShkAggNow = PermShkAggNow
## self.TranShkAggNow = TranShkAggNow
#class CapitalEvoRule(HARKobject):
# '''
# A class to represent capital evolution rules. Agents believe that the log
# capital ratio next period is a linear function of the log capital ratio
# this period.
# '''
# def __init__(self,intercept,slope):
# '''
# Make a new instance of CapitalEvoRule.
#
# Parameters
# ----------
# intercept : float
# Intercept of the log-linear capital evolution rule.
# slope : float
# Slope of the log-linear capital evolution rule.
#
# Returns
# -------
# new instance of CapitalEvoRule
# '''
# self.intercept = intercept
# self.slope = slope
# self.distance_criteria = ['slope','intercept']
#
# def __call__(self,kNow):
# '''
# Evaluates (expected) capital-to-labor ratio next period as a function
# of the capital-to-labor ratio this period.
#
# Parameters
# ----------
# kNow : float
# Capital-to-labor ratio this period.
#
# Returns
# -------
# kNext : (Expected) capital-to-labor ratio next period.
# '''
# kNext = np.exp(self.intercept + self.slope*np.log(kNow))
# return kNext
#
#class CapDynamicRule(HARKobject):
# '''
# Just a container class for passing the capital evolution rule to agents.
# '''
# def __init__(self,kNextFunc):
# '''
# Make a new instance of CapDynamicRule.
#
# Parameters
# ----------
# kNextFunc : CapitalEvoRule
# Next period's capital-to-labor ratio as a function of this period's.
#
# Returns
# -------
# None
# '''
# self.kNextFunc = kNextFunc
# self.distance_criteria = ['kNextFunc']
#
###############################################################################
if __name__ == '__main__':
import ConsumerParameters as Params
# import EstimationParameters as Params
from time import clock
import matplotlib.pyplot as plt
# from HARKutilities import plotFuncs
mystr = lambda number : "{:.4f}".format(number)
import settings
settings.init()
settings.t_rebate = 45
settings.rebate_size = 1
settings.verbose = True
# Make an aggregate shocks consumer
# Params.init_agg_shocks['T_total'] = 30
#need to also modify the shocks vector length
AggShockExample = AggShockConsumerType(**Params.init_agg_shocks)
#AggShockExample.T_total = 30
AggShockExample.cycles = 30
AggShockExample.sim_periods = 3000
#AggShockExample.makeIncShkHist() # Simulate a history of idiosyncratic shocks
# Make a Cobb-Douglas economy for the agents
EconomyExample = CobbDouglasEconomy(agents = [AggShockExample],act_T=3000,**Params.init_cobb_douglas)
#EconomyExample.makeAggShkHist() # Simulate a history of aggregate shocks
# Have the consumers inherit relevant objects from the economy
AggShockExample.getEconomyData(EconomyExample)
# Solve the microeconomic model for the aggregate shocks example type (and display results)
t_start = clock()
AggShockExample.solve()
t_end = clock()
print('Solving an aggregate shocks consumer took ' + mystr(t_end-t_start) + ' seconds.')
print('Consumption function at each capital-to-labor ratio gridpoint:')
m_grid = np.linspace(0,10,200)
AggShockExample.unpackcFunc()
for k in [0.87,4.67]: #AggShockExample.kGrid.tolist()
c_at_this_k = AggShockExample.cFunc[0](m_grid,k*np.ones_like(m_grid))
plt.plot(m_grid,c_at_this_k)
plt.show()
#
# # Solve the "macroeconomic" model by searching for a "fixed point dynamic rule"
# t_start = clock()
# EconomyExample.solve()
# t_end = clock()
# print('Solving the "macroeconomic" aggregate shocks model took ' + str(t_end - t_start) + ' seconds.')
# print('Next capital-to-labor ratio as function of current ratio:')
# plotFuncs(EconomyExample.kNextFunc,0,2*EconomyExample.kSS)
# print('Consumption function at each capital-to-labor ratio gridpoint (in general equilibrium):')
# AggShockExample.unpackcFunc()
# m_grid = np.linspace(0,10,200)
# for k in AggShockExample.kGrid.tolist():
# c_at_this_k = AggShockExample.cFunc[0](m_grid,k*np.ones_like(m_grid))
# plt.plot(m_grid,c_at_this_k)
# plt.show()
"""
pygments.lexers._openedge_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = (
'ABS',
'ABSO',
'ABSOL',
'ABSOLU',
'ABSOLUT',
'ABSOLUTE',
'ABSTRACT',
'ACCELERATOR',
'ACCUM',
'ACCUMU',
'ACCUMUL',
'ACCUMULA',
'ACCUMULAT',
'ACCUMULATE',
'ACTIVE-FORM',
'ACTIVE-WINDOW',
'ADD',
'ADD-BUFFER',
'ADD-CALC-COLUMN',
'ADD-COLUMNS-FROM',
'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM',
'ADD-FIRST',
'ADD-INDEX-FIELD',
'ADD-LAST',
'ADD-LIKE-COLUMN',
'ADD-LIKE-FIELD',
'ADD-LIKE-INDEX',
'ADD-NEW-FIELD',
'ADD-NEW-INDEX',
'ADD-SCHEMA-LOCATION',
'ADD-SUPER-PROCEDURE',
'ADM-DATA',
'ADVISE',
'ALERT-BOX',
'ALIAS',
'ALL',
'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION',
'ALTER',
'ALWAYS-ON-TOP',
'AMBIG',
'AMBIGU',
'AMBIGUO',
'AMBIGUOU',
'AMBIGUOUS',
'ANALYZ',
'ANALYZE',
'AND',
'ANSI-ONLY',
'ANY',
'ANYWHERE',
'APPEND',
'APPL-ALERT',
'APPL-ALERT-',
'APPL-ALERT-B',
'APPL-ALERT-BO',
'APPL-ALERT-BOX',
'APPL-ALERT-BOXE',
'APPL-ALERT-BOXES',
'APPL-CONTEXT-ID',
'APPLICATION',
'APPLY',
'APPSERVER-INFO',
'APPSERVER-PASSWORD',
'APPSERVER-USERID',
'ARRAY-MESSAGE',
'AS',
'ASC',
'ASCE',
'ASCEN',
'ASCEND',
'ASCENDI',
'ASCENDIN',
'ASCENDING',
'ASK-OVERWRITE',
'ASSEMBLY',
'ASSIGN',
'ASYNC-REQUEST-COUNT',
'ASYNC-REQUEST-HANDLE',
'ASYNCHRONOUS',
'AT',
'ATTACHED-PAIRLIST',
'ATTR',
'ATTR-SPACE',
'ATTRI',
'ATTRIB',
'ATTRIBU',
'ATTRIBUT',
'AUDIT-CONTROL',
'AUDIT-ENABLED',
'AUDIT-EVENT-CONTEXT',
'AUDIT-POLICY',
'AUTHENTICATION-FAILED',
'AUTHORIZATION',
'AUTO-COMP',
'AUTO-COMPL',
'AUTO-COMPLE',
'AUTO-COMPLET',
'AUTO-COMPLETI',
'AUTO-COMPLETIO',
'AUTO-COMPLETION',
'AUTO-END-KEY',
'AUTO-ENDKEY',
'AUTO-GO',
'AUTO-IND',
'AUTO-INDE',
'AUTO-INDEN',
'AUTO-INDENT',
'AUTO-RESIZE',
'AUTO-RET',
'AUTO-RETU',
'AUTO-RETUR',
'AUTO-RETURN',
'AUTO-SYNCHRONIZE',
'AUTO-Z',
'AUTO-ZA',
'AUTO-ZAP',
'AUTOMATIC',
'AVAIL',
'AVAILA',
'AVAILAB',
'AVAILABL',
'AVAILABLE',
'AVAILABLE-FORMATS',
'AVE',
'AVER',
'AVERA',
'AVERAG',
'AVERAGE',
'AVG',
'BACK',
'BACKG',
'BACKGR',
'BACKGRO',
'BACKGROU',
'BACKGROUN',
'BACKGROUND',
'BACKWARD',
'BACKWARDS',
'BASE64-DECODE',
'BASE64-ENCODE',
'BASE-ADE',
'BASE-KEY',
'BATCH',
'BATCH-',
'BATCH-M',
'BATCH-MO',
'BATCH-MOD',
'BATCH-MODE',
'BATCH-SIZE',
'BEFORE-H',
'BEFORE-HI',
'BEFORE-HID',
'BEFORE-HIDE',
'BEGIN-EVENT-GROUP',
'BEGINS',
'BELL',
'BETWEEN',
'BGC',
'BGCO',
'BGCOL',
'BGCOLO',
'BGCOLOR',
'BIG-ENDIAN',
'BINARY',
'BIND',
'BIND-WHERE',
'BLANK',
'BLOCK-ITERATION-DISPLAY',
'BLOCK-LEVEL',
'BORDER-B',
'BORDER-BO',
'BORDER-BOT',
'BORDER-BOTT',
'BORDER-BOTTO',
'BORDER-BOTTOM-CHARS',
'BORDER-BOTTOM-P',
'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX',
'BORDER-BOTTOM-PIXE',
'BORDER-BOTTOM-PIXEL',
'BORDER-BOTTOM-PIXELS',
'BORDER-L',
'BORDER-LE',
'BORDER-LEF',
'BORDER-LEFT',
'BORDER-LEFT-',
'BORDER-LEFT-C',
'BORDER-LEFT-CH',
'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR',
'BORDER-LEFT-CHARS',
'BORDER-LEFT-P',
'BORDER-LEFT-PI',
'BORDER-LEFT-PIX',
'BORDER-LEFT-PIXE',
'BORDER-LEFT-PIXEL',
'BORDER-LEFT-PIXELS',
'BORDER-R',
'BORDER-RI',
'BORDER-RIG',
'BORDER-RIGH',
'BORDER-RIGHT',
'BORDER-RIGHT-',
'BORDER-RIGHT-C',
'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA',
'BORDER-RIGHT-CHAR',
'BORDER-RIGHT-CHARS',
'BORDER-RIGHT-P',
'BORDER-RIGHT-PI',
'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE',
'BORDER-RIGHT-PIXEL',
'BORDER-RIGHT-PIXELS',
'BORDER-T',
'BORDER-TO',
'BORDER-TOP',
'BORDER-TOP-',
'BORDER-TOP-C',
'BORDER-TOP-CH',
'BORDER-TOP-CHA',
'BORDER-TOP-CHAR',
'BORDER-TOP-CHARS',
'BORDER-TOP-P',
'BORDER-TOP-PI',
'BORDER-TOP-PIX',
'BORDER-TOP-PIXE',
'BORDER-TOP-PIXEL',
'BORDER-TOP-PIXELS',
'BOX',
'BOX-SELECT',
'BOX-SELECTA',
'BOX-SELECTAB',
'BOX-SELECTABL',
'BOX-SELECTABLE',
'BREAK',
'BROWSE',
'BUFFER',
'BUFFER-CHARS',
'BUFFER-COMPARE',
'BUFFER-COPY',
'BUFFER-CREATE',
'BUFFER-DELETE',
'BUFFER-FIELD',
'BUFFER-HANDLE',
'BUFFER-LINES',
'BUFFER-NAME',
'BUFFER-PARTITION-ID',
'BUFFER-RELEASE',
'BUFFER-VALUE',
'BUTTON',
'BUTTONS',
'BY',
'BY-POINTER',
'BY-VARIANT-POINTER',
'CACHE',
'CACHE-SIZE',
'CALL',
'CALL-NAME',
'CALL-TYPE',
'CAN-CREATE',
'CAN-DELETE',
'CAN-DO',
'CAN-DO-DOMAIN-SUPPORT',
'CAN-FIND',
'CAN-QUERY',
'CAN-READ',
'CAN-SET',
'CAN-WRITE',
'CANCEL-BREAK',
'CANCEL-BUTTON',
'CAPS',
'CAREFUL-PAINT',
'CASE',
'CASE-SEN',
'CASE-SENS',
'CASE-SENSI',
'CASE-SENSIT',
'CASE-SENSITI',
'CASE-SENSITIV',
'CASE-SENSITIVE',
'CAST',
'CATCH',
'CDECL',
'CENTER',
'CENTERE',
'CENTERED',
'CHAINED',
'CHARACTER',
'CHARACTER_LENGTH',
'CHARSET',
'CHECK',
'CHECKED',
'CHOOSE',
'CHR',
'CLASS',
'CLASS-TYPE',
'CLEAR',
'CLEAR-APPL-CONTEXT',
'CLEAR-LOG',
'CLEAR-SELECT',
'CLEAR-SELECTI',
'CLEAR-SELECTIO',
'CLEAR-SELECTION',
'CLEAR-SORT-ARROW',
'CLEAR-SORT-ARROWS',
'CLIENT-CONNECTION-ID',
'CLIENT-PRINCIPAL',
'CLIENT-TTY',
'CLIENT-TYPE',
'CLIENT-WORKSTATION',
'CLIPBOARD',
'CLOSE',
'CLOSE-LOG',
'CODE',
'CODEBASE-LOCATOR',
'CODEPAGE',
'CODEPAGE-CONVERT',
'COL',
'COL-OF',
'COLLATE',
'COLON',
'COLON-ALIGN',
'COLON-ALIGNE',
'COLON-ALIGNED',
'COLOR',
'COLOR-TABLE',
'COLU',
'COLUM',
'COLUMN',
'COLUMN-BGCOLOR',
'COLUMN-DCOLOR',
'COLUMN-FGCOLOR',
'COLUMN-FONT',
'COLUMN-LAB',
'COLUMN-LABE',
'COLUMN-LABEL',
'COLUMN-MOVABLE',
'COLUMN-OF',
'COLUMN-PFCOLOR',
'COLUMN-READ-ONLY',
'COLUMN-RESIZABLE',
'COLUMN-SCROLLING',
'COLUMNS',
'COM-HANDLE',
'COM-SELF',
'COMBO-BOX',
'COMMAND',
'COMPARES',
'COMPILE',
'COMPILER',
'COMPLETE',
'CONFIG-NAME',
'CONNECT',
'CONNECTED',
'CONSTRUCTOR',
'CONTAINS',
'CONTENTS',
'CONTEXT',
'CONTEXT-HELP',
'CONTEXT-HELP-FILE',
'CONTEXT-HELP-ID',
'CONTEXT-POPUP',
'CONTROL',
'CONTROL-BOX',
'CONTROL-FRAME',
'CONVERT',
'CONVERT-3D-COLORS',
'CONVERT-TO-OFFS',
'CONVERT-TO-OFFSE',
'CONVERT-TO-OFFSET',
'COPY-DATASET',
'COPY-LOB',
'COPY-SAX-ATTRIBUTES',
'COPY-TEMP-TABLE',
'COUNT',
'COUNT-OF',
'CPCASE',
'CPCOLL',
'CPINTERNAL',
'CPLOG',
'CPPRINT',
'CPRCODEIN',
'CPRCODEOUT',
'CPSTREAM',
'CPTERM',
'CRC-VALUE',
'CREATE',
'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL',
'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY',
'CREATE-TEST-FILE',
'CURRENT',
'CURRENT-CHANGED',
'CURRENT-COLUMN',
'CURRENT-ENV',
'CURRENT-ENVI',
'CURRENT-ENVIR',
'CURRENT-ENVIRO',
'CURRENT-ENVIRON',
'CURRENT-ENVIRONM',
'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN',
'CURRENT-ENVIRONMENT',
'CURRENT-ITERATION',
'CURRENT-LANG',
'CURRENT-LANGU',
'CURRENT-LANGUA',
'CURRENT-LANGUAG',
'CURRENT-LANGUAGE',
'CURRENT-QUERY',
'CURRENT-REQUEST-INFO',
'CURRENT-RESPONSE-INFO',
'CURRENT-RESULT-ROW',
'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE',
'CURRENT-WINDOW',
'CURRENT_DATE',
'CURS',
'CURSO',
'CURSOR',
'CURSOR-CHAR',
'CURSOR-LINE',
'CURSOR-OFFSET',
'DATA-BIND',
'DATA-ENTRY-RET',
'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR',
'DATA-ENTRY-RETURN',
'DATA-REL',
'DATA-RELA',
'DATA-RELAT',
'DATA-RELATI',
'DATA-RELATIO',
'DATA-RELATION',
'DATA-SOURCE',
'DATA-SOURCE-COMPLETE-MAP',
'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID',
'DATA-T',
'DATA-TY',
'DATA-TYP',
'DATA-TYPE',
'DATABASE',
'DATASERVERS',
'DATASET',
'DATASET-HANDLE',
'DATE',
'DATE-F',
'DATE-FO',
'DATE-FOR',
'DATE-FORM',
'DATE-FORMA',
'DATE-FORMAT',
'DAY',
'DB-CONTEXT',
'DB-REFERENCES',
'DBCODEPAGE',
'DBCOLLATION',
'DBNAME',
'DBPARAM',
'DBREST',
'DBRESTR',
'DBRESTRI',
'DBRESTRIC',
'DBRESTRICT',
'DBRESTRICTI',
'DBRESTRICTIO',
'DBRESTRICTION',
'DBRESTRICTIONS',
'DBTASKID',
'DBTYPE',
'DBVERS',
'DBVERSI',
'DBVERSIO',
'DBVERSION',
'DCOLOR',
'DDE',
'DDE-ERROR',
'DDE-I',
'DDE-ID',
'DDE-ITEM',
'DDE-NAME',
'DDE-TOPIC',
'DEBLANK',
'DEBU',
'DEBUG',
'DEBUG-ALERT',
'DEBUG-LIST',
'DEBUGGER',
'DECIMAL',
'DECIMALS',
'DECLARE',
'DECLARE-NAMESPACE',
'DECRYPT',
'DEFAULT',
'DEFAULT-B',
'DEFAULT-BU',
'DEFAULT-BUFFER-HANDLE',
'DEFAULT-BUT',
'DEFAULT-BUTT',
'DEFAULT-BUTTO',
'DEFAULT-BUTTON',
'DEFAULT-COMMIT',
'DEFAULT-EX',
'DEFAULT-EXT',
'DEFAULT-EXTE',
'DEFAULT-EXTEN',
'DEFAULT-EXTENS',
'DEFAULT-EXTENSI',
'DEFAULT-EXTENSIO',
'DEFAULT-EXTENSION',
'DEFAULT-NOXL',
'DEFAULT-NOXLA',
'DEFAULT-NOXLAT',
'DEFAULT-NOXLATE',
'DEFAULT-VALUE',
'DEFAULT-WINDOW',
'DEFINE',
'DEFINE-USER-EVENT-MANAGER',
'DEFINED',
'DEL',
'DELE',
'DELEGATE',
'DELET',
'DELETE PROCEDURE',
'DELETE',
'DELETE-CHAR',
'DELETE-CHARA',
'DELETE-CHARAC',
'DELETE-CHARACT',
'DELETE-CHARACTE',
'DELETE-CHARACTER',
'DELETE-CURRENT-ROW',
'DELETE-LINE',
'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW',
'DELETE-SELECTED-ROWS',
'DELIMITER',
'DESC',
'DESCE',
'DESCEN',
'DESCEND',
'DESCENDI',
'DESCENDIN',
'DESCENDING',
'DESELECT-FOCUSED-ROW',
'DESELECT-ROWS',
'DESELECT-SELECTED-ROW',
'DESELECTION',
'DESTRUCTOR',
'DIALOG-BOX',
'DICT',
'DICTI',
'DICTIO',
'DICTION',
'DICTIONA',
'DICTIONAR',
'DICTIONARY',
'DIR',
'DISABLE',
'DISABLE-AUTO-ZAP',
'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS',
'DISABLED',
'DISCON',
'DISCONN',
'DISCONNE',
'DISCONNEC',
'DISCONNECT',
'DISP',
'DISPL',
'DISPLA',
'DISPLAY',
'DISPLAY-MESSAGE',
'DISPLAY-T',
'DISPLAY-TY',
'DISPLAY-TYP',
'DISPLAY-TYPE',
'DISTINCT',
'DO',
'DOMAIN-DESCRIPTION',
'DOMAIN-NAME',
'DOMAIN-TYPE',
'DOS',
'DOUBLE',
'DOWN',
'DRAG-ENABLED',
'DROP',
'DROP-DOWN',
'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY',
'DROP-TARGET',
'DS-CLOSE-CURSOR',
'DSLOG-MANAGER',
'DUMP',
'DYNAMIC',
'DYNAMIC-ENUM',
'DYNAMIC-FUNCTION',
'DYNAMIC-INVOKE',
'EACH',
'ECHO',
'EDGE',
'EDGE-',
'EDGE-C',
'EDGE-CH',
'EDGE-CHA',
'EDGE-CHAR',
'EDGE-CHARS',
'EDGE-P',
'EDGE-PI',
'EDGE-PIX',
'EDGE-PIXE',
'EDGE-PIXEL',
'EDGE-PIXELS',
'EDIT-CAN-PASTE',
'EDIT-CAN-UNDO',
'EDIT-CLEAR',
'EDIT-COPY',
'EDIT-CUT',
'EDIT-PASTE',
'EDIT-UNDO',
'EDITING',
'EDITOR',
'ELSE',
'EMPTY',
'EMPTY-TEMP-TABLE',
'ENABLE',
'ENABLED-FIELDS',
'ENCODE',
'ENCRYPT',
'ENCRYPT-AUDIT-MAC-KEY',
'ENCRYPTION-SALT',
'END',
'END-DOCUMENT',
'END-ELEMENT',
'END-EVENT-GROUP',
'END-FILE-DROP',
'END-KEY',
'END-MOVE',
'END-RESIZE',
'END-ROW-RESIZE',
'END-USER-PROMPT',
'ENDKEY',
'ENTERED',
'ENTITY-EXPANSION-LIMIT',
'ENTRY',
'ENUM',
'EQ',
'ERROR',
'ERROR-COL',
'ERROR-COLU',
'ERROR-COLUM',
'ERROR-COLUMN',
'ERROR-ROW',
'ERROR-STACK-TRACE',
'ERROR-STAT',
'ERROR-STATU',
'ERROR-STATUS',
'ESCAPE',
'ETIME',
'EVENT',
'EVENT-GROUP-ID',
'EVENT-PROCEDURE',
'EVENT-PROCEDURE-CONTEXT',
'EVENT-T',
'EVENT-TY',
'EVENT-TYP',
'EVENT-TYPE',
'EVENTS',
'EXCEPT',
'EXCLUSIVE',
'EXCLUSIVE-',
'EXCLUSIVE-ID',
'EXCLUSIVE-L',
'EXCLUSIVE-LO',
'EXCLUSIVE-LOC',
'EXCLUSIVE-LOCK',
'EXCLUSIVE-WEB-USER',
'EXECUTE',
'EXISTS',
'EXP',
'EXPAND',
'EXPANDABLE',
'EXPLICIT',
'EXPORT',
'EXPORT-PRINCIPAL',
'EXTENDED',
'EXTENT',
'EXTERNAL',
'FALSE',
'FETCH',
'FETCH-SELECTED-ROW',
'FGC',
'FGCO',
'FGCOL',
'FGCOLO',
'FGCOLOR',
'FIELD',
'FIELDS',
'FILE',
'FILE-CREATE-DATE',
'FILE-CREATE-TIME',
'FILE-INFO',
'FILE-INFOR',
'FILE-INFORM',
'FILE-INFORMA',
'FILE-INFORMAT',
'FILE-INFORMATI',
'FILE-INFORMATIO',
'FILE-INFORMATION',
'FILE-MOD-DATE',
'FILE-MOD-TIME',
'FILE-NAME',
'FILE-OFF',
'FILE-OFFS',
'FILE-OFFSE',
'FILE-OFFSET',
'FILE-SIZE',
'FILE-TYPE',
'FILENAME',
'FILL',
'FILL-IN',
'FILLED',
'FILTERS',
'FINAL',
'FINALLY',
'FIND',
'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE',
'FIND-CURRENT',
'FIND-FIRST',
'FIND-GLOBAL',
'FIND-LAST',
'FIND-NEXT-OCCURRENCE',
'FIND-PREV-OCCURRENCE',
'FIND-SELECT',
'FIND-UNIQUE',
'FIND-WRAP-AROUND',
'FINDER',
'FIRST',
'FIRST-ASYNCH-REQUEST',
'FIRST-CHILD',
'FIRST-COLUMN',
'FIRST-FORM',
'FIRST-OBJECT',
'FIRST-OF',
'FIRST-PROC',
'FIRST-PROCE',
'FIRST-PROCED',
'FIRST-PROCEDU',
'FIRST-PROCEDUR',
'FIRST-PROCEDURE',
'FIRST-SERVER',
'FIRST-TAB-I',
'FIRST-TAB-IT',
'FIRST-TAB-ITE',
'FIRST-TAB-ITEM',
'FIT-LAST-COLUMN',
'FIXED-ONLY',
'FLAT-BUTTON',
'FLOAT',
'FOCUS',
'FOCUSED-ROW',
'FOCUSED-ROW-SELECTED',
'FONT',
'FONT-TABLE',
'FOR',
'FORCE-FILE',
'FORE',
'FOREG',
'FOREGR',
'FOREGRO',
'FOREGROU',
'FOREGROUN',
'FOREGROUND',
'FORM INPUT',
'FORM',
'FORM-LONG-INPUT',
'FORMA',
'FORMAT',
'FORMATTE',
'FORMATTED',
'FORWARD',
'FORWARDS',
'FRAGMEN',
'FRAGMENT',
'FRAM',
'FRAME',
'FRAME-COL',
'FRAME-DB',
'FRAME-DOWN',
'FRAME-FIELD',
'FRAME-FILE',
'FRAME-INDE',
'FRAME-INDEX',
'FRAME-LINE',
'FRAME-NAME',
'FRAME-ROW',
'FRAME-SPA',
'FRAME-SPAC',
'FRAME-SPACI',
'FRAME-SPACIN',
'FRAME-SPACING',
'FRAME-VAL',
'FRAME-VALU',
'FRAME-VALUE',
'FRAME-X',
'FRAME-Y',
'FREQUENCY',
'FROM',
'FROM-C',
'FROM-CH',
'FROM-CHA',
'FROM-CHAR',
'FROM-CHARS',
'FROM-CUR',
'FROM-CURR',
'FROM-CURRE',
'FROM-CURREN',
'FROM-CURRENT',
'FROM-P',
'FROM-PI',
'FROM-PIX',
'FROM-PIXE',
'FROM-PIXEL',
'FROM-PIXELS',
'FULL-HEIGHT',
'FULL-HEIGHT-',
'FULL-HEIGHT-C',
'FULL-HEIGHT-CH',
'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR',
'FULL-HEIGHT-CHARS',
'FULL-HEIGHT-P',
'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX',
'FULL-HEIGHT-PIXE',
'FULL-HEIGHT-PIXEL',
'FULL-HEIGHT-PIXELS',
'FULL-PATHN',
'FULL-PATHNA',
'FULL-PATHNAM',
'FULL-PATHNAME',
'FULL-WIDTH',
'FULL-WIDTH-',
'FULL-WIDTH-C',
'FULL-WIDTH-CH',
'FULL-WIDTH-CHA',
'FULL-WIDTH-CHAR',
'FULL-WIDTH-CHARS',
'FULL-WIDTH-P',
'FULL-WIDTH-PI',
'FULL-WIDTH-PIX',
'FULL-WIDTH-PIXE',
'FULL-WIDTH-PIXEL',
'FULL-WIDTH-PIXELS',
'FUNCTION',
'FUNCTION-CALL-TYPE',
'GATEWAY',
'GATEWAYS',
'GE',
'GENERATE-MD5',
'GENERATE-PBE-KEY',
'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY',
'GENERATE-UUID',
'GET',
'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE',
'GET-BINARY-DATA',
'GET-BLUE',
'GET-BLUE-',
'GET-BLUE-V',
'GET-BLUE-VA',
'GET-BLUE-VAL',
'GET-BLUE-VALU',
'GET-BLUE-VALUE',
'GET-BROWSE-COLUMN',
'GET-BUFFER-HANDLE',
'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT',
'GET-CALLBACK-PROC-NAME',
'GET-CGI-LIST',
'GET-CGI-LONG-VALUE',
'GET-CGI-VALUE',
'GET-CLASS',
'GET-CODEPAGES',
'GET-COLLATIONS',
'GET-CONFIG-VALUE',
'GET-CURRENT',
'GET-DOUBLE',
'GET-DROPPED-FILE',
'GET-DYNAMIC',
'GET-ERROR-COLUMN',
'GET-ERROR-ROW',
'GET-FILE',
'GET-FILE-NAME',
'GET-FILE-OFFSE',
'GET-FILE-OFFSET',
'GET-FIRST',
'GET-FLOAT',
'GET-GREEN',
'GET-GREEN-',
'GET-GREEN-V',
'GET-GREEN-VA',
'GET-GREEN-VAL',
'GET-GREEN-VALU',
'GET-GREEN-VALUE',
'GET-INDEX-BY-NAMESPACE-NAME',
'GET-INDEX-BY-QNAME',
'GET-INT64',
'GET-ITERATION',
'GET-KEY-VAL',
'GET-KEY-VALU',
'GET-KEY-VALUE',
'GET-LAST',
'GET-LOCALNAME-BY-INDEX',
'GET-LONG',
'GET-MESSAGE',
'GET-NEXT',
'GET-NUMBER',
'GET-POINTER-VALUE',
'GET-PREV',
'GET-PRINTERS',
'GET-PROPERTY',
'GET-QNAME-BY-INDEX',
'GET-RED',
'GET-RED-',
'GET-RED-V',
'GET-RED-VA',
'GET-RED-VAL',
'GET-RED-VALU',
'GET-RED-VALUE',
'GET-REPOSITIONED-ROW',
'GET-RGB-VALUE',
'GET-SELECTED',
'GET-SELECTED-',
'GET-SELECTED-W',
'GET-SELECTED-WI',
'GET-SELECTED-WID',
'GET-SELECTED-WIDG',
'GET-SELECTED-WIDGE',
'GET-SELECTED-WIDGET',
'GET-SHORT',
'GET-SIGNATURE',
'GET-SIZE',
'GET-STRING',
'GET-TAB-ITEM',
'GET-TEXT-HEIGHT',
'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C',
'GET-TEXT-HEIGHT-CH',
'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR',
'GET-TEXT-HEIGHT-CHARS',
'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI',
'GET-TEXT-HEIGHT-PIX',
'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL',
'GET-TEXT-HEIGHT-PIXELS',
'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-',
'GET-TEXT-WIDTH-C',
'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA',
'GET-TEXT-WIDTH-CHAR',
'GET-TEXT-WIDTH-CHARS',
'GET-TEXT-WIDTH-P',
'GET-TEXT-WIDTH-PI',
'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE',
'GET-TEXT-WIDTH-PIXEL',
'GET-TEXT-WIDTH-PIXELS',
'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME',
'GET-TYPE-BY-QNAME',
'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT',
'GET-URI-BY-INDEX',
'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME',
'GET-VALUE-BY-QNAME',
'GET-WAIT-STATE',
'GETBYTE',
'GLOBAL',
'GO-ON',
'GO-PEND',
'GO-PENDI',
'GO-PENDIN',
'GO-PENDING',
'GRANT',
'GRAPHIC-E',
'GRAPHIC-ED',
'GRAPHIC-EDG',
'GRAPHIC-EDGE',
'GRID-FACTOR-H',
'GRID-FACTOR-HO',
'GRID-FACTOR-HOR',
'GRID-FACTOR-HORI',
'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO',
'GRID-FACTOR-HORIZON',
'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA',
'GRID-FACTOR-HORIZONTAL',
'GRID-FACTOR-V',
'GRID-FACTOR-VE',
'GRID-FACTOR-VER',
'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI',
'GRID-FACTOR-VERTIC',
'GRID-FACTOR-VERTICA',
'GRID-FACTOR-VERTICAL',
'GRID-SNAP',
'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-',
'GRID-UNIT-HEIGHT-C',
'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA',
'GRID-UNIT-HEIGHT-CHARS',
'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI',
'GRID-UNIT-HEIGHT-PIX',
'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL',
'GRID-UNIT-HEIGHT-PIXELS',
'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-',
'GRID-UNIT-WIDTH-C',
'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA',
'GRID-UNIT-WIDTH-CHAR',
'GRID-UNIT-WIDTH-CHARS',
'GRID-UNIT-WIDTH-P',
'GRID-UNIT-WIDTH-PI',
'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE',
'GRID-UNIT-WIDTH-PIXEL',
'GRID-UNIT-WIDTH-PIXELS',
'GRID-VISIBLE',
'GROUP',
'GT',
'GUID',
'HANDLE',
'HANDLER',
'HAS-RECORDS',
'HAVING',
'HEADER',
'HEIGHT',
'HEIGHT-',
'HEIGHT-C',
'HEIGHT-CH',
'HEIGHT-CHA',
'HEIGHT-CHAR',
'HEIGHT-CHARS',
'HEIGHT-P',
'HEIGHT-PI',
'HEIGHT-PIX',
'HEIGHT-PIXE',
'HEIGHT-PIXEL',
'HEIGHT-PIXELS',
'HELP',
'HEX-DECODE',
'HEX-ENCODE',
'HIDDEN',
'HIDE',
'HORI',
'HORIZ',
'HORIZO',
'HORIZON',
'HORIZONT',
'HORIZONTA',
'HORIZONTAL',
'HOST-BYTE-ORDER',
'HTML-CHARSET',
'HTML-END-OF-LINE',
'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN',
'HTML-FRAME-END',
'HTML-HEADER-BEGIN',
'HTML-HEADER-END',
'HTML-TITLE-BEGIN',
'HTML-TITLE-END',
'HWND',
'ICON',
'IF',
'IMAGE',
'IMAGE-DOWN',
'IMAGE-INSENSITIVE',
'IMAGE-SIZE',
'IMAGE-SIZE-C',
'IMAGE-SIZE-CH',
'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR',
'IMAGE-SIZE-CHARS',
'IMAGE-SIZE-P',
'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX',
'IMAGE-SIZE-PIXE',
'IMAGE-SIZE-PIXEL',
'IMAGE-SIZE-PIXELS',
'IMAGE-UP',
'IMMEDIATE-DISPLAY',
'IMPLEMENTS',
'IMPORT',
'IMPORT-PRINCIPAL',
'IN',
'IN-HANDLE',
'INCREMENT-EXCLUSIVE-ID',
'INDEX',
'INDEX-HINT',
'INDEX-INFORMATION',
'INDEXED-REPOSITION',
'INDICATOR',
'INFO',
'INFOR',
'INFORM',
'INFORMA',
'INFORMAT',
'INFORMATI',
'INFORMATIO',
'INFORMATION',
'INHERIT-BGC',
'INHERIT-BGCO',
'INHERIT-BGCOL',
'INHERIT-BGCOLO',
'INHERIT-BGCOLOR',
'INHERIT-FGC',
'INHERIT-FGCO',
'INHERIT-FGCOL',
'INHERIT-FGCOLO',
'INHERIT-FGCOLOR',
'INHERITS',
'INIT',
'INITI',
'INITIA',
'INITIAL',
'INITIAL-DIR',
'INITIAL-FILTER',
'INITIALIZE-DOCUMENT-TYPE',
'INITIATE',
'INNER-CHARS',
'INNER-LINES',
'INPUT',
'INPUT-O',
'INPUT-OU',
'INPUT-OUT',
'INPUT-OUTP',
'INPUT-OUTPU',
'INPUT-OUTPUT',
'INPUT-VALUE',
'INSERT',
'INSERT-ATTRIBUTE',
'INSERT-B',
'INSERT-BA',
'INSERT-BAC',
'INSERT-BACK',
'INSERT-BACKT',
'INSERT-BACKTA',
'INSERT-BACKTAB',
'INSERT-FILE',
'INSERT-ROW',
'INSERT-STRING',
'INSERT-T',
'INSERT-TA',
'INSERT-TAB',
'INT64',
'INT',
'INTEGER',
'INTERFACE',
'INTERNAL-ENTRIES',
'INTO',
'INVOKE',
'IS',
'IS-ATTR',
'IS-ATTR-',
'IS-ATTR-S',
'IS-ATTR-SP',
'IS-ATTR-SPA',
'IS-ATTR-SPAC',
'IS-ATTR-SPACE',
'IS-CLASS',
'IS-JSON',
'IS-LEAD-BYTE',
'IS-OPEN',
'IS-PARAMETER-SET',
'IS-PARTITIONED',
'IS-ROW-SELECTED',
'IS-SELECTED',
'IS-XML',
'ITEM',
'ITEMS-PER-ROW',
'JOIN',
'JOIN-BY-SQLDB',
'KBLABEL',
'KEEP-CONNECTION-OPEN',
'KEEP-FRAME-Z',
'KEEP-FRAME-Z-',
'KEEP-FRAME-Z-O',
'KEEP-FRAME-Z-OR',
'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE',
'KEEP-FRAME-Z-ORDER',
'KEEP-MESSAGES',
'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER',
'KEY',
'KEY-CODE',
'KEY-FUNC',
'KEY-FUNCT',
'KEY-FUNCTI',
'KEY-FUNCTIO',
'KEY-FUNCTION',
'KEY-LABEL',
'KEYCODE',
'KEYFUNC',
'KEYFUNCT',
'KEYFUNCTI',
'KEYFUNCTIO',
'KEYFUNCTION',
'KEYLABEL',
'KEYS',
'KEYWORD',
'KEYWORD-ALL',
'LABEL',
'LABEL-BGC',
'LABEL-BGCO',
'LABEL-BGCOL',
'LABEL-BGCOLO',
'LABEL-BGCOLOR',
'LABEL-DC',
'LABEL-DCO',
'LABEL-DCOL',
'LABEL-DCOLO',
'LABEL-DCOLOR',
'LABEL-FGC',
'LABEL-FGCO',
'LABEL-FGCOL',
'LABEL-FGCOLO',
'LABEL-FGCOLOR',
'LABEL-FONT',
'LABEL-PFC',
'LABEL-PFCO',
'LABEL-PFCOL',
'LABEL-PFCOLO',
'LABEL-PFCOLOR',
'LABELS',
'LABELS-HAVE-COLONS',
'LANDSCAPE',
'LANGUAGE',
'LANGUAGES',
'LARGE',
'LARGE-TO-SMALL',
'LAST',
'LAST-ASYNCH-REQUEST',
'LAST-BATCH',
'LAST-CHILD',
'LAST-EVEN',
'LAST-EVENT',
'LAST-FORM',
'LAST-KEY',
'LAST-OBJECT',
'LAST-OF',
'LAST-PROCE',
'LAST-PROCED',
'LAST-PROCEDU',
'LAST-PROCEDUR',
'LAST-PROCEDURE',
'LAST-SERVER',
'LAST-TAB-I',
'LAST-TAB-IT',
'LAST-TAB-ITE',
'LAST-TAB-ITEM',
'LASTKEY',
'LC',
'LDBNAME',
'LE',
'LEAVE',
'LEFT-ALIGN',
'LEFT-ALIGNE',
'LEFT-ALIGNED',
'LEFT-TRIM',
'LENGTH',
'LIBRARY',
'LIKE',
'LIKE-SEQUENTIAL',
'LINE',
'LINE-COUNT',
'LINE-COUNTE',
'LINE-COUNTER',
'LIST-EVENTS',
'LIST-ITEM-PAIRS',
'LIST-ITEMS',
'LIST-PROPERTY-NAMES',
'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS',
'LIST-WIDGETS',
'LISTI',
'LISTIN',
'LISTING',
'LITERAL-QUESTION',
'LITTLE-ENDIAN',
'LOAD',
'LOAD-DOMAINS',
'LOAD-ICON',
'LOAD-IMAGE',
'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE',
'LOAD-IMAGE-UP',
'LOAD-MOUSE-P',
'LOAD-MOUSE-PO',
'LOAD-MOUSE-POI',
'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT',
'LOAD-MOUSE-POINTE',
'LOAD-MOUSE-POINTER',
'LOAD-PICTURE',
'LOAD-SMALL-ICON',
'LOCAL-NAME',
'LOCAL-VERSION-INFO',
'LOCATOR-COLUMN-NUMBER',
'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID',
'LOCATOR-SYSTEM-ID',
'LOCATOR-TYPE',
'LOCK-REGISTRATION',
'LOCKED',
'LOG',
'LOG-AUDIT-EVENT',
'LOG-MANAGER',
'LOGICAL',
'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST',
'LOGIN-STATE',
'LOGOUT',
'LONGCHAR',
'LOOKAHEAD',
'LOOKUP',
'LT',
'MACHINE-CLASS',
'MANDATORY',
'MANUAL-HIGHLIGHT',
'MAP',
'MARGIN-EXTRA',
'MARGIN-HEIGHT',
'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C',
'MARGIN-HEIGHT-CH',
'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR',
'MARGIN-HEIGHT-CHARS',
'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI',
'MARGIN-HEIGHT-PIX',
'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL',
'MARGIN-HEIGHT-PIXELS',
'MARGIN-WIDTH',
'MARGIN-WIDTH-',
'MARGIN-WIDTH-C',
'MARGIN-WIDTH-CH',
'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR',
'MARGIN-WIDTH-CHARS',
'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI',
'MARGIN-WIDTH-PIX',
'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL',
'MARGIN-WIDTH-PIXELS',
'MARK-NEW',
'MARK-ROW-STATE',
'MATCHES',
'MAX',
'MAX-BUTTON',
'MAX-CHARS',
'MAX-DATA-GUESS',
'MAX-HEIGHT',
'MAX-HEIGHT-C',
'MAX-HEIGHT-CH',
'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR',
'MAX-HEIGHT-CHARS',
'MAX-HEIGHT-P',
'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX',
'MAX-HEIGHT-PIXE',
'MAX-HEIGHT-PIXEL',
'MAX-HEIGHT-PIXELS',
'MAX-ROWS',
'MAX-SIZE',
'MAX-VAL',
'MAX-VALU',
'MAX-VALUE',
'MAX-WIDTH',
'MAX-WIDTH-',
'MAX-WIDTH-C',
'MAX-WIDTH-CH',
'MAX-WIDTH-CHA',
'MAX-WIDTH-CHAR',
'MAX-WIDTH-CHARS',
'MAX-WIDTH-P',
'MAX-WIDTH-PI',
'MAX-WIDTH-PIX',
'MAX-WIDTH-PIXE',
'MAX-WIDTH-PIXEL',
'MAX-WIDTH-PIXELS',
'MAXI',
'MAXIM',
'MAXIMIZE',
'MAXIMU',
'MAXIMUM',
'MAXIMUM-LEVEL',
'MD5-DIGEST',
'MEMBER',
'MEMPTR-TO-NODE-VALUE',
'MENU',
'MENU-BAR',
'MENU-ITEM',
'MENU-K',
'MENU-KE',
'MENU-KEY',
'MENU-M',
'MENU-MO',
'MENU-MOU',
'MENU-MOUS',
'MENU-MOUSE',
'MENUBAR',
'MERGE-BY-FIELD',
'MESSAGE',
'MESSAGE-AREA',
'MESSAGE-AREA-FONT',
'MESSAGE-LINES',
'METHOD',
'MIN',
'MIN-BUTTON',
'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH',
'MIN-COLUMN-WIDTH-CHA',
'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-CHARS',
'MIN-COLUMN-WIDTH-P',
'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX',
'MIN-COLUMN-WIDTH-PIXE',
'MIN-COLUMN-WIDTH-PIXEL',
'MIN-COLUMN-WIDTH-PIXELS',
'MIN-HEIGHT',
'MIN-HEIGHT-',
'MIN-HEIGHT-C',
'MIN-HEIGHT-CH',
'MIN-HEIGHT-CHA',
'MIN-HEIGHT-CHAR',
'MIN-HEIGHT-CHARS',
'MIN-HEIGHT-P',
'MIN-HEIGHT-PI',
'MIN-HEIGHT-PIX',
'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL',
'MIN-HEIGHT-PIXELS',
'MIN-SIZE',
'MIN-VAL',
'MIN-VALU',
'MIN-VALUE',
'MIN-WIDTH',
'MIN-WIDTH-',
'MIN-WIDTH-C',
'MIN-WIDTH-CH',
'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR',
'MIN-WIDTH-CHARS',
'MIN-WIDTH-P',
'MIN-WIDTH-PI',
'MIN-WIDTH-PIX',
'MIN-WIDTH-PIXE',
'MIN-WIDTH-PIXEL',
'MIN-WIDTH-PIXELS',
'MINI',
'MINIM',
'MINIMU',
'MINIMUM',
'MOD',
'MODIFIED',
'MODU',
'MODUL',
'MODULO',
'MONTH',
'MOUSE',
'MOUSE-P',
'MOUSE-PO',
'MOUSE-POI',
'MOUSE-POIN',
'MOUSE-POINT',
'MOUSE-POINTE',
'MOUSE-POINTER',
'MOVABLE',
'MOVE-AFTER',
'MOVE-AFTER-',
'MOVE-AFTER-T',
'MOVE-AFTER-TA',
'MOVE-AFTER-TAB',
'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I',
'MOVE-AFTER-TAB-IT',
'MOVE-AFTER-TAB-ITE',
'MOVE-AFTER-TAB-ITEM',
'MOVE-BEFOR',
'MOVE-BEFORE',
'MOVE-BEFORE-',
'MOVE-BEFORE-T',
'MOVE-BEFORE-TA',
'MOVE-BEFORE-TAB',
'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I',
'MOVE-BEFORE-TAB-IT',
'MOVE-BEFORE-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM',
'MOVE-COL',
'MOVE-COLU',
'MOVE-COLUM',
'MOVE-COLUMN',
'MOVE-TO-B',
'MOVE-TO-BO',
'MOVE-TO-BOT',
'MOVE-TO-BOTT',
'MOVE-TO-BOTTO',
'MOVE-TO-BOTTOM',
'MOVE-TO-EOF',
'MOVE-TO-T',
'MOVE-TO-TO',
'MOVE-TO-TOP',
'MPE',
'MTIME',
'MULTI-COMPILE',
'MULTIPLE',
'MULTIPLE-KEY',
'MULTITASKING-INTERVAL',
'MUST-EXIST',
'NAME',
'NAMESPACE-PREFIX',
'NAMESPACE-URI',
'NATIVE',
'NE',
'NEEDS-APPSERVER-PROMPT',
'NEEDS-PROMPT',
'NEW',
'NEW-INSTANCE',
'NEW-ROW',
'NEXT',
'NEXT-COLUMN',
'NEXT-PROMPT',
'NEXT-ROWID',
'NEXT-SIBLING',
'NEXT-TAB-I',
'NEXT-TAB-IT',
'NEXT-TAB-ITE',
'NEXT-TAB-ITEM',
'NEXT-VALUE',
'NO',
'NO-APPLY',
'NO-ARRAY-MESSAGE',
'NO-ASSIGN',
'NO-ATTR',
'NO-ATTR-',
'NO-ATTR-L',
'NO-ATTR-LI',
'NO-ATTR-LIS',
'NO-ATTR-LIST',
'NO-ATTR-S',
'NO-ATTR-SP',
'NO-ATTR-SPA',
'NO-ATTR-SPAC',
'NO-ATTR-SPACE',
'NO-AUTO-VALIDATE',
'NO-BIND-WHERE',
'NO-BOX',
'NO-CONSOLE',
'NO-CONVERT',
'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE',
'NO-DEBUG',
'NO-DRAG',
'NO-ECHO',
'NO-EMPTY-SPACE',
'NO-ERROR',
'NO-F',
'NO-FI',
'NO-FIL',
'NO-FILL',
'NO-FOCUS',
'NO-HELP',
'NO-HIDE',
'NO-INDEX-HINT',
'NO-INHERIT-BGC',
'NO-INHERIT-BGCO',
'NO-INHERIT-BGCOLOR',
'NO-INHERIT-FGC',
'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL',
'NO-INHERIT-FGCOLO',
'NO-INHERIT-FGCOLOR',
'NO-JOIN-BY-SQLDB',
'NO-LABE',
'NO-LABELS',
'NO-LOBS',
'NO-LOCK',
'NO-LOOKAHEAD',
'NO-MAP',
'NO-MES',
'NO-MESS',
'NO-MESSA',
'NO-MESSAG',
'NO-MESSAGE',
'NO-PAUSE',
'NO-PREFE',
'NO-PREFET',
'NO-PREFETC',
'NO-PREFETCH',
'NO-ROW-MARKERS',
'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION',
'NO-SEPARATORS',
'NO-TAB-STOP',
'NO-UND',
'NO-UNDE',
'NO-UNDER',
'NO-UNDERL',
'NO-UNDERLI',
'NO-UNDERLIN',
'NO-UNDERLINE',
'NO-UNDO',
'NO-VAL',
'NO-VALI',
'NO-VALID',
'NO-VALIDA',
'NO-VALIDAT',
'NO-VALIDATE',
'NO-WAIT',
'NO-WORD-WRAP',
'NODE-VALUE-TO-MEMPTR',
'NONAMESPACE-SCHEMA-LOCATION',
'NONE',
'NORMALIZE',
'NOT',
'NOT-ACTIVE',
'NOW',
'NULL',
'NUM-ALI',
'NUM-ALIA',
'NUM-ALIAS',
'NUM-ALIASE',
'NUM-ALIASES',
'NUM-BUFFERS',
'NUM-BUT',
'NUM-BUTT',
'NUM-BUTTO',
'NUM-BUTTON',
'NUM-BUTTONS',
'NUM-COL',
'NUM-COLU',
'NUM-COLUM',
'NUM-COLUMN',
'NUM-COLUMNS',
'NUM-COPIES',
'NUM-DBS',
'NUM-DROPPED-FILES',
'NUM-ENTRIES',
'NUM-FIELDS',
'NUM-FORMATS',
'NUM-ITEMS',
'NUM-ITERATIONS',
'NUM-LINES',
'NUM-LOCKED-COL',
'NUM-LOCKED-COLU',
'NUM-LOCKED-COLUM',
'NUM-LOCKED-COLUMN',
'NUM-LOCKED-COLUMNS',
'NUM-MESSAGES',
'NUM-PARAMETERS',
'NUM-REFERENCES',
'NUM-REPLACED',
'NUM-RESULTS',
'NUM-SELECTED',
'NUM-SELECTED-',
'NUM-SELECTED-ROWS',
'NUM-SELECTED-W',
'NUM-SELECTED-WI',
'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG',
'NUM-SELECTED-WIDGE',
'NUM-SELECTED-WIDGET',
'NUM-SELECTED-WIDGETS',
'NUM-TABS',
'NUM-TO-RETAIN',
'NUM-VISIBLE-COLUMNS',
'NUMERIC',
'NUMERIC-F',
'NUMERIC-FO',
'NUMERIC-FOR',
'NUMERIC-FORM',
'NUMERIC-FORMA',
'NUMERIC-FORMAT',
'OCTET-LENGTH',
'OF',
'OFF',
'OK',
'OK-CANCEL',
'OLD',
'ON',
'ON-FRAME',
'ON-FRAME-',
'ON-FRAME-B',
'ON-FRAME-BO',
'ON-FRAME-BOR',
'ON-FRAME-BORD',
'ON-FRAME-BORDE',
'ON-FRAME-BORDER',
'OPEN',
'OPSYS',
'OPTION',
'OR',
'ORDERED-JOIN',
'ORDINAL',
'OS-APPEND',
'OS-COMMAND',
'OS-COPY',
'OS-CREATE-DIR',
'OS-DELETE',
'OS-DIR',
'OS-DRIVE',
'OS-DRIVES',
'OS-ERROR',
'OS-GETENV',
'OS-RENAME',
'OTHERWISE',
'OUTPUT',
'OVERLAY',
'OVERRIDE',
'OWNER',
'PAGE',
'PAGE-BOT',
'PAGE-BOTT',
'PAGE-BOTTO',
'PAGE-BOTTOM',
'PAGE-NUM',
'PAGE-NUMB',
'PAGE-NUMBE',
'PAGE-NUMBER',
'PAGE-SIZE',
'PAGE-TOP',
'PAGE-WID',
'PAGE-WIDT',
'PAGE-WIDTH',
'PAGED',
'PARAM',
'PARAME',
'PARAMET',
'PARAMETE',
'PARAMETER',
'PARENT',
'PARSE-STATUS',
'PARTIAL-KEY',
'PASCAL',
'PASSWORD-FIELD',
'PATHNAME',
'PAUSE',
'PBE-HASH-ALG',
'PBE-HASH-ALGO',
'PBE-HASH-ALGOR',
'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT',
'PBE-HASH-ALGORITH',
'PBE-HASH-ALGORITHM',
'PBE-KEY-ROUNDS',
'PDBNAME',
'PERSIST',
'PERSISTE',
'PERSISTEN',
'PERSISTENT',
'PERSISTENT-CACHE-DISABLED',
'PFC',
'PFCO',
'PFCOL',
'PFCOLO',
'PFCOLOR',
'PIXELS',
'PIXELS-PER-COL',
'PIXELS-PER-COLU',
'PIXELS-PER-COLUM',
'PIXELS-PER-COLUMN',
'PIXELS-PER-ROW',
'POPUP-M',
'POPUP-ME',
'POPUP-MEN',
'POPUP-MENU',
'POPUP-O',
'POPUP-ON',
'POPUP-ONL',
'POPUP-ONLY',
'PORTRAIT',
'POSITION',
'PRECISION',
'PREFER-DATASET',
'PREPARE-STRING',
'PREPARED',
'PREPROC',
'PREPROCE',
'PREPROCES',
'PREPROCESS',
'PRESEL',
'PRESELE',
'PRESELEC',
'PRESELECT',
'PREV',
'PREV-COLUMN',
'PREV-SIBLING',
'PREV-TAB-I',
'PREV-TAB-IT',
'PREV-TAB-ITE',
'PREV-TAB-ITEM',
'PRIMARY',
'PRINTER',
'PRINTER-CONTROL-HANDLE',
'PRINTER-HDC',
'PRINTER-NAME',
'PRINTER-PORT',
'PRINTER-SETUP',
'PRIVATE',
'PRIVATE-D',
'PRIVATE-DA',
'PRIVATE-DAT',
'PRIVATE-DATA',
'PRIVILEGES',
'PROC-HA',
'PROC-HAN',
'PROC-HAND',
'PROC-HANDL',
'PROC-HANDLE',
'PROC-ST',
'PROC-STA',
'PROC-STAT',
'PROC-STATU',
'PROC-STATUS',
'PROC-TEXT',
'PROC-TEXT-BUFFER',
'PROCE',
'PROCED',
'PROCEDU',
'PROCEDUR',
'PROCEDURE',
'PROCEDURE-CALL-TYPE',
'PROCEDURE-TYPE',
'PROCESS',
'PROFILER',
'PROGRAM-NAME',
'PROGRESS',
'PROGRESS-S',
'PROGRESS-SO',
'PROGRESS-SOU',
'PROGRESS-SOUR',
'PROGRESS-SOURC',
'PROGRESS-SOURCE',
'PROMPT',
'PROMPT-F',
'PROMPT-FO',
'PROMPT-FOR',
'PROMSGS',
'PROPATH',
'PROPERTY',
'PROTECTED',
'PROVERS',
'PROVERSI',
'PROVERSIO',
'PROVERSION',
'PROXY',
'PROXY-PASSWORD',
'PROXY-USERID',
'PUBLIC',
'PUBLIC-ID',
'PUBLISH',
'PUBLISHED-EVENTS',
'PUT',
'PUT-BYTE',
'PUT-DOUBLE',
'PUT-FLOAT',
'PUT-INT64',
'PUT-KEY-VAL',
'PUT-KEY-VALU',
'PUT-KEY-VALUE',
'PUT-LONG',
'PUT-SHORT',
'PUT-STRING',
'PUT-UNSIGNED-LONG',
'PUTBYTE',
'QUERY',
'QUERY-CLOSE',
'QUERY-OFF-END',
'QUERY-OPEN',
'QUERY-PREPARE',
'QUERY-TUNING',
'QUESTION',
'QUIT',
'QUOTER',
'R-INDEX',
'RADIO-BUTTONS',
'RADIO-SET',
'RANDOM',
'RAW',
'RAW-TRANSFER',
'RCODE-INFO',
'RCODE-INFOR',
'RCODE-INFORM',
'RCODE-INFORMA',
'RCODE-INFORMAT',
'RCODE-INFORMATI',
'RCODE-INFORMATIO',
'RCODE-INFORMATION',
'READ-AVAILABLE',
'READ-EXACT-NUM',
'READ-FILE',
'READ-JSON',
'READ-ONLY',
'READ-XML',
'READ-XMLSCHEMA',
'READKEY',
'REAL',
'RECID',
'RECORD-LENGTH',
'RECT',
'RECTA',
'RECTAN',
'RECTANG',
'RECTANGL',
'RECTANGLE',
'RECURSIVE',
'REFERENCE-ONLY',
'REFRESH',
'REFRESH-AUDIT-POLICY',
'REFRESHABLE',
'REGISTER-DOMAIN',
'RELEASE',
'REMOTE',
'REMOVE-EVENTS-PROCEDURE',
'REMOVE-SUPER-PROCEDURE',
'REPEAT',
'REPLACE',
'REPLACE-SELECTION-TEXT',
'REPOSITION',
'REPOSITION-BACKWARD',
'REPOSITION-FORWARD',
'REPOSITION-MODE',
'REPOSITION-TO-ROW',
'REPOSITION-TO-ROWID',
'REQUEST',
'REQUEST-INFO',
'RESET',
'RESIZA',
'RESIZAB',
'RESIZABL',
'RESIZABLE',
'RESIZE',
'RESPONSE-INFO',
'RESTART-ROW',
'RESTART-ROWID',
'RETAIN',
'RETAIN-SHAPE',
'RETRY',
'RETRY-CANCEL',
'RETURN',
'RETURN-ALIGN',
'RETURN-ALIGNE',
'RETURN-INS',
'RETURN-INSE',
'RETURN-INSER',
'RETURN-INSERT',
'RETURN-INSERTE',
'RETURN-INSERTED',
'RETURN-TO-START-DI',
'RETURN-TO-START-DIR',
'RETURN-VAL',
'RETURN-VALU',
'RETURN-VALUE',
'RETURN-VALUE-DATA-TYPE',
'RETURNS',
'REVERSE-FROM',
'REVERT',
'REVOKE',
'RGB-VALUE',
'RIGHT-ALIGNED',
'RIGHT-TRIM',
'ROLES',
'ROUND',
'ROUTINE-LEVEL',
'ROW',
'ROW-HEIGHT-CHARS',
'ROW-HEIGHT-PIXELS',
'ROW-MARKERS',
'ROW-OF',
'ROW-RESIZABLE',
'ROWID',
'RULE',
'RUN',
'RUN-PROCEDURE',
'SAVE CACHE',
'SAVE',
'SAVE-AS',
'SAVE-FILE',
'SAX-COMPLE',
'SAX-COMPLET',
'SAX-COMPLETE',
'SAX-PARSE',
'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT',
'SAX-PARSER-ERROR',
'SAX-RUNNING',
'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN',
'SAX-WRITE-COMPLETE',
'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT',
'SAX-WRITE-ERROR',
'SAX-WRITE-IDLE',
'SAX-WRITE-TAG',
'SAX-WRITER',
'SCHEMA',
'SCHEMA-LOCATION',
'SCHEMA-MARSHAL',
'SCHEMA-PATH',
'SCREEN',
'SCREEN-IO',
'SCREEN-LINES',
'SCREEN-VAL',
'SCREEN-VALU',
'SCREEN-VALUE',
'SCROLL',
'SCROLL-BARS',
'SCROLL-DELTA',
'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW',
'SCROLL-TO-I',
'SCROLL-TO-IT',
'SCROLL-TO-ITE',
'SCROLL-TO-ITEM',
'SCROLL-TO-SELECTED-ROW',
'SCROLLABLE',
'SCROLLBAR-H',
'SCROLLBAR-HO',
'SCROLLBAR-HOR',
'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ',
'SCROLLBAR-HORIZO',
'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT',
'SCROLLBAR-HORIZONTA',
'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-V',
'SCROLLBAR-VE',
'SCROLLBAR-VER',
'SCROLLBAR-VERT',
'SCROLLBAR-VERTI',
'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA',
'SCROLLBAR-VERTICAL',
'SCROLLED-ROW-POS',
'SCROLLED-ROW-POSI',
'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI',
'SCROLLED-ROW-POSITIO',
'SCROLLED-ROW-POSITION',
'SCROLLING',
'SDBNAME',
'SEAL',
'SEAL-TIMESTAMP',
'SEARCH',
'SEARCH-SELF',
'SEARCH-TARGET',
'SECTION',
'SECURITY-POLICY',
'SEEK',
'SELECT',
'SELECT-ALL',
'SELECT-FOCUSED-ROW',
'SELECT-NEXT-ROW',
'SELECT-PREV-ROW',
'SELECT-ROW',
'SELECTABLE',
'SELECTED',
'SELECTION',
'SELECTION-END',
'SELECTION-LIST',
'SELECTION-START',
'SELECTION-TEXT',
'SELF',
'SEND',
'SEND-SQL-STATEMENT',
'SENSITIVE',
'SEPARATE-CONNECTION',
'SEPARATOR-FGCOLOR',
'SEPARATORS',
'SERIALIZABLE',
'SERIALIZE-HIDDEN',
'SERIALIZE-NAME',
'SERVER',
'SERVER-CONNECTION-BOUND',
'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT',
'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE',
'SESSION',
'SESSION-ID',
'SET',
'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE',
'SET-ATTRIBUTE-NODE',
'SET-BLUE',
'SET-BLUE-',
'SET-BLUE-V',
'SET-BLUE-VA',
'SET-BLUE-VAL',
'SET-BLUE-VALU',
'SET-BLUE-VALUE',
'SET-BREAK',
'SET-BUFFERS',
'SET-CALLBACK',
'SET-CLIENT',
'SET-COMMIT',
'SET-CONTENTS',
'SET-CURRENT-VALUE',
'SET-DB-CLIENT',
'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION',
'SET-GREEN',
'SET-GREEN-',
'SET-GREEN-V',
'SET-GREEN-VA',
'SET-GREEN-VAL',
'SET-GREEN-VALU',
'SET-GREEN-VALUE',
'SET-INPUT-SOURCE',
'SET-OPTION',
'SET-OUTPUT-DESTINATION',
'SET-PARAMETER',
'SET-POINTER-VALUE',
'SET-PROPERTY',
'SET-RED',
'SET-RED-',
'SET-RED-V',
'SET-RED-VA',
'SET-RED-VAL',
'SET-RED-VALU',
'SET-RED-VALUE',
'SET-REPOSITIONED-ROW',
'SET-RGB-VALUE',
'SET-ROLLBACK',
'SET-SELECTION',
'SET-SIZE',
'SET-SORT-ARROW',
'SET-WAIT-STATE',
'SETUSER',
'SETUSERI',
'SETUSERID',
'SHA1-DIGEST',
'SHARE',
'SHARE-',
'SHARE-L',
'SHARE-LO',
'SHARE-LOC',
'SHARE-LOCK',
'SHARED',
'SHOW-IN-TASKBAR',
'SHOW-STAT',
'SHOW-STATS',
'SIDE-LAB',
'SIDE-LABE',
'SIDE-LABEL',
'SIDE-LABEL-H',
'SIDE-LABEL-HA',
'SIDE-LABEL-HAN',
'SIDE-LABEL-HAND',
'SIDE-LABEL-HANDL',
'SIDE-LABEL-HANDLE',
'SIDE-LABELS',
'SIGNATURE',
'SILENT',
'SIMPLE',
'SINGLE',
'SINGLE-RUN',
'SINGLETON',
'SIZE',
'SIZE-C',
'SIZE-CH',
'SIZE-CHA',
'SIZE-CHAR',
'SIZE-CHARS',
'SIZE-P',
'SIZE-PI',
'SIZE-PIX',
'SIZE-PIXE',
'SIZE-PIXEL',
'SIZE-PIXELS',
'SKIP',
'SKIP-DELETED-RECORD',
'SLIDER',
'SMALL-ICON',
'SMALL-TITLE',
'SMALLINT',
'SOME',
'SORT',
'SORT-ASCENDING',
'SORT-NUMBER',
'SOURCE',
'SOURCE-PROCEDURE',
'SPACE',
'SQL',
'SQRT',
'SSL-SERVER-NAME',
'STANDALONE',
'START',
'START-DOCUMENT',
'START-ELEMENT',
'START-MOVE',
'START-RESIZE',
'START-ROW-RESIZE',
'STATE-DETAIL',
'STATIC',
'STATUS',
'STATUS-AREA',
'STATUS-AREA-FONT',
'STDCALL',
'STOP',
'STOP-AFTER',
'STOP-PARSING',
'STOPPE',
'STOPPED',
'STORED-PROC',
'STORED-PROCE',
'STORED-PROCED',
'STORED-PROCEDU',
'STORED-PROCEDUR',
'STORED-PROCEDURE',
'STREAM',
'STREAM-HANDLE',
'STREAM-IO',
'STRETCH-TO-FIT',
'STRICT',
'STRICT-ENTITY-RESOLUTION',
'STRING',
'STRING-VALUE',
'STRING-XREF',
'SUB-AVE',
'SUB-AVER',
'SUB-AVERA',
'SUB-AVERAG',
'SUB-AVERAGE',
'SUB-COUNT',
'SUB-MAXIMUM',
'SUB-MENU',
'SUB-MIN',
'SUB-MINIMUM',
'SUB-TOTAL',
'SUBSCRIBE',
'SUBST',
'SUBSTI',
'SUBSTIT',
'SUBSTITU',
'SUBSTITUT',
'SUBSTITUTE',
'SUBSTR',
'SUBSTRI',
'SUBSTRIN',
'SUBSTRING',
'SUBTYPE',
'SUM',
'SUM-MAX',
'SUM-MAXI',
'SUM-MAXIM',
'SUM-MAXIMU',
'SUPER',
'SUPER-PROCEDURES',
'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-W',
'SUPPRESS-WA',
'SUPPRESS-WAR',
'SUPPRESS-WARN',
'SUPPRESS-WARNI',
'SUPPRESS-WARNIN',
'SUPPRESS-WARNING',
'SUPPRESS-WARNINGS',
'SYMMETRIC-ENCRYPTION-ALGORITHM',
'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY',
'SYMMETRIC-SUPPORT',
'SYSTEM-ALERT',
'SYSTEM-ALERT-',
'SYSTEM-ALERT-B',
'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX',
'SYSTEM-ALERT-BOXE',
'SYSTEM-ALERT-BOXES',
'SYSTEM-DIALOG',
'SYSTEM-HELP',
'SYSTEM-ID',
'TAB-POSITION',
'TAB-STOP',
'TABLE',
'TABLE-HANDLE',
'TABLE-NUMBER',
'TABLE-SCAN',
'TARGET',
'TARGET-PROCEDURE',
'TEMP-DIR',
'TEMP-DIRE',
'TEMP-DIREC',
'TEMP-DIRECT',
'TEMP-DIRECTO',
'TEMP-DIRECTOR',
'TEMP-DIRECTORY',
'TEMP-TABLE',
'TEMP-TABLE-PREPARE',
'TERM',
'TERMI',
'TERMIN',
'TERMINA',
'TERMINAL',
'TERMINATE',
'TEXT',
'TEXT-CURSOR',
'TEXT-SEG-GROW',
'TEXT-SELECTED',
'THEN',
'THIS-OBJECT',
'THIS-PROCEDURE',
'THREAD-SAFE',
'THREE-D',
'THROUGH',
'THROW',
'THRU',
'TIC-MARKS',
'TIME',
'TIME-SOURCE',
'TITLE',
'TITLE-BGC',
'TITLE-BGCO',
'TITLE-BGCOL',
'TITLE-BGCOLO',
'TITLE-BGCOLOR',
'TITLE-DC',
'TITLE-DCO',
'TITLE-DCOL',
'TITLE-DCOLO',
'TITLE-DCOLOR',
'TITLE-FGC',
'TITLE-FGCO',
'TITLE-FGCOL',
'TITLE-FGCOLO',
'TITLE-FGCOLOR',
'TITLE-FO',
'TITLE-FON',
'TITLE-FONT',
'TO',
'TO-ROWID',
'TODAY',
'TOGGLE-BOX',
'TOOLTIP',
'TOOLTIPS',
'TOP-NAV-QUERY',
'TOP-ONLY',
'TOPIC',
'TOTAL',
'TRAILING',
'TRANS',
'TRANS-INIT-PROCEDURE',
'TRANSACTION',
'TRANSACTION-MODE',
'TRANSPARENT',
'TRIGGER',
'TRIGGERS',
'TRIM',
'TRUE',
'TRUNC',
'TRUNCA',
'TRUNCAT',
'TRUNCATE',
'TYPE',
'TYPE-OF',
'UNBOX',
'UNBUFF',
'UNBUFFE',
'UNBUFFER',
'UNBUFFERE',
'UNBUFFERED',
'UNDERL',
'UNDERLI',
'UNDERLIN',
'UNDERLINE',
'UNDO',
'UNFORM',
'UNFORMA',
'UNFORMAT',
'UNFORMATT',
'UNFORMATTE',
'UNFORMATTED',
'UNION',
'UNIQUE',
'UNIQUE-ID',
'UNIQUE-MATCH',
'UNIX',
'UNLESS-HIDDEN',
'UNLOAD',
'UNSIGNED-LONG',
'UNSUBSCRIBE',
'UP',
'UPDATE',
'UPDATE-ATTRIBUTE',
'URL',
'URL-DECODE',
'URL-ENCODE',
'URL-PASSWORD',
'URL-USERID',
'USE',
'USE-DICT-EXPS',
'USE-FILENAME',
'USE-INDEX',
'USE-REVVIDEO',
'USE-TEXT',
'USE-UNDERLINE',
'USE-WIDGET-POOL',
'USER',
'USER-ID',
'USERID',
'USING',
'V6DISPLAY',
'V6FRAME',
'VALID-EVENT',
'VALID-HANDLE',
'VALID-OBJECT',
'VALIDATE',
'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE',
'VALIDATE-SEAL',
'VALIDATION-ENABLED',
'VALUE',
'VALUE-CHANGED',
'VALUES',
'VAR',
'VARI',
'VARIA',
'VARIAB',
'VARIABL',
'VARIABLE',
'VERBOSE',
'VERSION',
'VERT',
'VERTI',
'VERTIC',
'VERTICA',
'VERTICAL',
'VIEW',
'VIEW-AS',
'VIEW-FIRST-COLUMN-ON-REOPEN',
'VIRTUAL-HEIGHT',
'VIRTUAL-HEIGHT-',
'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH',
'VIRTUAL-HEIGHT-CHA',
'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT-P',
'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX',
'VIRTUAL-HEIGHT-PIXE',
'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-HEIGHT-PIXELS',
'VIRTUAL-WIDTH',
'VIRTUAL-WIDTH-',
'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH',
'VIRTUAL-WIDTH-CHA',
'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-CHARS',
'VIRTUAL-WIDTH-P',
'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX',
'VIRTUAL-WIDTH-PIXE',
'VIRTUAL-WIDTH-PIXEL',
'VIRTUAL-WIDTH-PIXELS',
'VISIBLE',
'VOID',
'WAIT',
'WAIT-FOR',
'WARNING',
'WEB-CONTEXT',
'WEEKDAY',
'WHEN',
'WHERE',
'WHILE',
'WIDGET',
'WIDGET-E',
'WIDGET-EN',
'WIDGET-ENT',
'WIDGET-ENTE',
'WIDGET-ENTER',
'WIDGET-ID',
'WIDGET-L',
'WIDGET-LE',
'WIDGET-LEA',
'WIDGET-LEAV',
'WIDGET-LEAVE',
'WIDGET-POOL',
'WIDTH',
'WIDTH-',
'WIDTH-C',
'WIDTH-CH',
'WIDTH-CHA',
'WIDTH-CHAR',
'WIDTH-CHARS',
'WIDTH-P',
'WIDTH-PI',
'WIDTH-PIX',
'WIDTH-PIXE',
'WIDTH-PIXEL',
'WIDTH-PIXELS',
'WINDOW',
'WINDOW-MAXIM',
'WINDOW-MAXIMI',
'WINDOW-MAXIMIZ',
'WINDOW-MAXIMIZE',
'WINDOW-MAXIMIZED',
'WINDOW-MINIM',
'WINDOW-MINIMI',
'WINDOW-MINIMIZ',
'WINDOW-MINIMIZE',
'WINDOW-MINIMIZED',
'WINDOW-NAME',
'WINDOW-NORMAL',
'WINDOW-STA',
'WINDOW-STAT',
'WINDOW-STATE',
'WINDOW-SYSTEM',
'WITH',
'WORD-INDEX',
'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS',
'WORK-AREA-WIDTH-PIXELS',
'WORK-AREA-X',
'WORK-AREA-Y',
'WORK-TAB',
'WORK-TABL',
'WORK-TABLE',
'WORKFILE',
'WRITE',
'WRITE-CDATA',
'WRITE-CHARACTERS',
'WRITE-COMMENT',
'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT',
'WRITE-ENTITY-REF',
'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT',
'WRITE-JSON',
'WRITE-MESSAGE',
'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS',
'WRITE-XML',
'WRITE-XMLSCHEMA',
'X',
'X-OF',
'XCODE',
'XML-DATA-TYPE',
'XML-ENTITY-EXPANSION-LIMIT',
'XML-NODE-TYPE',
'XML-SCHEMA-PATH',
'XML-STRICT-ENTITY-RESOLUTION',
'XML-SUPPRESS-NAMESPACE-PROCESSING',
'XREF',
'XREF-XML',
'Y',
'Y-OF',
'YEAR',
'YEAR-OFFSET',
'YES',
'YES-NO',
'YES-NO-CANCEL'
)
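# Hedged usage sketch (not part of this module): the OpenEdgeLexer in
# pygments.lexers.business consumes this tuple when building its keyword token
# pattern, roughly along these lines (the exact prefix/suffix regexes here are
# an assumption):
#
#   from pygments.lexer import words
#   from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS
#
#   keyword_pattern = words(OPENEDGEKEYWORDS, prefix=r'(?i)', suffix=r'\b')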
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, no-self-use, too-many-locals, broad-except
"""numpy interface for operators."""
from __future__ import absolute_import
import traceback
from threading import Lock
from ctypes import CFUNCTYPE, POINTER, Structure, pointer
from ctypes import c_void_p, c_int, c_char, c_char_p, cast, c_bool
from .base import _LIB, check_call
from .base import c_array, c_str, mx_uint, mx_float, ctypes2numpy_shared, NDArrayHandle, py_str
from . import symbol, context
from .ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
c_int_p = POINTER(c_int)
class PythonOp(object):
"""Base class for operators implemented in Python.
Parameters
----------
need_top_grad : bool
the default need_top_grad() function returns this value.
"""
_ref_holder = []
def __init__(self, need_top_grad=True):
self.info_ = None
self.need_top_grad_ = need_top_grad
def __call__(self, *args, **kwargs):
return self.get_symbol(*args, **kwargs)
def get_symbol(self, *args, **kwargs):
"""Create a symbol from numpy operator.
This should only be called once per instance if the operator contains
internal states.
Parameters
----------
args : list
a list of input arguments (symbols).
Returns
-------
sym : mxnet.symbol.Symbol
"""
raise NotImplementedError("Must override this")
def forward(self, in_data, out_data):
"""Forward interface. Override to create new operators.
Parameters
----------
in_data, out_data: list
            input and output for forward. See the documentation for the
corresponding arguments of Operator::Forward
"""
out_data[0][:] = in_data[0]
def backward(self, out_grad, in_data, out_data, in_grad):
"""Backward interface. Can override when creating new operators.
Parameters
----------
out_grad, in_data, out_data, in_grad : list
            input and output for backward. See the documentation for the
corresponding arguments of Operator::Backward
"""
# pylint: disable=W0613
in_grad[0][:] = 1.0
def infer_shape(self, in_shape):
"""Interface for ``infer_shape``. Can override when creating new operators.
Parameters
----------
in_shape : list
List of argument shapes in the same order as
declared in list_arguments.
Returns
-------
in_shape : list
List of argument shapes. Can be modified from in_shape.
out_shape : list
List of output shapes calculated from in_shape,
            in the same order as declared in list_outputs.
"""
return in_shape, [in_shape[0]]
def list_outputs(self):
"""Interface for ``list_outputs``. Can override when creating new operators.
Returns
-------
outputs : list
List of output blob names.
"""
return ['output']
def list_arguments(self):
"""Interface for ``list_arguments``. Can override when creating new operators.
Returns
-------
        arguments : list
            List of argument blob names.
"""
return ['data']
def need_top_grad(self):
"""Whether this operator needs out_grad for backward.
Returns
-------
need_top_grad : bool
Whether this operator needs out_grad for backward.
Should be set to False for loss layers.
"""
return self.need_top_grad_
class NumpyOp(PythonOp):
"""Base class for numpy operators. numpy operators allow parts
of computation in symbolic graph to be writen in numpy. This feature
is intended for quickly hacking out a solution for non performance
critical parts. Please consider write a c++ implementation if it becomes
a bottleneck.
Note that if your operator contains internal states (like arrays),
it cannot be used for multi-gpu training.
"""
def __init__(self, need_top_grad=True):
super(NumpyOp, self).__init__(need_top_grad)
def get_symbol(self, *args, **kwargs):
fb_functype = CFUNCTYPE(None, c_int, POINTER(POINTER(mx_float)), POINTER(c_int),
POINTER(POINTER(mx_uint)), POINTER(c_int), c_void_p)
infer_functype = CFUNCTYPE(None, c_int, POINTER(c_int),
POINTER(POINTER(mx_uint)), c_void_p)
list_functype = CFUNCTYPE(None, POINTER(POINTER(POINTER(c_char))), c_void_p)
class NumpyOpInfo(Structure):
"""Structure that holds Callback information. Passed to NumpyOpProp"""
_fields_ = [
('forward', fb_functype),
('backward', fb_functype),
('infer_shape', infer_functype),
('list_outputs', list_functype),
('list_arguments', list_functype),
('p_forward', c_void_p),
('p_backward', c_void_p),
('p_infer_shape', c_void_p),
('p_list_outputs', c_void_p),
('p_list_arguments', c_void_p),
]
def forward_entry(num_tensor, tensor_ptrs, tensor_dims,
tensor_shapes, tensor_tags, _):
"""C Callback for NumpyOp::Forward"""
tensors = [[] for i in range(4)]
for i in range(num_tensor):
shape = [tensor_shapes[i][j] for j in range(tensor_dims[i])]
buff = ctypes2numpy_shared(tensor_ptrs[i], shape)
tensors[tensor_tags[i]].append(buff)
self.forward(in_data=tensors[0], out_data=tensors[1])
def backward_entry(num_tensor, tensor_ptrs, tensor_dims,
tensor_shapes, tensor_tags, _):
"""C Callback for NumpyOp::Backward"""
tensors = [[] for i in range(4)]
for i in range(num_tensor):
shape = [tensor_shapes[i][j] for j in range(tensor_dims[i])]
buff = ctypes2numpy_shared(tensor_ptrs[i], shape)
tensors[tensor_tags[i]].append(buff)
self.backward(in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3])
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for NumpyOpProp::InferShape"""
n_in = len(self.list_arguments())
n_out = len(self.list_outputs())
assert num_tensor == n_in + n_out
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])] for i in range(n_in)]
ishape, oshape = self.infer_shape(shapes)
assert len(oshape) == n_out
assert len(ishape) == n_in
rshape = list(ishape) + list(oshape)
for i in range(n_in+n_out):
tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
tensor_dims[i] = len(rshape[i])
def list_outputs_entry(out, _):
"""C Callback for NumpyOpProp::ListOutputs"""
ret = self.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
def list_arguments_entry(out, _):
"""C Callback for NumpyOpProp::ListArguments"""
ret = self.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
self.info_ = NumpyOpInfo(fb_functype(forward_entry),
fb_functype(backward_entry),
infer_functype(infer_shape_entry),
list_functype(list_outputs_entry),
list_functype(list_arguments_entry),
None, None, None, None, None)
cb_ptr = format(cast(pointer(self.info_), c_void_p).value, 'x')
# pylint: disable=E1101
sym = symbol._internal._Native(*args,
info=cb_ptr,
need_top_grad=self.need_top_grad(),
**kwargs)
# keep a reference of ourself in PythonOp so we don't get garbage collected.
PythonOp._ref_holder.append(self)
return sym
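# Example (illustrative sketch, not part of the original module): a minimal
# NumpyOp subclass. It mirrors the default behaviour above (identity forward,
# pass-through gradient) and only exists to show which methods a real
# operator would typically override; the class and variable names below are
# invented for illustration.
#
#     class IdentityOp(NumpyOp):
#         def __init__(self):
#             super(IdentityOp, self).__init__(need_top_grad=True)
#
#         def list_arguments(self):
#             return ['data']
#
#         def list_outputs(self):
#             return ['output']
#
#         def infer_shape(self, in_shape):
#             return in_shape, [in_shape[0]]
#
#         def forward(self, in_data, out_data):
#             out_data[0][:] = in_data[0]
#
#         def backward(self, out_grad, in_data, out_data, in_grad):
#             in_grad[0][:] = out_grad[0]
#
#     # data = mxnet.symbol.Variable('data')
#     # net = IdentityOp()(data)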
class NDArrayOp(PythonOp):
"""Base class for numpy operators. numpy operators allow parts
of computation in symbolic graph to be writen in numpy. This feature
is intended for quickly hacking out a solution for non performance
critical parts. Please consider write a c++ implementation if it becomes
a bottleneck.
Note that if your operator contains internal states (like arrays),
it cannot be used for multi-gpu training.
"""
def __init__(self, need_top_grad=True):
super(NDArrayOp, self).__init__(need_top_grad)
def get_symbol(self, *args, **kwargs):
fb_functype = CFUNCTYPE(c_bool, c_int, POINTER(c_void_p), POINTER(c_int), c_void_p)
infer_functype = CFUNCTYPE(c_bool, c_int, POINTER(c_int),
POINTER(POINTER(mx_uint)), c_void_p)
list_functype = CFUNCTYPE(c_bool, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_bool, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
class NDArrayOpInfo(Structure):
"""Structure that holds Callback information. Passed to NDArrayOpProp"""
_fields_ = [
('forward', fb_functype),
('backward', fb_functype),
('infer_shape', infer_functype),
('list_outputs', list_functype),
('list_arguments', list_functype),
('declare_backward_dependency', deps_functype),
('p_forward', c_void_p),
('p_backward', c_void_p),
('p_infer_shape', c_void_p),
('p_list_outputs', c_void_p),
('p_list_arguments', c_void_p),
('p_declare_backward_dependency', c_void_p)
]
def forward_entry(num_ndarray, ndarraies, tags, _):
"""C Callback for NDArrayOp::Forward"""
try:
tensors = [[] for i in range(4)]
for i in range(num_ndarray):
if tags[i] == 1:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=False))
self.forward(in_data=tensors[0], out_data=tensors[1])
except Exception:
print('Error in NDArrayOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, _):
"""C Callback for NDArrayOp::Backward"""
try:
tensors = [[] for i in range(4)]
for i in range(num_ndarray):
if tags[i] == 2:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=False))
self.backward(in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3])
except Exception:
print('Error in NDArrayOp.backward: %s' % traceback.format_exc())
return False
return True
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for NDArrayOpProp::InferShape"""
try:
n_in = len(self.list_arguments())
n_out = len(self.list_outputs())
assert num_tensor == n_in + n_out
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])] for i in range(n_in)]
ishape, oshape = self.infer_shape(shapes)
assert len(oshape) == n_out
assert len(ishape) == n_in
rshape = list(ishape) + list(oshape)
for i in range(n_in+n_out):
tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
tensor_dims[i] = len(rshape[i])
except Exception:
print('Error in NDArrayOp.infer_shape: %s' % traceback.format_exc())
return False
return True
def list_outputs_entry(out, _):
"""C Callback for NDArrayOpProp::ListOutputs"""
try:
ret = self.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
except Exception:
print('Error in NDArrayOp.list_outputs: %s' % traceback.format_exc())
return False
return True
def list_arguments_entry(out, _):
"""C Callback for NDArrayOpProp::ListArguments"""
try:
ret = self.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
except Exception:
print('Error in NDArrayOp.list_arguments: %s' % traceback.format_exc())
return False
return True
def declare_backward_dependency(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for NDArrayOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(self.list_outputs()))]
in_data = [in_data[i] for i in range(len(self.list_arguments()))]
out_data = [out_data[i] for i in range(len(self.list_outputs()))]
rdeps = self.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
rdeps = cast(c_array(c_int, rdeps), c_int_p)
deps[0] = rdeps
except Exception:
print('Error in NDArrayOp.declare_backward_dependency: %s' % traceback.format_exc())
return False
return True
self.info_ = NDArrayOpInfo(fb_functype(forward_entry),
fb_functype(backward_entry),
infer_functype(infer_shape_entry),
list_functype(list_outputs_entry),
list_functype(list_arguments_entry),
deps_functype(declare_backward_dependency),
None, None, None, None, None, None)
cb_ptr = format(cast(pointer(self.info_), c_void_p).value, 'x')
# pylint: disable=E1101
sym = symbol._internal._NDArray(*args,
info=cb_ptr,
**kwargs)
# keep a reference of ourself in PythonOp so we don't get garbage collected.
PythonOp._ref_holder.append(self)
return sym
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad():
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
class CustomOp(object):
"""Base class for operators implemented in python"""
def __init__(self):
pass
def forward(self, is_train, req, in_data, out_data, aux):
"""Forward interface. Can override when creating new operators.
Parameters
----------
is_train : bool
whether this is for training
req : list of str
            How to assign to out_data; can be 'null', 'write', or 'add'.
You can optionally use self.assign(dst, req, src) to handle this.
in_data, out_data, aux: list of NDArrays
            input, output, and auxiliary states for forward. See the documentation for the
corresponding arguments of Operator::Forward
"""
# pylint: disable=W0613
pass
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
"""Backward interface. Can override when creating new operators.
Parameters
----------
req : list of str
            How to assign to in_grad; can be 'null', 'write', or 'add'.
You can optionally use self.assign(dst, req, src) to handle this.
out_grad, in_data, out_data, in_grad, aux : list of NDArrays
            input and output for backward. See the documentation for the
corresponding arguments of Operator::Backward
"""
# pylint: disable=W0613
pass
def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
elif req == 'write' or req == 'inplace':
dst[:] = src
elif req == 'add':
dst[:] += src
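# Example (sketch): a typical CustomOp.forward implementation writes its
# result back through assign() so that 'null'/'write'/'add' requests are all
# honoured, e.g.
#
#     def forward(self, is_train, req, in_data, out_data, aux):
#         self.assign(out_data[0], req[0], in_data[0])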
class CustomOpProp(object):
"""Base class for operator property class implemented in python.
Parameters
----------
    need_top_grad : bool
        Whether the default declare_backward_dependency should include
        out_grad in its returned dependencies, i.e. whether this operator
        needs gradient input for its backward pass.
"""
def __init__(self, need_top_grad=True):
self.need_top_grad_ = need_top_grad
def infer_shape(self, in_shape):
"""infer_shape interface. Can override when creating new operators.
Parameters
----------
in_shape : list
List of argument shapes in the same order as
declared in list_arguments.
Returns
-------
in_shape : list
List of argument shapes. Can be modified from in_shape.
out_shape : list
List of output shapes calculated from in_shape,
in the same order as declared in list_outputs.
aux_shape : Optional, list
List of aux shapes calculated from in_shape,
in the same order as declared in list_auxiliary_states.
"""
return in_shape, (in_shape[0],)*len(self.list_outputs()), ()
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states())
def list_outputs(self):
"""list_outputs interface. Can override when creating new operators.
Returns
-------
outputs : list
List of output blob names.
"""
return ['output']
def list_arguments(self):
"""list_arguments interface. Can override when creating new operators.
Returns
-------
arguments : list
List of argument blob names.
"""
return ['data']
def list_auxiliary_states(self):
"""list_auxiliary_states interface. Can override when creating new operators.
Returns
-------
auxs : list
list of auxiliary state blob names.
"""
return []
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad_:
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
def create_operator(self, ctx, in_shapes, in_dtypes):
"""Create an operator that carries out the real computation
given the context, input shapes, and input data types."""
# pylint: disable=W0613
return CustomOp()
class _Registry(object):
"""CustomOp registry."""
def __init__(self):
self.ref_holder = {}
self.counter = 0
self.lock = Lock()
def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur
_registry = _Registry()
def register(reg_name):
"""Register a subclass of CustomOpProp to the registry with name reg_name."""
def do_register(prop_cls):
"""Register a subclass of CustomOpProp to the registry."""
class MXCallbackList(Structure):
"""Structure that holds Callback information. Passed to CustomOpProp."""
_fields_ = [
('num_callbacks', c_int),
('callbacks', POINTER(CFUNCTYPE(c_int))),
('contexts', POINTER(c_void_p))
]
fb_functype = CFUNCTYPE(c_int, c_int, POINTER(c_void_p), POINTER(c_int),
POINTER(c_int), c_int, c_void_p)
del_functype = CFUNCTYPE(c_int, c_void_p)
infershape_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int),
POINTER(POINTER(mx_uint)), c_void_p)
infertype_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
list_functype = CFUNCTYPE(c_int, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_int, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
createop_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(POINTER(mx_uint)),
POINTER(c_int), POINTER(c_int),
POINTER(MXCallbackList), c_void_p)
req_enum = ('null', 'write', 'inplace', 'add')
def creator(op_type, argc, keys, vals, ret):
"""internal function"""
assert py_str(op_type) == reg_name
kwargs = dict([(py_str(keys[i]), py_str(vals[i])) for i in range(argc)])
op_prop = prop_cls(**kwargs)
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for ``CustomOpProp::InferShape``."""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
for i in range(n_in)]
ret = op_prop.infer_shape(shapes)
if len(ret) == 2:
ishape, oshape = ret
ashape = []
elif len(ret) == 3:
ishape, oshape, ashape = ret
else:
raise AssertionError("infer_shape must return 2 or 3 lists")
assert len(oshape) == n_out, \
"InferShape Error: expecting %d entries in returned output " \
"shapes, got %d."%(n_out, len(oshape))
assert len(ishape) == n_in, \
"InferShape Error: expecting %d entries in returned input " \
"shapes, got %d."%(n_in, len(ishape))
assert len(ashape) == n_aux, \
"InferShape Error: expecting %d entries in returned aux state " \
"shapes, got %d."%(n_aux, len(ashape))
rshape = list(ishape) + list(oshape) + list(ashape)
for i in range(n_in+n_out+n_aux):
tensor_shapes[i] = cast(c_array(mx_uint, rshape[i]), POINTER(mx_uint))
tensor_dims[i] = len(rshape[i])
infer_shape_entry._ref_holder = [tensor_shapes]
except Exception:
print('Error in %s.infer_shape: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_type_entry(num_tensor, tensor_types, _):
"""C Callback for CustomOpProp::InferType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
types = [_DTYPE_MX_TO_NP[tensor_types[i]] for i in range(n_in)]
ret = op_prop.infer_type(types)
if len(ret) == 2:
itype, otype = ret
atype = []
elif len(ret) == 3:
itype, otype, atype = ret
else:
raise AssertionError("infer_type must return 2 or 3 lists")
assert len(otype) == n_out, \
"InferType Error: expecting %d entries in returned output " \
"shapes, got %d."%(n_out, len(otype))
assert len(itype) == n_in, \
"InferType Error: expecting %d entries in returned input " \
"shapes, got %d."%(n_in, len(itype))
assert len(atype) == n_aux, \
"InferType Error: expecting %d entries in returned aux state " \
"shapes, got %d."%(n_aux, len(atype))
rtype = list(itype) + list(otype) + list(atype)
for i, dtype in enumerate(rtype):
tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
infer_type_entry._ref_holder = [tensor_types]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_outputs_entry(out, _):
"""C Callback for CustomOpProp::ListOutputs"""
try:
ret = op_prop.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_outputs_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_outputs: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_arguments_entry(out, _):
"""C Callback for CustomOpProp::ListArguments"""
try:
ret = op_prop.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_arguments_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_arguments: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_auxiliary_states_entry(out, _):
"""C Callback for CustomOpProp::ListAuxiliaryStates"""
try:
ret = op_prop.list_auxiliary_states()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_auxiliary_states_entry._ref_holder = [out]
except Exception:
tb = traceback.format_exc()
print('Error in %s.list_auxiliary_states: %s' % (reg_name, tb))
return False
return True
def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for CustomOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(op_prop.list_outputs()))]
in_data = [in_data[i] for i in range(len(op_prop.list_arguments()))]
out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
rdeps = cast(c_array(c_int, rdeps), c_int_p)
deps[0] = rdeps
declare_backward_dependency_entry._ref_holder = [deps]
except Exception:
tb = traceback.format_exc()
print('Error in %s.declare_backward_dependency: %s' % (reg_name, tb))
return False
return True
def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
"""C Callback for CustomOpProp::CreateOperator"""
try:
ctx = py_str(ctx)
sep = ctx.find('(')
ctx = context.Context(ctx[:sep], int(ctx[sep+1:-1]))
ndims = [ndims[i] for i in range(num_inputs)]
shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
dtypes = [dtypes[i] for i in range(num_inputs)]
op = op_prop.create_operator(ctx, shapes, dtypes)
def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Forward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_ndarray):
if tags[i] == 1 or tags[i] == 4:
tensors[tags[i]].append(NDArray(cast(ndarraies[i],
NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(NDArray(cast(ndarraies[i],
NDArrayHandle),
writable=False))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
with ctx:
op.forward(is_train=is_train, req=reqs,
in_data=tensors[0], out_data=tensors[1],
aux=tensors[4])
except Exception:
print('Error in CustomOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Backward"""
# pylint: disable=W0613
try:
tensors = [[] for i in range(5)]
for i in range(num_ndarray):
if tags[i] == 2 or tags[i] == 4:
tensors[tags[i]].append(NDArray(cast(ndarraies[i],
NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(NDArray(cast(ndarraies[i],
NDArrayHandle),
writable=False))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
with ctx:
op.backward(req=reqs,
in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3],
aux=tensors[4])
except Exception:
print('Error in CustomOp.backward: %s' % traceback.format_exc())
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
fb_functype(forward_entry),
fb_functype(backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None, None, None]
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op._ref_holder = [ret]
_registry.ref_holder[cur] = op
except Exception:
print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOpProp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOpProp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
list_functype(list_arguments_entry),
list_functype(list_outputs_entry),
list_functype(list_auxiliary_states_entry),
infershape_functype(infer_shape_entry),
deps_functype(declare_backward_dependency_entry),
createop_functype(create_operator_entry),
infertype_functype(infer_type_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None]*len(callbacks)
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op_prop._ref_holder = [ret]
_registry.ref_holder[cur] = op_prop
return True
creator_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(c_char_p),
POINTER(c_char_p), POINTER(MXCallbackList))
creator_func = creator_functype(creator)
check_call(_LIB.MXCustomOpRegister(c_str(reg_name), creator_func))
cur = _registry.inc()
_registry.ref_holder[cur] = creator_func
return prop_cls
return do_register
register("custom_op")(CustomOpProp)
|
|
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan and Ishmaal Erekson
Last updated: March 2015
File name: DF_Plots.py
Organization: RISC Lab, Utah State University
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import pylab as p
import matplotlib.pyplot as plt
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import * # states,controls,trajectory
from sensor_msgs.msg import Joy
#========================#
# Globals #
#========================#
rate = 20 # Hz
states = Cortex()
traj = Trajectories()
ctrl = Controls()
start_time = 0
euler_max = 45*np.pi/180
Button_pushed = False
plot_button = 3
#===================================#
# Plotting Variables #
#===================================#
states_of_interest = 16
storage_mat = np.asmatrix(np.zeros((1,states_of_interest)))
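# Column layout of storage_mat (one row per logged sample), matching the
# unpacking in Plots() and the stacking in Datalogger():
#   0-2  : x_act,  y_act,  z_act         measured position
#   3-5  : x_des,  y_des,  z_des         desired position
#   6-8  : phi_des, theta_des, psi_des   commanded/desired angles
#   9-11 : phi_act, theta_act, psi_act   measured angles
#   12-14: xdot_err, ydot_err, zdot_err  velocity errors
#   15   : t                             time since start (s)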
index = [0]
name = ['Initial']
#==================#
# Get States #
#==================#
def GetStates(I):
global states
states = I
#=========================#
# Get Joystick Data #
#=========================#
def GetJoy(I):
global Button_pushed
Button_pushed = I.buttons[plot_button]
#======================#
# Get Trajectory #
#======================#
def GetTrajectory(I):
global traj
traj = I
#====================#
# Get Controls #
#====================#
def GetControls(I):
global ctrl
ctrl = I
def Plots():
global storage_mat, index, name
if len(index) > 2:
for i in range(len(index)-1):
# assign data vectors
f = index[i+1]
if i+2 == len(index):
b = -1
else:
b = index[i+2]
x_act = storage_mat[f:b,0]
y_act = storage_mat[f:b,1]
z_act = storage_mat[f:b,2]
x_des = storage_mat[f:b,3]
y_des = storage_mat[f:b,4]
z_des = storage_mat[f:b,5]
phi_des = storage_mat[f:b,6]
theta_des = storage_mat[f:b,7]
psi_des = storage_mat[f:b,8]
phi_act = storage_mat[f:b,9]
theta_act = storage_mat[f:b,10]
psi_act = storage_mat[f:b,11]
xdot_err = storage_mat[f:b,12]
ydot_err = storage_mat[f:b,13]
zdot_err = storage_mat[f:b,14]
t = storage_mat[f:b,15]
# 3d plot
plot3d(name[i+1],x_act,y_act,z_act,x_des,y_des,z_des)
# Roll
plot2d(name[i+1] + ' Roll',phi_act,phi_des,t,'Time (s)','Angle (Deg)')
# Pitch
plot2d(name[i+1] + ' Pitch',theta_act,theta_des,t,'Time (s)','Angle (Deg)')
# Errors
plot3err(name[i+1] + ' Position Errors',x_des-x_act,y_des-y_act,z_des-z_act,t,'Time (s)', 'Error (m)', 'x', 'y', 'z')
plot3err(name[i+1] + ' Velocity Errors',xdot_err,ydot_err,zdot_err,t,'Time (s)', 'Error (m/s)', 'xdot', 'ydot', 'zdot')
plt.show(block=False)
else:
rospy.loginfo("insufficient data")
#==========================#
# Plotting Functions #
#==========================#
def plot3d(Traj_name,x_act,y_act,z_act,x_des,y_des,z_des):
x_act=list(np.array(x_act).reshape(-1))
y_act=list(np.array(y_act).reshape(-1))
z_act=list(np.array(z_act).reshape(-1))
x_des=list(np.array(x_des).reshape(-1))
y_des=list(np.array(y_des).reshape(-1))
z_des=list(np.array(z_des).reshape(-1))
fig = plt.figure(Traj_name)
ax = fig.gca(projection='3d')
ax.plot(x_act, y_act, z_act,'k-', label='Actual')
ax.plot(x_des, y_des, z_des,'r-', label='Desired')
ax.legend()
ax.set_title(Traj_name + ' Trajectory', fontsize=16)
ax.set_xlabel(r'X (m)', fontsize=14)
ax.set_ylabel(r'Y (m)', fontsize=14)
ax.set_zlabel(r'Z (m)', fontsize=14)
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
ax.set_zlim([0, 2])
def plot3err(plot_name,err1,err2,err3,time,xaxis_label, yaxis_label,label1, label2, label3):
Err1 = list(np.array(err1).reshape(-1))
Err2 = list(np.array(err2).reshape(-1))
Err3 = list(np.array(err3).reshape(-1))
time = list(np.array(time).reshape(-1))
fig = plt.figure(plot_name)
plt.plot(time, Err1,'b-', label=label1)
plt.plot(time, Err2,'k-', label=label2)
plt.plot(time, Err3,'r-', label=label3)
plt.legend()
plt.title(plot_name, fontsize=16)
plt.xlabel(xaxis_label, fontsize=14)
plt.ylabel(yaxis_label, fontsize=14)
plt.xlim((time[0],time[-1]))
y_min = min([min(Err1),min(Err2),min(Err3)])
y_min = y_min - .2*abs(y_min)
y_max = max([max(Err1),max(Err2),max(Err3)])
y_max = y_max + .2*abs(y_max)
plt.ylim((y_min,y_max))
def plot2d(plot_name,actual_data,commanded_data,time,xaxis_label, yaxis_label):
actual_data = list(np.array(actual_data).reshape(-1))
commanded_data = list(np.array(commanded_data).reshape(-1))
time = list(np.array(time).reshape(-1))
fig = plt.figure(plot_name)
plt.plot(time, actual_data, 'b-', label='actual')
plt.plot(time, commanded_data,'r:', label='Commanded')
plt.legend()
plt.title(plot_name, fontsize=16)
plt.xlabel(xaxis_label, fontsize=14)
plt.ylabel(yaxis_label, fontsize=14)
plt.xlim((time[0],time[-1]))
y_min = min([min(actual_data),min(commanded_data)])
y_min = y_min - .2*abs(y_min)
y_max = max([max(actual_data),max(commanded_data)])
y_max = y_max + .2*abs(y_max)
plt.ylim((y_min,y_max))
#==================#
# Datalogger #
#==================#
def Datalogger():
    global start_time, states, ctrl, traj, euler_max, storage_mat, name, index
#======================================================#
# If all states of interest are present log data #
#======================================================#
if len(traj.Obj) > 0 and len(states.Obj) > 0 and len(ctrl.Obj) > 0:
rospy.loginfo("logging data...")
x_act = states.Obj[0].x
y_act = states.Obj[0].y
z_act = states.Obj[0].z
x_des = traj.Obj[0].x
y_des = traj.Obj[0].y
z_des = traj.Obj[0].z
phi_traj = ctrl.Obj[0].phi*euler_max*180/np.pi
theta_traj = ctrl.Obj[0].theta*euler_max*180/np.pi
psi_traj = traj.Obj[0].psi*180/np.pi
phi_cort = states.Obj[0].phi
theta_cort = states.Obj[0].theta
psi_cort = states.Obj[0].psi
u_cort_err = traj.Obj[0].xdot - states.Obj[0].u
v_cort_err = traj.Obj[0].ydot - states.Obj[0].v
w_cort_err = traj.Obj[0].zdot - states.Obj[0].w
t = float(rospy.get_time() - start_time)
        new_stack = np.asmatrix(np.array([x_act, y_act, z_act, x_des, y_des, z_des,\
phi_traj, theta_traj, psi_traj, phi_cort, theta_cort, psi_cort,\
u_cort_err, v_cort_err, w_cort_err,t]))
storage_mat = np.append(storage_mat,new_stack,0)
#==========================================================================#
# If there is a new trajectory store the index and trajectory name #
#==========================================================================#
if len(traj.Obj) > 0 and name[-1] != traj.Obj[0].name:
name.append(traj.Obj[0].name)
index.append(storage_mat.shape[0] -1)
start_time = rospy.get_time()
#===================#
# Main #
#===================#
if __name__=='__main__':
rospy.init_node('DF_Plotter')
start_time = rospy.get_time()
euler_max = float(rospy.get_param("euler_angle_max", ".78537")) #in radians
plot_button = int(rospy.get_param("plot_button", "3"))
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
    # Create the subscribers once, outside the loop; re-creating them on
    # every iteration would spawn duplicate callbacks and leak connections.
    sub_states = rospy.Subscriber('/cortex_raw', Cortex, GetStates)
    sub_traj   = rospy.Subscriber('/trajectory', Trajectories, GetTrajectory)
    sub_cntrl  = rospy.Subscriber('/controls',   Controls, GetControls)
    sub_joy    = rospy.Subscriber('/joy',        Joy, GetJoy)
    while not rospy.is_shutdown():
        Datalogger()
if Button_pushed:
Plots()
answer = raw_input('Erase plots and reset datalogger?')
if answer == 'y' or answer == 'yes' or answer == 'I guess' or answer == 'sure':
rospy.loginfo("Resetting datalogger and erasing plots...")
plt.clf()
start_time = rospy.get_time()
storage_mat = np.asmatrix(np.zeros((1,states_of_interest)))
plt.close('all')
else:
plt.clf()
plt.close('all')
                rospy.signal_shutdown("plotting finished; user requested shutdown")
r.sleep()
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import base64
import calendar
import collections
import itertools
from contextlib import contextmanager
import six
from cryptography import utils, x509
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.backends.interfaces import (
CMACBackend, CipherBackend, DERSerializationBackend, DSABackend,
EllipticCurveBackend, HMACBackend, HashBackend, PBKDF2HMACBackend,
PEMSerializationBackend, RSABackend, X509Backend
)
from cryptography.hazmat.backends.openssl.ciphers import (
_AESCTRCipherContext, _CipherContext
)
from cryptography.hazmat.backends.openssl.cmac import _CMACContext
from cryptography.hazmat.backends.openssl.dsa import (
_DSAParameters, _DSAPrivateKey, _DSAPublicKey
)
from cryptography.hazmat.backends.openssl.ec import (
_EllipticCurvePrivateKey, _EllipticCurvePublicKey
)
from cryptography.hazmat.backends.openssl.encode_asn1 import (
_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS,
_CRL_EXTENSION_ENCODE_HANDLERS, _EXTENSION_ENCODE_HANDLERS,
_encode_asn1_int_gc, _encode_asn1_str_gc, _encode_name_gc, _txt2obj_gc,
)
from cryptography.hazmat.backends.openssl.hashes import _HashContext
from cryptography.hazmat.backends.openssl.hmac import _HMACContext
from cryptography.hazmat.backends.openssl.rsa import (
_RSAPrivateKey, _RSAPublicKey
)
from cryptography.hazmat.backends.openssl.x509 import (
_Certificate, _CertificateRevocationList,
_CertificateSigningRequest, _RevokedCertificate
)
from cryptography.hazmat.bindings._openssl import ffi as _ffi
from cryptography.hazmat.bindings.openssl import binding
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.hazmat.primitives.asymmetric.padding import (
MGF1, OAEP, PKCS1v15, PSS
)
from cryptography.hazmat.primitives.ciphers.algorithms import (
AES, ARC4, Blowfish, CAST5, Camellia, IDEA, SEED, TripleDES
)
from cryptography.hazmat.primitives.ciphers.modes import (
CBC, CFB, CFB8, CTR, ECB, GCM, OFB
)
_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"])
class _PasswordUserdata(object):
def __init__(self, password):
self.password = password
self.called = 0
self.exception = None
@binding.ffi_callback("int (char *, int, int, void *)",
name="Cryptography_pem_password_cb")
def _pem_password_cb(buf, size, writing, userdata_handle):
"""
    A pem_password_cb function pointer that copies the password to
OpenSSL as required and returns the number of bytes copied.
typedef int pem_password_cb(char *buf, int size,
int rwflag, void *userdata);
Useful for decrypting PKCS8 files and so on.
The userdata pointer must point to a cffi handle of a
_PasswordUserdata instance.
"""
ud = _ffi.from_handle(userdata_handle)
ud.called += 1
if not ud.password:
ud.exception = TypeError(
"Password was not given but private key is encrypted."
)
return -1
elif len(ud.password) < size:
pw_buf = _ffi.buffer(buf, size)
pw_buf[:len(ud.password)] = ud.password
return len(ud.password)
else:
ud.exception = ValueError(
"Passwords longer than {0} bytes are not supported "
"by this backend.".format(size - 1)
)
return 0
@utils.register_interface(CipherBackend)
@utils.register_interface(CMACBackend)
@utils.register_interface(DERSerializationBackend)
@utils.register_interface(DSABackend)
@utils.register_interface(EllipticCurveBackend)
@utils.register_interface(HashBackend)
@utils.register_interface(HMACBackend)
@utils.register_interface(PBKDF2HMACBackend)
@utils.register_interface(RSABackend)
@utils.register_interface(PEMSerializationBackend)
@utils.register_interface(X509Backend)
class Backend(object):
"""
OpenSSL API binding interfaces.
"""
name = "openssl"
def __init__(self):
self._binding = binding.Binding()
self._ffi = self._binding.ffi
self._lib = self._binding.lib
        # Set the default string mask for encoding ASN1 strings to UTF8. This
        # has been the default in newer OpenSSL releases for several years
        # and is recommended in RFC 2459.
res = self._lib.ASN1_STRING_set_default_mask_asc(b"utf8only")
self.openssl_assert(res == 1)
self._cipher_registry = {}
self._register_default_ciphers()
self.activate_osrandom_engine()
def openssl_assert(self, ok):
return binding._openssl_assert(self._lib, ok)
def activate_builtin_random(self):
# Obtain a new structural reference.
e = self._lib.ENGINE_get_default_RAND()
if e != self._ffi.NULL:
self._lib.ENGINE_unregister_RAND(e)
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
# decrement the structural reference from get_default_RAND
res = self._lib.ENGINE_finish(e)
self.openssl_assert(res == 1)
def activate_osrandom_engine(self):
# Unregister and free the current engine.
self.activate_builtin_random()
# Fetches an engine by id and returns it. This creates a structural
# reference.
e = self._lib.ENGINE_by_id(self._binding._osrandom_engine_id)
self.openssl_assert(e != self._ffi.NULL)
# Initialize the engine for use. This adds a functional reference.
res = self._lib.ENGINE_init(e)
self.openssl_assert(res == 1)
# Set the engine as the default RAND provider.
res = self._lib.ENGINE_set_default_RAND(e)
self.openssl_assert(res == 1)
# Decrement the structural ref incremented by ENGINE_by_id.
res = self._lib.ENGINE_free(e)
self.openssl_assert(res == 1)
# Decrement the functional ref incremented by ENGINE_init.
res = self._lib.ENGINE_finish(e)
self.openssl_assert(res == 1)
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
def openssl_version_text(self):
"""
Friendly string name of the loaded OpenSSL library. This is not
necessarily the same version as it was compiled against.
Example: OpenSSL 1.0.1e 11 Feb 2013
"""
return self._ffi.string(
self._lib.OpenSSL_version(self._lib.OPENSSL_VERSION)
).decode("ascii")
def create_hmac_ctx(self, key, algorithm):
return _HMACContext(self, key, algorithm)
def hash_supported(self, algorithm):
digest = self._lib.EVP_get_digestbyname(algorithm.name.encode("ascii"))
return digest != self._ffi.NULL
def hmac_supported(self, algorithm):
return self.hash_supported(algorithm)
def create_hash_ctx(self, algorithm):
return _HashContext(self, algorithm)
def cipher_supported(self, cipher, mode):
if self._evp_cipher_supported(cipher, mode):
return True
elif isinstance(mode, CTR) and isinstance(cipher, AES):
return True
else:
return False
def _evp_cipher_supported(self, cipher, mode):
try:
adapter = self._cipher_registry[type(cipher), type(mode)]
except KeyError:
return False
evp_cipher = adapter(self, cipher, mode)
return self._ffi.NULL != evp_cipher
def register_cipher_adapter(self, cipher_cls, mode_cls, adapter):
if (cipher_cls, mode_cls) in self._cipher_registry:
raise ValueError("Duplicate registration for: {0} {1}.".format(
cipher_cls, mode_cls)
)
self._cipher_registry[cipher_cls, mode_cls] = adapter
def _register_default_ciphers(self):
for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8, GCM]:
self.register_cipher_adapter(
AES,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CTR, ECB, OFB, CFB]:
self.register_cipher_adapter(
Camellia,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CFB, CFB8, OFB]:
self.register_cipher_adapter(
TripleDES,
mode_cls,
GetCipherByName("des-ede3-{mode.name}")
)
self.register_cipher_adapter(
TripleDES,
ECB,
GetCipherByName("des-ede3")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
Blowfish,
mode_cls,
GetCipherByName("bf-{mode.name}")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
SEED,
mode_cls,
GetCipherByName("seed-{mode.name}")
)
for cipher_cls, mode_cls in itertools.product(
[CAST5, IDEA],
[CBC, OFB, CFB, ECB],
):
self.register_cipher_adapter(
cipher_cls,
mode_cls,
GetCipherByName("{cipher.name}-{mode.name}")
)
self.register_cipher_adapter(
ARC4,
type(None),
GetCipherByName("rc4")
)
def create_symmetric_encryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES) and
not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 1.0.0. It can be removed when we drop 1.0.0 support (RHEL 6.4).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT)
def create_symmetric_decryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES) and
not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 1.0.0. It can be removed when we drop 1.0.0 support (RHEL 6.4).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)
def pbkdf2_hmac_supported(self, algorithm):
return self.hmac_supported(algorithm)
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
buf = self._ffi.new("unsigned char[]", length)
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode("ascii"))
self.openssl_assert(evp_md != self._ffi.NULL)
res = self._lib.PKCS5_PBKDF2_HMAC(
key_material,
len(key_material),
salt,
len(salt),
iterations,
evp_md,
length,
buf
)
self.openssl_assert(res == 1)
return self._ffi.buffer(buf)[:]
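    # Example (sketch, assuming the public `cryptography` KDF API and the
    # module-level `backend` instance): this backend method is normally
    # reached through PBKDF2HMAC rather than called directly, e.g.
    #
    #     from cryptography.hazmat.primitives import hashes
    #     from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    #     kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=b"salt",
    #                      iterations=100000, backend=backend)
    #     key = kdf.derive(b"my great password")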
def _consume_errors(self):
return binding._consume_errors(self._lib)
def _bn_to_int(self, bn):
assert bn != self._ffi.NULL
if six.PY3:
# Python 3 has constant time from_bytes, so use that.
bn_num_bytes = self._lib.BN_num_bytes(bn)
bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes)
bin_len = self._lib.BN_bn2bin(bn, bin_ptr)
# A zero length means the BN has value 0
self.openssl_assert(bin_len >= 0)
return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big")
else:
# Under Python 2 the best we can do is hex()
hex_cdata = self._lib.BN_bn2hex(bn)
self.openssl_assert(hex_cdata != self._ffi.NULL)
hex_str = self._ffi.string(hex_cdata)
self._lib.OPENSSL_free(hex_cdata)
return int(hex_str, 16)
def _int_to_bn(self, num, bn=None):
"""
Converts a python integer to a BIGNUM. The returned BIGNUM will not
be garbage collected (to support adding them to structs that take
ownership of the object). Be sure to register it for GC if it will
be discarded after use.
"""
assert bn is None or bn != self._ffi.NULL
if bn is None:
bn = self._ffi.NULL
if six.PY3:
# Python 3 has constant time to_bytes, so use that.
binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big")
bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn)
self.openssl_assert(bn_ptr != self._ffi.NULL)
return bn_ptr
else:
# Under Python 2 the best we can do is hex()
hex_num = hex(num).rstrip("L").lstrip("0x").encode("ascii") or b"0"
bn_ptr = self._ffi.new("BIGNUM **")
bn_ptr[0] = bn
res = self._lib.BN_hex2bn(bn_ptr, hex_num)
self.openssl_assert(res != 0)
self.openssl_assert(bn_ptr[0] != self._ffi.NULL)
return bn_ptr[0]
def generate_rsa_private_key(self, public_exponent, key_size):
rsa._verify_rsa_parameters(public_exponent, key_size)
rsa_cdata = self._lib.RSA_new()
self.openssl_assert(rsa_cdata != self._ffi.NULL)
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
bn = self._int_to_bn(public_exponent)
bn = self._ffi.gc(bn, self._lib.BN_free)
res = self._lib.RSA_generate_key_ex(
rsa_cdata, key_size, bn, self._ffi.NULL
)
self.openssl_assert(res == 1)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
def generate_rsa_parameters_supported(self, public_exponent, key_size):
return (public_exponent >= 3 and public_exponent & 1 != 0 and
key_size >= 512)
def load_rsa_private_numbers(self, numbers):
rsa._check_private_key_components(
numbers.p,
numbers.q,
numbers.d,
numbers.dmp1,
numbers.dmq1,
numbers.iqmp,
numbers.public_numbers.e,
numbers.public_numbers.n
)
rsa_cdata = self._lib.RSA_new()
self.openssl_assert(rsa_cdata != self._ffi.NULL)
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
p = self._int_to_bn(numbers.p)
q = self._int_to_bn(numbers.q)
d = self._int_to_bn(numbers.d)
dmp1 = self._int_to_bn(numbers.dmp1)
dmq1 = self._int_to_bn(numbers.dmq1)
iqmp = self._int_to_bn(numbers.iqmp)
e = self._int_to_bn(numbers.public_numbers.e)
n = self._int_to_bn(numbers.public_numbers.n)
res = self._lib.RSA_set0_factors(rsa_cdata, p, q)
self.openssl_assert(res == 1)
res = self._lib.RSA_set0_key(rsa_cdata, n, e, d)
self.openssl_assert(res == 1)
res = self._lib.RSA_set0_crt_params(rsa_cdata, dmp1, dmq1, iqmp)
self.openssl_assert(res == 1)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
self.openssl_assert(res == 1)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
def load_rsa_public_numbers(self, numbers):
rsa._check_public_key_components(numbers.e, numbers.n)
rsa_cdata = self._lib.RSA_new()
self.openssl_assert(rsa_cdata != self._ffi.NULL)
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
e = self._int_to_bn(numbers.e)
n = self._int_to_bn(numbers.n)
res = self._lib.RSA_set0_key(rsa_cdata, n, e, self._ffi.NULL)
self.openssl_assert(res == 1)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
def _create_evp_pkey_gc(self):
evp_pkey = self._lib.EVP_PKEY_new()
self.openssl_assert(evp_pkey != self._ffi.NULL)
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
return evp_pkey
def _rsa_cdata_to_evp_pkey(self, rsa_cdata):
evp_pkey = self._create_evp_pkey_gc()
res = self._lib.EVP_PKEY_set1_RSA(evp_pkey, rsa_cdata)
self.openssl_assert(res == 1)
return evp_pkey
def _bytes_to_bio(self, data):
"""
Return a _MemoryBIO namedtuple of (BIO, char*).
The char* is the storage for the BIO and it must stay alive until the
BIO is finished with.
"""
data_char_p = self._ffi.new("char[]", data)
bio = self._lib.BIO_new_mem_buf(
data_char_p, len(data)
)
self.openssl_assert(bio != self._ffi.NULL)
return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p)
def _create_mem_bio_gc(self):
"""
Creates an empty memory BIO.
"""
bio_method = self._lib.BIO_s_mem()
self.openssl_assert(bio_method != self._ffi.NULL)
bio = self._lib.BIO_new(bio_method)
self.openssl_assert(bio != self._ffi.NULL)
bio = self._ffi.gc(bio, self._lib.BIO_free)
return bio
def _read_mem_bio(self, bio):
"""
Reads a memory BIO. This only works on memory BIOs.
"""
buf = self._ffi.new("char **")
buf_len = self._lib.BIO_get_mem_data(bio, buf)
self.openssl_assert(buf_len > 0)
self.openssl_assert(buf[0] != self._ffi.NULL)
bio_data = self._ffi.buffer(buf[0], buf_len)[:]
return bio_data
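    # Example (sketch) of how the memory-BIO helpers are typically paired
    # inside this backend: serialize an object into a fresh BIO, then pull
    # the bytes back out (`x509_ptr` is a hypothetical X509* cdata here).
    #
    #     bio = self._create_mem_bio_gc()
    #     res = self._lib.PEM_write_bio_X509(bio, x509_ptr)
    #     self.openssl_assert(res == 1)
    #     pem_bytes = self._read_mem_bio(bio)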
def _evp_pkey_to_private_key(self, evp_pkey):
"""
Return the appropriate type of PrivateKey given an evp_pkey cdata
pointer.
"""
key_type = self._lib.EVP_PKEY_id(evp_pkey)
if key_type == self._lib.EVP_PKEY_RSA:
rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)
self.openssl_assert(rsa_cdata != self._ffi.NULL)
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
elif key_type == self._lib.EVP_PKEY_DSA:
dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)
self.openssl_assert(dsa_cdata != self._ffi.NULL)
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
return _DSAPrivateKey(self, dsa_cdata, evp_pkey)
elif (self._lib.Cryptography_HAS_EC == 1 and
key_type == self._lib.EVP_PKEY_EC):
ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)
self.openssl_assert(ec_cdata != self._ffi.NULL)
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey)
else:
raise UnsupportedAlgorithm("Unsupported key type.")
def _evp_pkey_to_public_key(self, evp_pkey):
"""
Return the appropriate type of PublicKey given an evp_pkey cdata
pointer.
"""
key_type = self._lib.EVP_PKEY_id(evp_pkey)
if key_type == self._lib.EVP_PKEY_RSA:
rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)
self.openssl_assert(rsa_cdata != self._ffi.NULL)
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
elif key_type == self._lib.EVP_PKEY_DSA:
dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)
self.openssl_assert(dsa_cdata != self._ffi.NULL)
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
return _DSAPublicKey(self, dsa_cdata, evp_pkey)
elif (self._lib.Cryptography_HAS_EC == 1 and
key_type == self._lib.EVP_PKEY_EC):
ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)
self.openssl_assert(ec_cdata != self._ffi.NULL)
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey)
else:
raise UnsupportedAlgorithm("Unsupported key type.")
def _pem_password_cb(self, password):
"""
        Generate a pem_password_cb function pointer that copies the password to
OpenSSL as required and returns the number of bytes copied.
typedef int pem_password_cb(char *buf, int size,
int rwflag, void *userdata);
Useful for decrypting PKCS8 files and so on.
Returns a tuple of (cdata function pointer, userdata).
"""
# Forward compatibility for new static callbacks:
# _pem_password_cb is not a nested function because closures don't
# work well with static callbacks. Static callbacks are registered
# globally. The backend is passed in as userdata argument.
userdata = _PasswordUserdata(password=password)
return _pem_password_cb, userdata
def _oaep_hash_supported(self, algorithm):
if self._lib.Cryptography_HAS_RSA_OAEP_MD:
return isinstance(
algorithm, (
hashes.SHA1,
hashes.SHA224,
hashes.SHA256,
hashes.SHA384,
hashes.SHA512,
)
)
else:
return isinstance(algorithm, hashes.SHA1)
def _pss_mgf1_hash_supported(self, algorithm):
if self._lib.Cryptography_HAS_MGF1_MD:
return self.hash_supported(algorithm)
else:
return isinstance(algorithm, hashes.SHA1)
def rsa_padding_supported(self, padding):
if isinstance(padding, PKCS1v15):
return True
elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1):
return self._pss_mgf1_hash_supported(padding._mgf._algorithm)
elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1):
return (
self._oaep_hash_supported(padding._mgf._algorithm) and
self._oaep_hash_supported(padding._algorithm)
)
else:
return False
def generate_dsa_parameters(self, key_size):
if key_size not in (1024, 2048, 3072):
raise ValueError("Key size must be 1024 or 2048 or 3072 bits.")
ctx = self._lib.DSA_new()
self.openssl_assert(ctx != self._ffi.NULL)
ctx = self._ffi.gc(ctx, self._lib.DSA_free)
res = self._lib.DSA_generate_parameters_ex(
ctx, key_size, self._ffi.NULL, 0,
self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
self.openssl_assert(res == 1)
return _DSAParameters(self, ctx)
def generate_dsa_private_key(self, parameters):
ctx = self._lib.DSAparams_dup(parameters._dsa_cdata)
self.openssl_assert(ctx != self._ffi.NULL)
ctx = self._ffi.gc(ctx, self._lib.DSA_free)
self._lib.DSA_generate_key(ctx)
evp_pkey = self._dsa_cdata_to_evp_pkey(ctx)
return _DSAPrivateKey(self, ctx, evp_pkey)
def generate_dsa_private_key_and_parameters(self, key_size):
parameters = self.generate_dsa_parameters(key_size)
return self.generate_dsa_private_key(parameters)
def _dsa_cdata_set_values(self, dsa_cdata, p, q, g, pub_key, priv_key):
res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g)
self.openssl_assert(res == 1)
res = self._lib.DSA_set0_key(dsa_cdata, pub_key, priv_key)
self.openssl_assert(res == 1)
def load_dsa_private_numbers(self, numbers):
dsa._check_dsa_private_numbers(numbers)
parameter_numbers = numbers.public_numbers.parameter_numbers
dsa_cdata = self._lib.DSA_new()
self.openssl_assert(dsa_cdata != self._ffi.NULL)
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
p = self._int_to_bn(parameter_numbers.p)
q = self._int_to_bn(parameter_numbers.q)
g = self._int_to_bn(parameter_numbers.g)
pub_key = self._int_to_bn(numbers.public_numbers.y)
priv_key = self._int_to_bn(numbers.x)
self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key)
evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata)
return _DSAPrivateKey(self, dsa_cdata, evp_pkey)
def load_dsa_public_numbers(self, numbers):
dsa._check_dsa_parameters(numbers.parameter_numbers)
dsa_cdata = self._lib.DSA_new()
self.openssl_assert(dsa_cdata != self._ffi.NULL)
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
p = self._int_to_bn(numbers.parameter_numbers.p)
q = self._int_to_bn(numbers.parameter_numbers.q)
g = self._int_to_bn(numbers.parameter_numbers.g)
pub_key = self._int_to_bn(numbers.y)
priv_key = self._ffi.NULL
self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key)
evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata)
return _DSAPublicKey(self, dsa_cdata, evp_pkey)
def load_dsa_parameter_numbers(self, numbers):
dsa._check_dsa_parameters(numbers)
dsa_cdata = self._lib.DSA_new()
self.openssl_assert(dsa_cdata != self._ffi.NULL)
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
p = self._int_to_bn(numbers.p)
q = self._int_to_bn(numbers.q)
g = self._int_to_bn(numbers.g)
res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g)
self.openssl_assert(res == 1)
return _DSAParameters(self, dsa_cdata)
def _dsa_cdata_to_evp_pkey(self, dsa_cdata):
evp_pkey = self._create_evp_pkey_gc()
res = self._lib.EVP_PKEY_set1_DSA(evp_pkey, dsa_cdata)
self.openssl_assert(res == 1)
return evp_pkey
def dsa_hash_supported(self, algorithm):
return self.hash_supported(algorithm)
def dsa_parameters_supported(self, p, q, g):
return True
def cmac_algorithm_supported(self, algorithm):
return (
self._lib.Cryptography_HAS_CMAC == 1 and
self.cipher_supported(
algorithm, CBC(b"\x00" * algorithm.block_size)
)
)
def create_cmac_ctx(self, algorithm):
return _CMACContext(self, algorithm)
def create_x509_csr(self, builder, private_key, algorithm):
if not isinstance(algorithm, hashes.HashAlgorithm):
raise TypeError('Algorithm must be a registered hash algorithm.')
if self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101:
if isinstance(private_key, _DSAPrivateKey):
raise NotImplementedError(
"Certificate signing requests aren't implemented for DSA"
" keys on OpenSSL versions less than 1.0.1."
)
if isinstance(private_key, _EllipticCurvePrivateKey):
raise NotImplementedError(
"Certificate signing requests aren't implemented for EC"
" keys on OpenSSL versions less than 1.0.1."
)
# Resolve the signature algorithm.
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode('ascii')
)
self.openssl_assert(evp_md != self._ffi.NULL)
# Create an empty request.
x509_req = self._lib.X509_REQ_new()
self.openssl_assert(x509_req != self._ffi.NULL)
x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
# Set x509 version.
res = self._lib.X509_REQ_set_version(x509_req, x509.Version.v1.value)
self.openssl_assert(res == 1)
# Set subject name.
res = self._lib.X509_REQ_set_subject_name(
x509_req, _encode_name_gc(self, builder._subject_name)
)
self.openssl_assert(res == 1)
# Set subject public key.
public_key = private_key.public_key()
res = self._lib.X509_REQ_set_pubkey(
x509_req, public_key._evp_pkey
)
self.openssl_assert(res == 1)
# Add extensions.
sk_extension = self._lib.sk_X509_EXTENSION_new_null()
self.openssl_assert(sk_extension != self._ffi.NULL)
sk_extension = self._ffi.gc(
sk_extension, self._lib.sk_X509_EXTENSION_free
)
# gc is not necessary for CSRs, as sk_X509_EXTENSION_free
# will release all the X509_EXTENSIONs.
self._create_x509_extensions(
extensions=builder._extensions,
handlers=_EXTENSION_ENCODE_HANDLERS,
x509_obj=sk_extension,
add_func=self._lib.sk_X509_EXTENSION_insert,
gc=False
)
res = self._lib.X509_REQ_add_extensions(x509_req, sk_extension)
self.openssl_assert(res == 1)
# Sign the request using the requester's private key.
res = self._lib.X509_REQ_sign(
x509_req, private_key._evp_pkey, evp_md
)
if res == 0:
errors = self._consume_errors()
self.openssl_assert(errors[0][1] == self._lib.ERR_LIB_RSA)
self.openssl_assert(
errors[0][3] == self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY
)
raise ValueError("Digest too big for RSA key")
return _CertificateSigningRequest(self, x509_req)
def create_x509_certificate(self, builder, private_key, algorithm):
if not isinstance(builder, x509.CertificateBuilder):
raise TypeError('Builder type mismatch.')
if not isinstance(algorithm, hashes.HashAlgorithm):
raise TypeError('Algorithm must be a registered hash algorithm.')
if self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101:
if isinstance(private_key, _DSAPrivateKey):
raise NotImplementedError(
"Certificate signatures aren't implemented for DSA"
" keys on OpenSSL versions less than 1.0.1."
)
if isinstance(private_key, _EllipticCurvePrivateKey):
raise NotImplementedError(
"Certificate signatures aren't implemented for EC"
" keys on OpenSSL versions less than 1.0.1."
)
# Resolve the signature algorithm.
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode('ascii')
)
self.openssl_assert(evp_md != self._ffi.NULL)
# Create an empty certificate.
x509_cert = self._lib.X509_new()
self.openssl_assert(x509_cert != self._ffi.NULL)
x509_cert = self._ffi.gc(x509_cert, self._lib.X509_free)
# Set the x509 version.
res = self._lib.X509_set_version(x509_cert, builder._version.value)
self.openssl_assert(res == 1)
# Set the subject's name.
res = self._lib.X509_set_subject_name(
x509_cert, _encode_name_gc(self, list(builder._subject_name))
)
self.openssl_assert(res == 1)
# Set the subject's public key.
res = self._lib.X509_set_pubkey(
x509_cert, builder._public_key._evp_pkey
)
self.openssl_assert(res == 1)
# Set the certificate serial number.
serial_number = _encode_asn1_int_gc(self, builder._serial_number)
res = self._lib.X509_set_serialNumber(x509_cert, serial_number)
self.openssl_assert(res == 1)
# Set the "not before" time.
res = self._lib.ASN1_TIME_set(
self._lib.X509_get_notBefore(x509_cert),
calendar.timegm(builder._not_valid_before.timetuple())
)
self.openssl_assert(res != self._ffi.NULL)
# Set the "not after" time.
res = self._lib.ASN1_TIME_set(
self._lib.X509_get_notAfter(x509_cert),
calendar.timegm(builder._not_valid_after.timetuple())
)
self.openssl_assert(res != self._ffi.NULL)
# Add extensions.
self._create_x509_extensions(
extensions=builder._extensions,
handlers=_EXTENSION_ENCODE_HANDLERS,
x509_obj=x509_cert,
add_func=self._lib.X509_add_ext,
gc=True
)
# Set the issuer name.
res = self._lib.X509_set_issuer_name(
x509_cert, _encode_name_gc(self, list(builder._issuer_name))
)
self.openssl_assert(res == 1)
# Sign the certificate with the issuer's private key.
res = self._lib.X509_sign(
x509_cert, private_key._evp_pkey, evp_md
)
if res == 0:
errors = self._consume_errors()
self.openssl_assert(errors[0][1] == self._lib.ERR_LIB_RSA)
self.openssl_assert(
errors[0][3] == self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY
)
raise ValueError("Digest too big for RSA key")
return _Certificate(self, x509_cert)
def create_x509_crl(self, builder, private_key, algorithm):
if not isinstance(builder, x509.CertificateRevocationListBuilder):
raise TypeError('Builder type mismatch.')
if not isinstance(algorithm, hashes.HashAlgorithm):
raise TypeError('Algorithm must be a registered hash algorithm.')
if self._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101:
if isinstance(private_key, _DSAPrivateKey):
raise NotImplementedError(
"CRL signatures aren't implemented for DSA"
" keys on OpenSSL versions less than 1.0.1."
)
if isinstance(private_key, _EllipticCurvePrivateKey):
raise NotImplementedError(
"CRL signatures aren't implemented for EC"
" keys on OpenSSL versions less than 1.0.1."
)
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode('ascii')
)
self.openssl_assert(evp_md != self._ffi.NULL)
# Create an empty CRL.
x509_crl = self._lib.X509_CRL_new()
self.openssl_assert(x509_crl != self._ffi.NULL)
x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free)
# Set the x509 CRL version. We only support v2 (integer value 1).
res = self._lib.X509_CRL_set_version(x509_crl, 1)
self.openssl_assert(res == 1)
# Set the issuer name.
res = self._lib.X509_CRL_set_issuer_name(
x509_crl, _encode_name_gc(self, list(builder._issuer_name))
)
self.openssl_assert(res == 1)
# Set the last update time.
last_update = self._lib.ASN1_TIME_set(
self._ffi.NULL, calendar.timegm(builder._last_update.timetuple())
)
self.openssl_assert(last_update != self._ffi.NULL)
last_update = self._ffi.gc(last_update, self._lib.ASN1_TIME_free)
res = self._lib.X509_CRL_set_lastUpdate(x509_crl, last_update)
self.openssl_assert(res == 1)
# Set the next update time.
next_update = self._lib.ASN1_TIME_set(
self._ffi.NULL, calendar.timegm(builder._next_update.timetuple())
)
self.openssl_assert(next_update != self._ffi.NULL)
next_update = self._ffi.gc(next_update, self._lib.ASN1_TIME_free)
res = self._lib.X509_CRL_set_nextUpdate(x509_crl, next_update)
self.openssl_assert(res == 1)
# Add extensions.
self._create_x509_extensions(
extensions=builder._extensions,
handlers=_CRL_EXTENSION_ENCODE_HANDLERS,
x509_obj=x509_crl,
add_func=self._lib.X509_CRL_add_ext,
gc=True
)
# add revoked certificates
for revoked_cert in builder._revoked_certificates:
# Duplicating because the X509_CRL takes ownership and will free
# this memory when X509_CRL_free is called.
revoked = self._lib.Cryptography_X509_REVOKED_dup(
revoked_cert._x509_revoked
)
self.openssl_assert(revoked != self._ffi.NULL)
res = self._lib.X509_CRL_add0_revoked(x509_crl, revoked)
self.openssl_assert(res == 1)
res = self._lib.X509_CRL_sign(
x509_crl, private_key._evp_pkey, evp_md
)
if res == 0:
errors = self._consume_errors()
self.openssl_assert(errors[0][1] == self._lib.ERR_LIB_RSA)
self.openssl_assert(
errors[0][3] == self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY
)
raise ValueError("Digest too big for RSA key")
return _CertificateRevocationList(self, x509_crl)
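# Usage sketch (illustrative only): create_x509_crl above is reached through
# the public CertificateRevocationListBuilder API. Issuer, validity window and
# signing key below are made-up example values.
import datetime
from cryptography import x509 as x509_api
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes as hashes_api
from cryptography.hazmat.primitives.asymmetric import rsa as rsa_api
from cryptography.x509.oid import NameOID

crl_signing_key = rsa_api.generate_private_key(65537, 2048, default_backend())
example_crl = x509_api.CertificateRevocationListBuilder().issuer_name(
    x509_api.Name([x509_api.NameAttribute(NameOID.COMMON_NAME, u"Example CA")])
).last_update(
    datetime.datetime.utcnow()
).next_update(
    datetime.datetime.utcnow() + datetime.timedelta(days=1)
).sign(crl_signing_key, hashes_api.SHA256(), default_backend())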
def _create_x509_extensions(self, extensions, handlers, x509_obj,
add_func, gc):
for i, extension in enumerate(extensions):
x509_extension = self._create_x509_extension(
handlers, extension
)
self.openssl_assert(x509_extension != self._ffi.NULL)
if gc:
x509_extension = self._ffi.gc(
x509_extension, self._lib.X509_EXTENSION_free
)
res = add_func(x509_obj, x509_extension, i)
self.openssl_assert(res >= 1)
def _create_x509_extension(self, handlers, extension):
if isinstance(extension.value, x509.UnrecognizedExtension):
obj = _txt2obj_gc(self, extension.oid.dotted_string)
value = _encode_asn1_str_gc(
self, extension.value.value, len(extension.value.value)
)
return self._lib.X509_EXTENSION_create_by_OBJ(
self._ffi.NULL,
obj,
1 if extension.critical else 0,
value
)
else:
try:
encode = handlers[extension.oid]
except KeyError:
raise NotImplementedError(
'Extension not supported: {0}'.format(extension.oid)
)
ext_struct = encode(self, extension.value)
nid = self._lib.OBJ_txt2nid(
extension.oid.dotted_string.encode("ascii")
)
self.openssl_assert(nid != self._lib.NID_undef)
return self._lib.X509V3_EXT_i2d(
nid, 1 if extension.critical else 0, ext_struct
)
def create_x509_revoked_certificate(self, builder):
if not isinstance(builder, x509.RevokedCertificateBuilder):
raise TypeError('Builder type mismatch.')
x509_revoked = self._lib.X509_REVOKED_new()
self.openssl_assert(x509_revoked != self._ffi.NULL)
x509_revoked = self._ffi.gc(x509_revoked, self._lib.X509_REVOKED_free)
serial_number = _encode_asn1_int_gc(self, builder._serial_number)
res = self._lib.X509_REVOKED_set_serialNumber(
x509_revoked, serial_number
)
self.openssl_assert(res == 1)
rev_date = self._lib.ASN1_TIME_set(
self._ffi.NULL,
calendar.timegm(builder._revocation_date.timetuple())
)
self.openssl_assert(rev_date != self._ffi.NULL)
rev_date = self._ffi.gc(rev_date, self._lib.ASN1_TIME_free)
res = self._lib.X509_REVOKED_set_revocationDate(x509_revoked, rev_date)
self.openssl_assert(res == 1)
# add CRL entry extensions
self._create_x509_extensions(
extensions=builder._extensions,
handlers=_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS,
x509_obj=x509_revoked,
add_func=self._lib.X509_REVOKED_add_ext,
gc=True
)
return _RevokedCertificate(self, None, x509_revoked)
def load_pem_private_key(self, data, password):
return self._load_key(
self._lib.PEM_read_bio_PrivateKey,
self._evp_pkey_to_private_key,
data,
password,
)
def load_pem_public_key(self, data):
mem_bio = self._bytes_to_bio(data)
evp_pkey = self._lib.PEM_read_bio_PUBKEY(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if evp_pkey != self._ffi.NULL:
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
return self._evp_pkey_to_public_key(evp_pkey)
else:
# It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still
# need to check to see if it is a pure PKCS1 RSA public key (not
# embedded in a subjectPublicKeyInfo)
self._consume_errors()
res = self._lib.BIO_reset(mem_bio.bio)
self.openssl_assert(res == 1)
rsa_cdata = self._lib.PEM_read_bio_RSAPublicKey(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if rsa_cdata != self._ffi.NULL:
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
else:
self._handle_key_loading_error()
def load_der_private_key(self, data, password):
# OpenSSL has a function called d2i_AutoPrivateKey that in theory
# handles this automatically, however it doesn't handle encrypted
# private keys. Instead we try to load the key two different ways.
# First we'll try to load it as a traditional key.
bio_data = self._bytes_to_bio(data)
key = self._evp_pkey_from_der_traditional_key(bio_data, password)
if key:
return self._evp_pkey_to_private_key(key)
else:
# Finally we try to load it with the method that handles encrypted
# PKCS8 properly.
return self._load_key(
self._lib.d2i_PKCS8PrivateKey_bio,
self._evp_pkey_to_private_key,
data,
password,
)
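# Usage sketch (illustrative only): the two-pass strategy above (traditional
# key first, then encrypted PKCS8) is what the public DER loader relies on.
# der_bytes and password are placeholders for real inputs.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as serialization_api

def load_der_key_example(der_bytes, password=None):
    # password must be bytes, or None for an unencrypted key.
    return serialization_api.load_der_private_key(
        der_bytes, password, default_backend()
    )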
def _evp_pkey_from_der_traditional_key(self, bio_data, password):
key = self._lib.d2i_PrivateKey_bio(bio_data.bio, self._ffi.NULL)
if key != self._ffi.NULL:
key = self._ffi.gc(key, self._lib.EVP_PKEY_free)
if password is not None:
raise TypeError(
"Password was given but private key is not encrypted."
)
return key
else:
self._consume_errors()
return None
def load_der_public_key(self, data):
mem_bio = self._bytes_to_bio(data)
evp_pkey = self._lib.d2i_PUBKEY_bio(mem_bio.bio, self._ffi.NULL)
if evp_pkey != self._ffi.NULL:
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
return self._evp_pkey_to_public_key(evp_pkey)
else:
# It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still
# need to check to see if it is a pure PKCS1 RSA public key (not
# embedded in a subjectPublicKeyInfo)
self._consume_errors()
res = self._lib.BIO_reset(mem_bio.bio)
self.openssl_assert(res == 1)
rsa_cdata = self._lib.d2i_RSAPublicKey_bio(
mem_bio.bio, self._ffi.NULL
)
if rsa_cdata != self._ffi.NULL:
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
else:
self._handle_key_loading_error()
def load_pem_x509_certificate(self, data):
mem_bio = self._bytes_to_bio(data)
x509 = self._lib.PEM_read_bio_X509(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if x509 == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load certificate")
x509 = self._ffi.gc(x509, self._lib.X509_free)
return _Certificate(self, x509)
def load_der_x509_certificate(self, data):
mem_bio = self._bytes_to_bio(data)
x509 = self._lib.d2i_X509_bio(mem_bio.bio, self._ffi.NULL)
if x509 == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load certificate")
x509 = self._ffi.gc(x509, self._lib.X509_free)
return _Certificate(self, x509)
def load_pem_x509_crl(self, data):
mem_bio = self._bytes_to_bio(data)
x509_crl = self._lib.PEM_read_bio_X509_CRL(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if x509_crl == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load CRL")
x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free)
return _CertificateRevocationList(self, x509_crl)
def load_der_x509_crl(self, data):
mem_bio = self._bytes_to_bio(data)
x509_crl = self._lib.d2i_X509_CRL_bio(mem_bio.bio, self._ffi.NULL)
if x509_crl == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load CRL")
x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free)
return _CertificateRevocationList(self, x509_crl)
def load_pem_x509_csr(self, data):
mem_bio = self._bytes_to_bio(data)
x509_req = self._lib.PEM_read_bio_X509_REQ(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if x509_req == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load request")
x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
return _CertificateSigningRequest(self, x509_req)
def load_der_x509_csr(self, data):
mem_bio = self._bytes_to_bio(data)
x509_req = self._lib.d2i_X509_REQ_bio(mem_bio.bio, self._ffi.NULL)
if x509_req == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load request")
x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
return _CertificateSigningRequest(self, x509_req)
def _load_key(self, openssl_read_func, convert_func, data, password):
mem_bio = self._bytes_to_bio(data)
password_cb, userdata = self._pem_password_cb(password)
userdata_handle = self._ffi.new_handle(userdata)
evp_pkey = openssl_read_func(
mem_bio.bio,
self._ffi.NULL,
password_cb,
userdata_handle,
)
if evp_pkey == self._ffi.NULL:
if userdata.exception is not None:
errors = self._consume_errors()
self.openssl_assert(errors)
raise userdata.exception
else:
self._handle_key_loading_error()
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
if password is not None and userdata.called == 0:
raise TypeError(
"Password was given but private key is not encrypted.")
assert (
(password is not None and userdata.called == 1) or
password is None
)
return convert_func(evp_pkey)
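# Usage sketch (illustrative only): the password callback wired up in _load_key
# is exercised whenever an encrypted PEM key goes through the public loader.
# pem_bytes and passphrase are placeholders.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as serialization_api

def load_encrypted_pem_key_example(pem_bytes, passphrase):
    # Passing a passphrase for an unencrypted key raises TypeError, as above.
    return serialization_api.load_pem_private_key(
        pem_bytes, passphrase, default_backend()
    )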
def _handle_key_loading_error(self):
errors = self._consume_errors()
if not errors:
raise ValueError("Could not unserialize key data.")
elif errors[0][1:] in (
(
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_DECRYPTFINAL_EX,
self._lib.EVP_R_BAD_DECRYPT
),
(
self._lib.ERR_LIB_PKCS12,
self._lib.PKCS12_F_PKCS12_PBE_CRYPT,
self._lib.PKCS12_R_PKCS12_CIPHERFINAL_ERROR,
)
):
raise ValueError("Bad decrypt. Incorrect password?")
elif errors[0][1:] in (
(
self._lib.ERR_LIB_PEM,
self._lib.PEM_F_PEM_GET_EVP_CIPHER_INFO,
self._lib.PEM_R_UNSUPPORTED_ENCRYPTION
),
(
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_PBE_CIPHERINIT,
self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM
)
):
raise UnsupportedAlgorithm(
"PEM data is encrypted with an unsupported cipher",
_Reasons.UNSUPPORTED_CIPHER
)
elif any(
error[1:] == (
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_PKCS82PKEY,
self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM
)
for error in errors
):
raise UnsupportedAlgorithm(
"Unsupported public key algorithm.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
)
else:
assert errors[0][1] in (
self._lib.ERR_LIB_EVP,
self._lib.ERR_LIB_PEM,
self._lib.ERR_LIB_ASN1,
)
raise ValueError("Could not unserialize key data.")
def elliptic_curve_supported(self, curve):
if self._lib.Cryptography_HAS_EC != 1:
return False
try:
curve_nid = self._elliptic_curve_to_nid(curve)
except UnsupportedAlgorithm:
curve_nid = self._lib.NID_undef
ctx = self._lib.EC_GROUP_new_by_curve_name(curve_nid)
if ctx == self._ffi.NULL:
errors = self._consume_errors()
self.openssl_assert(
curve_nid == self._lib.NID_undef or
errors[0][1:] == (
self._lib.ERR_LIB_EC,
self._lib.EC_F_EC_GROUP_NEW_BY_CURVE_NAME,
self._lib.EC_R_UNKNOWN_GROUP
)
)
return False
else:
self.openssl_assert(curve_nid != self._lib.NID_undef)
self._lib.EC_GROUP_free(ctx)
return True
def elliptic_curve_signature_algorithm_supported(
self, signature_algorithm, curve
):
if self._lib.Cryptography_HAS_EC != 1:
return False
# We only support ECDSA right now.
if not isinstance(signature_algorithm, ec.ECDSA):
return False
return self.elliptic_curve_supported(curve)
def generate_elliptic_curve_private_key(self, curve):
"""
Generate a new private key on the named curve.
"""
if self.elliptic_curve_supported(curve):
curve_nid = self._elliptic_curve_to_nid(curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
self.openssl_assert(ec_cdata != self._ffi.NULL)
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
res = self._lib.EC_KEY_generate_key(ec_cdata)
self.openssl_assert(res == 1)
res = self._lib.EC_KEY_check_key(ec_cdata)
self.openssl_assert(res == 1)
evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata)
return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey)
else:
raise UnsupportedAlgorithm(
"Backend object does not support {0}.".format(curve.name),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
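# Usage sketch (illustrative only): the curve support check above backs the
# public EC key generation helper. SECP256R1 is an arbitrary example curve.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec as ec_api

example_ec_key = ec_api.generate_private_key(ec_api.SECP256R1(), default_backend())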
def load_elliptic_curve_private_numbers(self, numbers):
public = numbers.public_numbers
curve_nid = self._elliptic_curve_to_nid(public.curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
self.openssl_assert(ec_cdata != self._ffi.NULL)
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
ec_cdata = self._ec_key_set_public_key_affine_coordinates(
ec_cdata, public.x, public.y)
res = self._lib.EC_KEY_set_private_key(
ec_cdata, self._int_to_bn(numbers.private_value))
self.openssl_assert(res == 1)
evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata)
return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey)
def load_elliptic_curve_public_numbers(self, numbers):
curve_nid = self._elliptic_curve_to_nid(numbers.curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
self.openssl_assert(ec_cdata != self._ffi.NULL)
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
ec_cdata = self._ec_key_set_public_key_affine_coordinates(
ec_cdata, numbers.x, numbers.y)
evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata)
return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey)
def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve):
return (
self.elliptic_curve_supported(curve) and
self._lib.Cryptography_HAS_ECDH == 1 and
isinstance(algorithm, ec.ECDH)
)
def _ec_cdata_to_evp_pkey(self, ec_cdata):
evp_pkey = self._create_evp_pkey_gc()
res = self._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, ec_cdata)
self.openssl_assert(res == 1)
return evp_pkey
def _elliptic_curve_to_nid(self, curve):
"""
Get the NID for a curve name.
"""
curve_aliases = {
"secp192r1": "prime192v1",
"secp256r1": "prime256v1"
}
curve_name = curve_aliases.get(curve.name, curve.name)
curve_nid = self._lib.OBJ_sn2nid(curve_name.encode())
if curve_nid == self._lib.NID_undef:
raise UnsupportedAlgorithm(
"{0} is not a supported elliptic curve".format(curve.name),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
return curve_nid
@contextmanager
def _tmp_bn_ctx(self):
bn_ctx = self._lib.BN_CTX_new()
self.openssl_assert(bn_ctx != self._ffi.NULL)
bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free)
self._lib.BN_CTX_start(bn_ctx)
try:
yield bn_ctx
finally:
self._lib.BN_CTX_end(bn_ctx)
def _ec_key_determine_group_get_set_funcs(self, ctx):
"""
Given an EC_KEY determine the group and what methods are required to
get/set point coordinates.
"""
self.openssl_assert(ctx != self._ffi.NULL)
nid_two_field = self._lib.OBJ_sn2nid(b"characteristic-two-field")
self.openssl_assert(nid_two_field != self._lib.NID_undef)
group = self._lib.EC_KEY_get0_group(ctx)
self.openssl_assert(group != self._ffi.NULL)
method = self._lib.EC_GROUP_method_of(group)
self.openssl_assert(method != self._ffi.NULL)
nid = self._lib.EC_METHOD_get_field_type(method)
self.openssl_assert(nid != self._lib.NID_undef)
if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M:
set_func = self._lib.EC_POINT_set_affine_coordinates_GF2m
get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m
else:
set_func = self._lib.EC_POINT_set_affine_coordinates_GFp
get_func = self._lib.EC_POINT_get_affine_coordinates_GFp
assert set_func and get_func
return set_func, get_func, group
def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y):
"""
This is a port of EC_KEY_set_public_key_affine_coordinates that was
added in 1.0.1.
Sets the public key point in the EC_KEY context to the affine x and y
values.
"""
if x < 0 or y < 0:
raise ValueError(
"Invalid EC key. Both x and y must be non-negative."
)
set_func, get_func, group = (
self._ec_key_determine_group_get_set_funcs(ctx)
)
point = self._lib.EC_POINT_new(group)
self.openssl_assert(point != self._ffi.NULL)
point = self._ffi.gc(point, self._lib.EC_POINT_free)
bn_x = self._int_to_bn(x)
bn_y = self._int_to_bn(y)
with self._tmp_bn_ctx() as bn_ctx:
check_x = self._lib.BN_CTX_get(bn_ctx)
check_y = self._lib.BN_CTX_get(bn_ctx)
res = set_func(group, point, bn_x, bn_y, bn_ctx)
if res != 1:
self._consume_errors()
raise ValueError("EC point not on curve")
res = get_func(group, point, check_x, check_y, bn_ctx)
self.openssl_assert(res == 1)
res = self._lib.BN_cmp(bn_x, check_x)
if res != 0:
self._consume_errors()
raise ValueError("Invalid EC Key X point.")
res = self._lib.BN_cmp(bn_y, check_y)
if res != 0:
self._consume_errors()
raise ValueError("Invalid EC Key Y point.")
res = self._lib.EC_KEY_set_public_key(ctx, point)
self.openssl_assert(res == 1)
res = self._lib.EC_KEY_check_key(ctx)
if res != 1:
self._consume_errors()
raise ValueError("Invalid EC key.")
return ctx
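# Usage sketch (illustrative only): the affine-coordinate validation above runs
# when a public key is rebuilt from x/y values. x_value and y_value are
# placeholders for a real point on the chosen curve.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec as ec_api

def public_key_from_point_example(x_value, y_value):
    numbers = ec_api.EllipticCurvePublicNumbers(x_value, y_value, ec_api.SECP256R1())
    # Raises ValueError (e.g. "EC point not on curve") for an invalid point.
    return numbers.public_key(default_backend())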
def _private_key_bytes(self, encoding, format, encryption_algorithm,
evp_pkey, cdata):
if not isinstance(format, serialization.PrivateFormat):
raise TypeError(
"format must be an item from the PrivateFormat enum"
)
if not isinstance(encryption_algorithm,
serialization.KeySerializationEncryption):
raise TypeError(
"Encryption algorithm must be a KeySerializationEncryption "
"instance"
)
if isinstance(encryption_algorithm, serialization.NoEncryption):
password = b""
passlen = 0
evp_cipher = self._ffi.NULL
elif isinstance(encryption_algorithm,
serialization.BestAvailableEncryption):
# This is a curated value that we will update over time.
evp_cipher = self._lib.EVP_get_cipherbyname(
b"aes-256-cbc"
)
password = encryption_algorithm.password
passlen = len(password)
if passlen > 1023:
raise ValueError(
"Passwords longer than 1023 bytes are not supported by "
"this backend"
)
else:
raise ValueError("Unsupported encryption type")
key_type = self._lib.EVP_PKEY_id(evp_pkey)
if encoding is serialization.Encoding.PEM:
if format is serialization.PrivateFormat.PKCS8:
write_bio = self._lib.PEM_write_bio_PKCS8PrivateKey
key = evp_pkey
else:
assert format is serialization.PrivateFormat.TraditionalOpenSSL
if key_type == self._lib.EVP_PKEY_RSA:
write_bio = self._lib.PEM_write_bio_RSAPrivateKey
elif key_type == self._lib.EVP_PKEY_DSA:
write_bio = self._lib.PEM_write_bio_DSAPrivateKey
else:
assert self._lib.Cryptography_HAS_EC == 1
assert key_type == self._lib.EVP_PKEY_EC
write_bio = self._lib.PEM_write_bio_ECPrivateKey
key = cdata
elif encoding is serialization.Encoding.DER:
if format is serialization.PrivateFormat.TraditionalOpenSSL:
if not isinstance(
encryption_algorithm, serialization.NoEncryption
):
raise ValueError(
"Encryption is not supported for DER encoded "
"traditional OpenSSL keys"
)
return self._private_key_bytes_traditional_der(key_type, cdata)
else:
assert format is serialization.PrivateFormat.PKCS8
write_bio = self._lib.i2d_PKCS8PrivateKey_bio
key = evp_pkey
else:
raise TypeError("encoding must be an item from the Encoding enum")
bio = self._create_mem_bio_gc()
res = write_bio(
bio,
key,
evp_cipher,
password,
passlen,
self._ffi.NULL,
self._ffi.NULL
)
self.openssl_assert(res == 1)
return self._read_mem_bio(bio)
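# Usage sketch (illustrative only): _private_key_bytes above backs the public
# private_bytes() method on key objects. Encoding, format and passphrase are
# example choices.
from cryptography.hazmat.primitives import serialization as serialization_api

def pem_pkcs8_example(private_key, passphrase):
    return private_key.private_bytes(
        encoding=serialization_api.Encoding.PEM,
        format=serialization_api.PrivateFormat.PKCS8,
        encryption_algorithm=serialization_api.BestAvailableEncryption(passphrase),
    )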
def _private_key_bytes_traditional_der(self, key_type, cdata):
if key_type == self._lib.EVP_PKEY_RSA:
write_bio = self._lib.i2d_RSAPrivateKey_bio
elif (self._lib.Cryptography_HAS_EC == 1 and
key_type == self._lib.EVP_PKEY_EC):
write_bio = self._lib.i2d_ECPrivateKey_bio
else:
self.openssl_assert(key_type == self._lib.EVP_PKEY_DSA)
write_bio = self._lib.i2d_DSAPrivateKey_bio
bio = self._create_mem_bio_gc()
res = write_bio(bio, cdata)
self.openssl_assert(res == 1)
return self._read_mem_bio(bio)
def _public_key_bytes(self, encoding, format, key, evp_pkey, cdata):
if not isinstance(encoding, serialization.Encoding):
raise TypeError("encoding must be an item from the Encoding enum")
if (
format is serialization.PublicFormat.OpenSSH or
encoding is serialization.Encoding.OpenSSH
):
if (
format is not serialization.PublicFormat.OpenSSH or
encoding is not serialization.Encoding.OpenSSH
):
raise ValueError(
"OpenSSH format must be used with OpenSSH encoding"
)
return self._openssh_public_key_bytes(key)
elif format is serialization.PublicFormat.SubjectPublicKeyInfo:
if encoding is serialization.Encoding.PEM:
write_bio = self._lib.PEM_write_bio_PUBKEY
else:
assert encoding is serialization.Encoding.DER
write_bio = self._lib.i2d_PUBKEY_bio
key = evp_pkey
elif format is serialization.PublicFormat.PKCS1:
# Only RSA is supported here.
assert self._lib.EVP_PKEY_id(evp_pkey) == self._lib.EVP_PKEY_RSA
if encoding is serialization.Encoding.PEM:
write_bio = self._lib.PEM_write_bio_RSAPublicKey
else:
assert encoding is serialization.Encoding.DER
write_bio = self._lib.i2d_RSAPublicKey_bio
key = cdata
else:
raise TypeError(
"format must be an item from the PublicFormat enum"
)
bio = self._create_mem_bio_gc()
res = write_bio(bio, key)
self.openssl_assert(res == 1)
return self._read_mem_bio(bio)
def _openssh_public_key_bytes(self, key):
if isinstance(key, rsa.RSAPublicKey):
public_numbers = key.public_numbers()
return b"ssh-rsa " + base64.b64encode(
serialization._ssh_write_string(b"ssh-rsa") +
serialization._ssh_write_mpint(public_numbers.e) +
serialization._ssh_write_mpint(public_numbers.n)
)
elif isinstance(key, dsa.DSAPublicKey):
public_numbers = key.public_numbers()
parameter_numbers = public_numbers.parameter_numbers
return b"ssh-dss " + base64.b64encode(
serialization._ssh_write_string(b"ssh-dss") +
serialization._ssh_write_mpint(parameter_numbers.p) +
serialization._ssh_write_mpint(parameter_numbers.q) +
serialization._ssh_write_mpint(parameter_numbers.g) +
serialization._ssh_write_mpint(public_numbers.y)
)
else:
assert isinstance(key, ec.EllipticCurvePublicKey)
public_numbers = key.public_numbers()
try:
curve_name = {
ec.SECP256R1: b"nistp256",
ec.SECP384R1: b"nistp384",
ec.SECP521R1: b"nistp521",
}[type(public_numbers.curve)]
except KeyError:
raise ValueError(
"Only SECP256R1, SECP384R1, and SECP521R1 curves are "
"supported by the SSH public key format"
)
return b"ecdsa-sha2-" + curve_name + b" " + base64.b64encode(
serialization._ssh_write_string(b"ecdsa-sha2-" + curve_name) +
serialization._ssh_write_string(curve_name) +
serialization._ssh_write_string(public_numbers.encode_point())
)
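# Usage sketch (illustrative only): the OpenSSH serialization above is exposed
# through public_bytes() and yields a single "ssh-rsa AAAA..." style line.
from cryptography.hazmat.primitives import serialization as serialization_api

def openssh_line_example(public_key):
    return public_key.public_bytes(
        serialization_api.Encoding.OpenSSH,
        serialization_api.PublicFormat.OpenSSH,
    )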
class GetCipherByName(object):
def __init__(self, fmt):
self._fmt = fmt
def __call__(self, backend, cipher, mode):
cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower()
return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii"))
backend = Backend()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MRUListEx Windows Registry plugin."""
from __future__ import unicode_literals
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.parsers.winreg_plugins import mrulistex
from tests.parsers.winreg_plugins import test_lib
class TestMRUListExStringWindowsRegistryPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the string MRUListEx plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'MRUlist', key_path=key_path,
last_written_time=filetime.timestamp, offset=1456)
# The MRUListEx order is: 2, 0, 1 (terminated by 0xffffffff).
value_data = (
b'\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\xff\xff\xff\xff')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'MRUListEx', data=value_data,
data_type=dfwinreg_definitions.REG_BINARY, offset=123)
registry_key.AddValue(registry_value)
value_data = 'Some random text here'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'0', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=1892)
registry_key.AddValue(registry_value)
value_data = 'c:\\evil.exe\x00'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'1', data=value_data, data_type=dfwinreg_definitions.REG_BINARY,
offset=612)
registry_key.AddValue(registry_value)
value_data = 'C:\\looks_legit.exe'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'2', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=1001)
registry_key.AddValue(registry_value)
return registry_key
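# Illustrative sketch (not part of the plugin under test): the MRUListEx value
# constructed above is a sequence of little-endian 32-bit indexes terminated by
# 0xffffffff, so its bytes decode to the order 2, 0, 1.
import struct

def decode_mrulistex_example(value_data):
    entries = struct.unpack('<{0:d}I'.format(len(value_data) // 4), value_data)
    return [entry for entry in entries if entry != 0xffffffff]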
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExStringWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Some Windows\\'
'InterestingApp\\MRUlist')
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'MRUlist', key_path=key_path)
result = self._CheckFiltersOnKeyPath(plugin, registry_key)
self.assertFalse(result)
registry_value = dfwinreg_fake.FakeWinRegistryValue('MRUListEx')
registry_key.AddValue(registry_value)
registry_value = dfwinreg_fake.FakeWinRegistryValue('0')
registry_key.AddValue(registry_value)
result = self._CheckFiltersOnKeyPath(plugin, registry_key)
self.assertTrue(result)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\Shell\\BagMRU')
self._AssertNotFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU')
self._AssertNotFiltersOnKeyPath(plugin, key_path)
def testProcess(self):
"""Tests the Process function."""
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Some Windows\\'
'InterestingApp\\MRUlist')
time_string = '2012-08-28 09:23:49.002031'
registry_key = self._CreateTestKey(key_path, time_string)
plugin = mrulistex.MRUListExStringWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-08-28 09:23:49.002031')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
expected_message = (
'[{0:s}] '
'Index: 1 [MRU Value 2]: C:\\looks_legit.exe '
'Index: 2 [MRU Value 0]: Some random text here '
'Index: 3 [MRU Value 1]: c:\\evil.exe').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
class TestMRUListExShellItemListWindowsRegistryPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the shell item list MRUListEx plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExShellItemListWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\StreamMRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = mrulistex.MRUListExShellItemListWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 65)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[40]
self.CheckTimestamp(event.timestamp, '2011-08-28 22:48:28.159309')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
expected_message = (
'[{0:s}\\exe] '
'Index: 1 [MRU Value 1]: Shell item path: <My Computer> '
'P:\\Application Tools\\Firefox 6.0\\Firefox Setup 6.0.exe '
'Index: 2 [MRU Value 0]: Shell item path: <Computers and Devices> '
'<UNKNOWN: 0x00>\\\\controller\\WebDavShare\\Firefox Setup 3.6.12.exe'
'').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
# A shell item event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-03-08 22:16:02.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.data_type, 'windows:shell_item:file_entry')
expected_message = (
'Name: ALLOYR~1 '
'Long name: Alloy Research '
'NTFS file reference: 44518-33 '
'Shell item path: <Shared Documents Folder (Users Files)> '
'<UNKNOWN: 0x00>\\Alloy Research '
'Origin: {0:s}\\*').format(key_path)
expected_short_message = (
'Name: Alloy Research '
'NTFS file reference: 44518-33 '
'Origin: HKEY_CURRENT_USER\\...')
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
class TestMRUListExStringAndShellItemWindowsRegistryPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the string and shell item MRUListEx plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExStringAndShellItemWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\RecentDocs')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\RecentDocs')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = mrulistex.MRUListExStringAndShellItemWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 6)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-04-01 13:52:39.113742')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
expected_message = (
'[{0:s}] '
'Index: 1 [MRU Value 17]: Path: The SHIELD, '
'Shell item: [The SHIELD.lnk] '
'Index: 2 [MRU Value 18]: '
'Path: captain_america_shield_by_almogrem-d48x9x8.jpg, '
'Shell item: [captain_america_shield_by_almogrem-d48x9x8.lnk] '
'Index: 3 [MRU Value 16]: Path: captain-america-shield-front.jpg, '
'Shell item: [captain-america-shield-front.lnk] '
'Index: 4 [MRU Value 12]: Path: Leadership, '
'Shell item: [Leadership.lnk] '
'Index: 5 [MRU Value 15]: Path: followership.pdf, '
'Shell item: [followership.lnk] '
'Index: 6 [MRU Value 14]: Path: leaderqualities.pdf, '
'Shell item: [leaderqualities.lnk] '
'Index: 7 [MRU Value 13]: Path: htlhtl.pdf, '
'Shell item: [htlhtl.lnk] '
'Index: 8 [MRU Value 8]: Path: StarFury, '
'Shell item: [StarFury (2).lnk] '
'Index: 9 [MRU Value 7]: Path: Earth_SA-26_Thunderbolt.jpg, '
'Shell item: [Earth_SA-26_Thunderbolt.lnk] '
'Index: 10 [MRU Value 11]: Path: 5031RR_BalancedLeadership.pdf, '
'Shell item: [5031RR_BalancedLeadership.lnk] '
'Index: 11 [MRU Value 10]: '
'Path: SA-23E Mitchell-Hyundyne Starfury.docx, '
'Shell item: [SA-23E Mitchell-Hyundyne Starfury.lnk] '
'Index: 12 [MRU Value 9]: Path: StarFury.docx, '
'Shell item: [StarFury (3).lnk] '
'Index: 13 [MRU Value 6]: Path: StarFury.zip, '
'Shell item: [StarFury.lnk] '
'Index: 14 [MRU Value 4]: Path: VIBRANIUM.docx, '
'Shell item: [VIBRANIUM.lnk] '
'Index: 15 [MRU Value 5]: Path: ADAMANTIUM-Background.docx, '
'Shell item: [ADAMANTIUM-Background.lnk] '
'Index: 16 [MRU Value 3]: Path: Pictures, '
'Shell item: [Pictures.lnk] '
'Index: 17 [MRU Value 2]: Path: nick_fury_77831.jpg, '
'Shell item: [nick_fury_77831.lnk] '
'Index: 18 [MRU Value 1]: Path: Downloads, '
'Shell item: [Downloads.lnk] '
'Index: 19 [MRU Value 0]: Path: wallpaper_medium.jpg, '
'Shell item: [wallpaper_medium.lnk]').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
class TestMRUListExStringAndShellItemListWindowsRegistryPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the string and shell item list MRUListEx plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExStringAndShellItemListWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\LastVisitedPidlMRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\LastVisitedPidlMRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = mrulistex.MRUListExStringAndShellItemListWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 31)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[30]
self.CheckTimestamp(event.timestamp, '2012-04-01 13:52:38.966290')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
expected_message = (
'[{0:s}] '
'Index: 1 [MRU Value 1]: Path: chrome.exe, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
'<UNKNOWN: 0x00> '
'Index: 2 [MRU Value 7]: '
'Path: {{48E1ED6B-CF49-4609-B1C1-C082BFC3D0B4}}, '
'Shell item path: <Shared Documents Folder (Users Files)> '
'<UNKNOWN: 0x00>\\Alloy Research '
'Index: 3 [MRU Value 6]: '
'Path: {{427865A0-03AF-4F25-82EE-10B6CB1DED3E}}, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
'Index: 4 [MRU Value 5]: '
'Path: {{24B5C9BB-48B5-47FF-8343-40481DBA1E2B}}, '
'Shell item path: <My Computer> C:\\Users\\nfury\\Documents '
'Index: 5 [MRU Value 4]: '
'Path: {{0B8CFE96-DB69-4D33-8E3C-36EAB4F709E0}}, '
'Shell item path: <My Computer> C:\\Users\\nfury\\Documents\\'
'Alloy Research '
'Index: 6 [MRU Value 3]: '
'Path: {{D4F85F66-003D-4127-BCE9-CAD7A57B2857}}, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
'Index: 7 [MRU Value 0]: Path: iexplore.exe, '
'Shell item path: <My Computer> P:\\Application Tools\\Firefox 6.0 '
'Index: 8 [MRU Value 2]: Path: Skype.exe, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00>').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
from asciimatics.widgets import Frame, TextBox, Layout, Label, Divider, Text, \
CheckBox, RadioButtons, Button, PopUpDialog, TimePicker, DatePicker, DropdownList, PopupMenu
from asciimatics.effects import Background
from asciimatics.event import MouseEvent
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, NextScene, StopApplication, \
InvalidFields
from asciimatics.parsers import AsciimaticsParser
import sys
import re
import datetime
import logging
# Test data
tree = r"""
${3,1}*
${2} / \
${2} /${1}o${2} \
${2} /_ _\
${2} / \${4}b
${2} / \
${2} / ${1}o${2} \
${2} /__ __\
${1}d${2} / ${4}o${2} \
${2} / \
${2} / ${4}o ${1}o${2}.\
${2} /___________\
${3}|||
${3}|||
""".split("\n")
# Initial data for the form
form_data = {
"TA": tree,
"TB": "alphabet",
"TC": "123",
"TD": "a@b.com",
"RO": "You can't touch this",
"Things": 2,
"CA": False,
"CB": True,
"CC": False,
"DATE": datetime.datetime.now().date(),
"TIME": datetime.datetime.now().time(),
"PWD": "",
"DD": 1
}
logging.basicConfig(filename="forms.log", level=logging.DEBUG)
class DemoFrame(Frame):
def __init__(self, screen):
super(DemoFrame, self).__init__(screen,
int(screen.height * 2 // 3),
int(screen.width * 2 // 3),
data=form_data,
has_shadow=True,
name="My Form")
layout = Layout([1, 18, 1])
self.add_layout(layout)
self._reset_button = Button("Reset", self._reset)
layout.add_widget(Label("Group 1:"), 1)
layout.add_widget(TextBox(5,
label="My First Box:",
name="TA",
parser=AsciimaticsParser(),
line_wrap=True,
on_change=self._on_change), 1)
layout.add_widget(
Text(label="Alpha:",
name="TB",
on_change=self._on_change,
validator="^[a-zA-Z]*$"), 1)
layout.add_widget(
Text(label="Number:",
name="TC",
on_change=self._on_change,
validator="^[0-9]*$",
max_length=4), 1)
layout.add_widget(
Text(label="Email:",
name="TD",
on_change=self._on_change,
validator=self._check_email), 1)
layout.add_widget(Text(label="Readonly:", name="RO", readonly=True), 1)
layout.add_widget(Divider(height=2), 1)
layout.add_widget(Label("Group 2:"), 1)
layout.add_widget(RadioButtons([("Option 1", 1),
("Option 2", 2),
("Option 3", 3)],
label="A Longer Selection:",
name="Things",
on_change=self._on_change), 1)
layout.add_widget(CheckBox("Field 1",
label="A very silly long name for fields:",
name="CA",
on_change=self._on_change), 1)
layout.add_widget(
CheckBox("Field 2", name="CB", on_change=self._on_change), 1)
layout.add_widget(
CheckBox("Field 3", name="CC", on_change=self._on_change), 1)
layout.add_widget(DatePicker("Date",
name="DATE",
year_range=range(1999, 2100),
on_change=self._on_change), 1)
layout.add_widget(
TimePicker("Time", name="TIME", on_change=self._on_change, seconds=True), 1)
layout.add_widget(Text("Password", name="PWD", on_change=self._on_change, hide_char="*"), 1)
layout.add_widget(DropdownList(
[("Item 1", 1),
("Item 2", 2),
("Item 3", 3),
("Item 3", 4),
("Item 3", 5),
("Item 3", 6),
("Item 3", 7),
("Item 3", 8),
("Item 3", 9),
("Item 3", 10),
("Item 3", 11),
("Item 3", 12),
("Item 3", 13),
("Item 3", 14),
("Item 3", 15),
("Item 3", 16),
("Item 4", 17),
("Item 5", 18), ],
label="Dropdown", name="DD", on_change=self._on_change), 1)
layout.add_widget(Divider(height=3), 1)
layout2 = Layout([1, 1, 1])
self.add_layout(layout2)
layout2.add_widget(self._reset_button, 0)
layout2.add_widget(Button("View Data", self._view), 1)
layout2.add_widget(Button("Quit", self._quit), 2)
self.fix()
def process_event(self, event):
# Handle dynamic pop-ups now.
if (event is not None and isinstance(event, MouseEvent) and
event.buttons == MouseEvent.DOUBLE_CLICK):
# By processing the double-click before Frame handling, we have absolute coordinates.
options = [
("Default", self._set_default),
("Green", self._set_green),
("Monochrome", self._set_mono),
("Bright", self._set_bright),
]
if self.screen.colours >= 256:
options.append(("Red/white", self._set_tlj))
self._scene.add_effect(PopupMenu(self.screen, options, event.x, event.y))
event = None
# Pass any other event on to the Frame and contained widgets.
return super(DemoFrame, self).process_event(event)
def _set_default(self):
self.set_theme("default")
def _set_green(self):
self.set_theme("green")
def _set_mono(self):
self.set_theme("monochrome")
def _set_bright(self):
self.set_theme("bright")
def _set_tlj(self):
self.set_theme("tlj256")
def _on_change(self):
changed = False
self.save()
for key, value in self.data.items():
if key not in form_data or form_data[key] != value:
changed = True
break
self._reset_button.disabled = not changed
def _reset(self):
self.reset()
raise NextScene()
def _view(self):
# Build result of this form and display it.
try:
self.save(validate=True)
message = "Values entered are:\n\n"
for key, value in self.data.items():
message += "- {}: {}\n".format(key, value)
except InvalidFields as exc:
message = "The following fields are invalid:\n\n"
for field in exc.fields:
message += "- {}\n".format(field)
self._scene.add_effect(
PopUpDialog(self._screen, message, ["OK"]))
def _quit(self):
self._scene.add_effect(
PopUpDialog(self._screen,
"Are you sure?",
["Yes", "No"],
has_shadow=True,
on_close=self._quit_on_yes))
@staticmethod
def _check_email(value):
m = re.match(r"^[a-zA-Z0-9_\-.]+@[a-zA-Z0-9_\-.]+\.[a-zA-Z0-9_\-.]+$",
value)
return len(value) == 0 or m is not None
@staticmethod
def _quit_on_yes(selected):
# Yes is the first button
if selected == 0:
raise StopApplication("User requested exit")
def demo(screen, scene):
screen.play([Scene([
Background(screen),
DemoFrame(screen)
], -1)], stop_on_resize=True, start_scene=scene, allow_int=True)
last_scene = None
while True:
try:
Screen.wrapper(demo, catch_interrupt=False, arguments=[last_scene])
sys.exit(0)
except ResizeScreenError as e:
last_scene = e.scene
|
|
# Utilities for TARDIS
from astropy import units as u, constants
import numexpr as ne
import numpy as np
import os
import yaml
import re
import logging
import atomic
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return 'Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") - supplied %s' % self.malformed_element_symbol
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return 'Expecting an atomic symbol (e.g. Fe) - supplied %s' % self.malformed_element_symbol
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return 'Expecting a quantity string (e.g. "5 km/s") for keyword - supplied %s' % self.malformed_quantity_string
logger = logging.getLogger(__name__)
synpp_default_yaml_fname = os.path.join(os.path.dirname(__file__), 'data', 'synpp_default.yaml')
def int_to_roman(int_input):
"""
from http://code.activestate.com/recipes/81611-roman-numerals/
Convert an integer to Roman numerals.
:param int_input: an integer between 1 and 3999
:returns result: roman equivalent string of passed :param{int_input}
Examples:
>>> int_to_roman(0)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(-1)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(1.5)
Traceback (most recent call last):
TypeError: expected integer, got <type 'float'>
>>> for i in range(1, 21): print int_to_roman(i),
...
I II III IV V VI VII VIII IX X XI XII XIII XIV XV XVI XVII XVIII XIX XX
>>> print int_to_roman(2000)
MM
>>> print int_to_roman(1999)
MCMXCIX
"""
if not isinstance(int_input, int):
raise TypeError("Expected integer, got %s" % type(int_input))
if not 0 < int_input < 4000:
raise ValueError("Argument must be between 1 and 3999")
int_roman_tuples = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
(100 , 'C'), (90 , 'XC'), (50 , 'L'), (40 , 'XL'),
(10 , 'X'), (9 , 'IX'), (5 , 'V'), (4 , 'IV'), (1, 'I')]
result = ''
for (integer, roman) in int_roman_tuples:
count = int(int_input / integer)
result += roman * count
int_input -= integer * count
return result
def roman_to_int(roman_input):
"""
from http://code.activestate.com/recipes/81611-roman-numerals/
Convert a roman numeral to an integer.
:param roman_input: a valid roman numeral string
:returns sum: equivalent integer of passed :param{roman_input}
>>> r = range(1, 4000)
>>> nums = [int_to_roman(i) for i in r]
>>> ints = [roman_to_int(n) for n in nums]
>>> print r == ints
1
>>> roman_to_int('VVVIV')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: VVVIV
>>> roman_to_int(1)
Traceback (most recent call last):
...
TypeError: expected string, got <type 'int'>
>>> roman_to_int('a')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: A
>>> roman_to_int('IL')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: IL
"""
if not isinstance(roman_input, str):
raise TypeError("expected string, got %s" % type(roman_input))
roman_input = roman_input.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for c in roman_input:
if c not in nums:
raise ValueError("input is not a valid roman numeral: %s" % roman_input)
for i in range(len(roman_input)):
c = roman_input[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(roman_input[i + 1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
result = 0
for n in places:
result += n
# Easiest test for validity...
if int_to_roman(result) == roman_input:
return result
else:
raise ValueError('input is not a valid roman numeral: %s' % roman_input)
def calculate_luminosity(spec_fname, distance, wavelength_column=0, wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
logger.warning('Currently only works with Si and a special setup')
if not radial1d_mdl.atom_data.has_synpp_refs:
raise ValueError(
'The current atom dataset does not contain the necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].ix[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].ix[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['ref_file'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float((radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float((radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}{e^{h\\nu \\beta_\\textrm{rad}} - 1}
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
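# Illustrative example with assumed values: evaluate the Planck intensity above
# on a small frequency grid (Hz) at T = 10,000 K; all quantities are CGS.
example_nu = np.array([1.0e14, 1.0e15, 1.0e16])
example_intensity = intensity_black_body(example_nu, 1.0e4)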
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(int(window_size))
order = np.abs(int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def species_tuple_to_string(species_tuple, roman_numerals=True):
atomic_number, ion_number = species_tuple
element_symbol = atomic.atomic_number2symbol[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '%s %s' % (element_symbol, roman_ion_number)
else:
return '%s %d' % (element_symbol, ion_number)
def species_string_to_tuple(species_string):
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)', species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError('Species string "{0}" is not of format <element_symbol><number> '
'(e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError("Given ion number ('{}') could not be parsed ".format(ion_number_string))
if ion_number > atomic_number:
raise ValueError('Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
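# Illustrative example (not part of the original module; assumes the atomic data
# maps "Si" to atomic number 14 and that roman_to_int understands "II"):
#     >>> species_string_to_tuple("Si II")
#     (14, 1)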
def parse_quantity(quantity_string):
if not isinstance(quantity_string, basestring):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
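# Illustrative example (assumes astropy.units is imported as ``u`` elsewhere in
# this module): parse_quantity("5000 km/s") returns u.Quantity(5000.0, "km/s"),
# while parse_quantity("5000km/s") raises MalformedQuantityError because the
# value and the unit must be separated by whitespace.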
def element_symbol2atomic_number(element_string):
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in atomic.symbol2atomic_number:
raise MalformedElementSymbolError(element_string)
return atomic.symbol2atomic_number[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string symbol
"""
return atomic.atomic_number2symbol[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent letters lowercase
Parameters
----------
element_string : str
Returns
-------
reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
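# Illustrative examples: reformat_element_symbol("si") -> "Si",
# reformat_element_symbol("FE") -> "Fe".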
def quantity_linspace(start, stop, num, **kwargs):
"""
Calculate a linspace between Quantity start and stop values.
Other than that, it takes essentially the same input parameters as ``np.linspace``.
Parameters
----------
start: ~astropy.Quantity
stop: ~astropy.Quantity
num: ~int
Returns
-------
: ~astropy.Quantity
"""
if not (hasattr(start, 'unit') and hasattr(stop, 'unit')):
raise ValueError('Both start and stop need to be quantities with a '
'unit attribute')
return np.linspace(start.value, stop.to(start.unit).value, num, **kwargs) * start.unit
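# Illustrative example (assumes astropy.units as ``u`` and numpy as ``np`` are
# available at module level): quantity_linspace(1 * u.km, 2000 * u.m, num=3)
# returns a Quantity equivalent to [1.0, 1.5, 2.0] km.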
|
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""nova HACKING file compliance testing
built on top of pep8.py
"""
import fnmatch
import inspect
import logging
import os
import re
import subprocess
import sys
import tokenize
import warnings
import pep8
# Don't need this for testing
logging.disable('LOG')
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx calling methods
#N7xx localization
#N8xx git commit messages
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
# See https://github.com/jcrocholl/pep8/pull/111
def excluded(self, filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
return any((pep8.filename_match(filename, self.options.exclude,
default=False),
pep8.filename_match(basename, self.options.exclude,
default=False)))
def input_dir(self, dirname):
"""Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(os.path.join(root, subdir)):
dirs.remove(subdir)
for filename in sorted(files):
# contain a pattern that matches?
if ((pep8.filename_match(filename, filepatterns) and
not self.excluded(filename))):
runner(os.path.join(root, filename))
def is_import_exception(mod):
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
def import_normalize(line):
# convert "from x import y" to "import x.y"
# convert "from x import y as z" to "import x.y" (the "as z" alias is dropped)
split_line = line.split()
if ("import" in line and line.startswith("from ") and "," not in line and
split_line[2] == "import" and split_line[3] != "*" and
split_line[1] != "__future__" and
(len(split_line) == 4 or
(len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
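# Illustrative examples of the normalization performed above:
#     import_normalize("from nova import utils") -> "import nova.utils"
#     import_normalize("from nova import utils as u") -> "import nova.utils"
#     import_normalize("from __future__ import division") -> returned unchanged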
def nova_todo_format(physical_line):
"""Check for 'TODO()'.
nova HACKING guide recommendation for TODO:
Include your name with TODOs as in "#TODO(termie)"
N101
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
pos2 = physical_line.find('#') # make sure it's a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "NOVA N101: Use TODO(NAME)"
def nova_except_format(logical_line):
"""Check for 'except:'.
nova HACKING guide recommends not using except:
Do not write "except:", use "except Exception:" at the very least
N201
"""
if logical_line.startswith("except:"):
yield 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
def nova_except_format_assert(logical_line):
"""Check for 'assertRaises(Exception'.
nova HACKING guide recommends not using assertRaises(Exception...):
Do not use overly broad Exception type
N202
"""
if logical_line.startswith("self.assertRaises(Exception"):
yield 1, "NOVA N202: assertRaises Exception too broad"
def nova_one_import_per_line(logical_line):
"""Check for import format.
nova HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
BAD: from nova.rpc.common import RemoteError, LOG
N301
"""
pos = logical_line.find(',')
parts = logical_line.split()
if (pos > -1 and (parts[0] == "import" or
parts[0] == "from" and parts[2] == "import") and
not is_import_exception(parts[1])):
yield pos, "NOVA N301: one import per line"
_missingImport = set([])
def nova_import_module_only(logical_line):
"""Check for import module only.
nova HACKING guide recommends importing only modules:
Do not import objects, only modules
N302 import only modules
N303 Invalid Import
N304 Relative Import
"""
def importModuleCheck(mod, parent=None, added=False):
"""
If can't find module on first try, recursively check for relative
imports
"""
current_path = os.path.dirname(pep8.current_file)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
valid = True
if parent:
if is_import_exception(parent):
return
parent_mod = __import__(parent, globals(), locals(),
[mod], -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
else:
__import__(mod, globals(), locals(), [], -1)
valid = inspect.ismodule(sys.modules[mod])
if not valid:
if added:
sys.path.pop()
added = False
return logical_line.find(mod), ("NOVA N304: No "
"relative imports. '%s' is a relative import"
% logical_line)
return logical_line.find(mod), ("NOVA N302: import only "
"modules. '%s' does not import a module"
% logical_line)
except (ImportError, NameError) as exc:
if not added:
added = True
sys.path.append(current_path)
return importModuleCheck(mod, parent, added)
else:
name = logical_line.split()[1]
if name not in _missingImport:
if VERBOSE_MISSING_IMPORT != 'False':
print >> sys.stderr, ("ERROR: import '%s' in %s "
"failed: %s" %
(name, pep8.current_file, exc))
_missingImport.add(name)
added = False
sys.path.pop()
return
except AttributeError:
# Invalid import
return logical_line.find(mod), ("NOVA N303: Invalid import, "
"AttributeError raised")
# convert "from x import y" to "import x.y"
# convert "from x import y as z" to "import x.y"
import_normalize(logical_line)
split_line = logical_line.split()
if (logical_line.startswith("import ") and "," not in logical_line and
(len(split_line) == 2 or
(len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
rval = importModuleCheck(mod)
if rval is not None:
yield rval
# TODO(jogo) handle "from x import *"
#TODO(jogo): import template: N305
def nova_import_alphabetical(logical_line, line_number, lines):
"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
N306
"""
# handle import x
# use .lower since capitalization shouldn't dictate order
split_line = import_normalize(logical_line.strip()).lower().split()
split_previous = import_normalize(lines[line_number - 2]
).strip().lower().split()
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
yield (0, "NOVA N306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
def nova_import_no_db_in_virt(logical_line, filename):
if ("nova/virt" in filename and
not filename.endswith("fake.py") and
"nova import db" in logical_line):
yield (0, "NOVA N307: nova.db import not allowed in nova/virt/*")
def nova_docstring_start_space(physical_line):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) > pos + 1):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N401: one line docstring should not start with"
" a space")
def nova_docstring_one_line(physical_line):
"""Check one line docstring end.
nova HACKING guide recommendation for one line docstring:
A one line docstring looks like this and ends in a period.
N402
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
if (physical_line[-5] != '.'):
return pos, "NOVA N402: one line docstring needs a period"
def nova_docstring_multiline_end(physical_line):
"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
N403
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) == pos):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N403: multi line docstring end on new line")
FORMAT_RE = re.compile("%(?:"
"%|" # Ignore plain percents
"(\(\w+\))?" # mapping key
"([#0 +-]?" # flag
"(?:\d+|\*)?" # width
"(?:\.\d+)?" # precision
"[hlL]?" # length mod
"\w))") # type
class LocalizationError(Exception):
pass
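# Illustrative example of the (mapping key, conversion spec) pairs that FORMAT_RE
# (defined above) yields:
#     FORMAT_RE.findall("%(name)s took %d ms (%%)")
#     -> [('(name)', 's'), ('', 'd'), ('', '')]
# A bare "%%" produces empty groups, which is why the i18n check below ignores
# entries with an empty spec.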
def check_i18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
token_type, text, _, _, line = yield
except GeneratorExit:
return
if (token_type == tokenize.NAME and text == "_" and
not line.startswith('def _(msg):')):
while True:
token_type, text, start, _, _ = yield
if token_type != tokenize.NL:
break
if token_type != tokenize.OP or text != "(":
continue # not a localization call
format_string = ''
while True:
token_type, text, start, _, _ = yield
if token_type == tokenize.STRING:
format_string += eval(text)
elif token_type == tokenize.NL:
pass
else:
break
if not format_string:
raise LocalizationError(start,
"NOVA N701: Empty localization string")
if token_type != tokenize.OP:
raise LocalizationError(start,
"NOVA N701: Invalid localization call")
if text != ")":
if text == "%":
raise LocalizationError(start,
"NOVA N702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
raise LocalizationError(start,
"NOVA N702: Use bare string concatenation instead"
" of +")
else:
raise LocalizationError(start,
"NOVA N702: Argument to _ must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(start,
"NOVA N703: Multiple positional placeholders")
def nova_localization_strings(logical_line, tokens):
"""Check localization in line.
N701: bad localization call
N702: complex expression instead of string as argument to _()
N703: multiple positional placeholders
"""
gen = check_i18n()
next(gen)
try:
map(gen.send, tokens)
gen.close()
except LocalizationError as e:
yield e.args
#TODO(jogo) Dict and list objects
current_file = ""
def readlines(filename):
"""Record the current file being tested."""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
"""Monkey patch in nova guidelines.
Look for functions that start with nova_ and have arguments
and add them to the pep8 module.
Assumes you know how to write pep8.py checks
"""
for name, function in globals().items():
if not inspect.isfunction(function):
continue
args = inspect.getargspec(function)[0]
if args and name.startswith("nova"):
exec("pep8.%s = %s" % (name, name))
def once_git_check_commit_title():
"""Check git commit messages.
nova HACKING recommends not referencing a bug or blueprint in the first line;
it should provide an accurate description of the change
N801
N802 Title limited to 50 chars
"""
#Get title of most recent commit
subp = subprocess.Popen(['git', 'log', '--no-merges', '--pretty=%s', '-1'],
stdout=subprocess.PIPE)
title = subp.communicate()[0]
if subp.returncode:
raise Exception("git log failed with code %s" % subp.returncode)
#From https://github.com/openstack/openstack-ci-puppet
# /blob/master/modules/gerrit/manifests/init.pp#L74
#Changeid|bug|blueprint
git_keywords = (r'(I[0-9a-f]{8,40})|'
'([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|'
'([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\\-]+)')
GIT_REGEX = re.compile(git_keywords)
error = False
#NOTE(jogo) if match regex but over 3 words, acceptable title
if GIT_REGEX.search(title) is not None and len(title.split()) <= 3:
print ("N801: git commit title ('%s') should provide an accurate "
"description of the change, not just a reference to a bug "
"or blueprint" % title.strip())
error = True
if len(title.decode('utf-8')) > 72:
print ("N802: git commit title ('%s') should be under 50 chars"
% title.strip())
error = True
return error
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
#Run once tests (not per line)
once_error = once_git_check_commit_title()
#NOVA error codes start with an N
pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
pep8.StyleGuide.excluded = excluded
pep8.StyleGuide.input_dir = input_dir
try:
pep8._main()
sys.exit(once_error)
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# JRTPLIB documentation build configuration file, created by
# sphinx-quickstart on Fri May 13 17:36:32 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'test'
# General information about the project.
project = 'JRTPLIB'
copyright = '2004-now'
author = 'Jori Liesenborgs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
def checkDevel(version):
if os.getenv("READTHEDOCS") == 'True':
if os.getenv("READTHEDOCS_VERSION") == 'latest':
version += " (development version)"
return version
def getVersion():
curDir = os.path.dirname(os.path.realpath(__file__))
cmakePath = os.path.join(curDir,"../../CMakeLists.txt")
confPath = os.path.join(curDir,"../../configure")
if os.path.exists(cmakePath):
gotMajor, gotMinor, gotDebug = False, False, False
major, minor, debug = "?","?","?"
for l in open(cmakePath):
pref = "set(VERSION_"
if l.startswith(pref):
l = l[len(pref):].strip()
idx = l.find(")")
versionPart = l[:idx].strip()
t,n = versionPart.split()
if t == "MAJOR": major, gotMajor = n, True
elif t == "MINOR": minor, gotMinor = n, True
elif t == "DEBUG": debug, gotDebug = n, True
if gotMajor and gotMinor and gotDebug:
return checkDevel(major + "." + minor + "." + debug)
elif os.path.exists(confPath):
for l in open(confPath):
l = l.strip()
pref = "VERSION="
if l.startswith(pref):
version = l[len(pref):].strip()
return checkDevel(version)
raise Exception("No version number found")
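# getVersion() expects either CMakeLists.txt entries of the form (version numbers
# here are illustrative):
#     set(VERSION_MAJOR 3)
#     set(VERSION_MINOR 11)
#     set(VERSION_DEBUG 1)
# or a configure script containing a line such as:
#     VERSION=3.11.1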
version = getVersion()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = "classic"
html_theme_path = [ "." ]
#html_theme_options = {
# "rightsidebar": "true",
# "relbarbgcolor": "black"
#}
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'jrtplibdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'jrtplibdoc.tex', 'JRTPLIB Documentation',
'Jori Liesenborgs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jrtplibdoc', 'JRTPLIB Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'jrtplibdoc', 'JRTPLIB Documentation',
author, 'jrtplibdoc', 'RTP Library',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def checkMarkdownSetting():
for l in open("Doxyfile-changed"):
if "MARKDOWN_SUPPORT" in l: # Markdown was configured
return
# In older doxygen, there was no markdown so we'll set the default
# to NO to avoid strange effects
with open("Doxyfile-changed", "at") as f:
f.write("\nMARKDOWN_SUPPORT = NO\n")
def changeVersionString():
data = open("Doxyfile-changed").read()
data = data.replace("{X.X.X}", getVersion())
open("Doxyfile-changed", "wt").write(data)
import subprocess
import os
import shutil
curpath = os.getcwd()
try:
if os.getenv("READTHEDOCS") == 'True':
dstdir = "_build/html"
else:
dstdir = "../build/html"
dstdir = os.path.abspath(dstdir)
os.chdir("../../")
if os.path.exists("Doxyfile"):
subprocess.call("cp Doxyfile Doxyfile-changed", shell=True)
checkMarkdownSetting()
changeVersionString()
subprocess.call("doxygen Doxyfile-changed", shell=True)
subprocess.call("mv -f documentation/* {}".format(dstdir), shell=True)
elif os.path.exists("doc/jrtplib.tex"):
os.chdir("doc")
subprocess.call("pdflatex jrtplib.tex", shell=True)
subprocess.call("pdflatex jrtplib.tex", shell=True)
subprocess.call("mv -f jrtplib.pdf {}".format(dstdir), shell=True)
with open("{}/index.html".format(dstdir),"wt") as f:
f.write('''
<html>
<head>
<style>
body, html {
margin: 0px;
width: 100%;
height: 100%;
overflow: auto;
}
</style>
</head>
<body>
<embed src="jrtplib.pdf" width="100%" height="100%" type="application/pdf">
</body>
</html>''')
finally:
os.chdir(curpath)
with open("test.rst", "wt") as f:
f.write("Test output\n")
f.write("===========\n\n")
f.write(".. code-block:: none\n\n")
output = subprocess.check_output("which doxygen ; pwd ; set ; ls ; ls _build ; ls _build/html ; ls ../ ; ", shell = True)
for l in output.decode("utf-8", "replace").splitlines():
f.write(" ")
f.write(l)
f.write("\n")
|
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for sequence classification."""
import logging
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Optional
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
glue_compute_metrics,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
class Split(Enum):
train = "train"
dev = "validation"
test = "test"
def get_tfds(
task_name: str,
tokenizer: PreTrainedTokenizer,
max_seq_length: Optional[int] = None,
mode: Split = Split.train,
data_dir: str = None,
):
if task_name == "mnli-mm" and mode == Split.dev:
tfds_name = "mnli_mismatched"
elif task_name == "mnli-mm" and mode == Split.train:
tfds_name = "mnli"
elif task_name == "mnli" and mode == Split.dev:
tfds_name = "mnli_matched"
elif task_name == "sst-2":
tfds_name = "sst2"
elif task_name == "sts-b":
tfds_name = "stsb"
else:
tfds_name = task_name
ds, info = tfds.load("glue/" + tfds_name, split=mode.value, with_info=True, data_dir=data_dir)
ds = glue_convert_examples_to_features(ds, tokenizer, max_seq_length, task_name)
ds = ds.apply(tf.data.experimental.assert_cardinality(info.splits[mode.value].num_examples))
return ds
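# Illustrative usage sketch (not executed here; assumes a tokenizer has been loaded):
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     train_ds = get_tfds(task_name="mrpc", tokenizer=tokenizer, max_seq_length=128)
#     eval_ds = get_tfds(task_name="mrpc", tokenizer=tokenizer,
#                        max_seq_length=128, mode=Split.dev)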
logger = logging.getLogger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
data_dir: Optional[str] = field(default=None, metadata={"help": "The input/output data dir for TFDS."})
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
self.task_name = self.task_name.lower()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, GlueDataTrainingArguments, TFTrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
f"16-bits training: {training_args.fp16}",
)
logger.info(f"Training/evaluation parameters {training_args}")
try:
num_labels = glue_tasks_num_labels["mnli" if data_args.task_name == "mnli-mm" else data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
raise ValueError(f"Task not found: {data_args.task_name}")
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
with training_args.strategy.scope():
model = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_pt=bool(".bin" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
get_tfds(
task_name=data_args.task_name,
tokenizer=tokenizer,
max_seq_length=data_args.max_seq_length,
data_dir=data_args.data_dir,
)
if training_args.do_train
else None
)
eval_dataset = (
get_tfds(
task_name=data_args.task_name,
tokenizer=tokenizer,
max_seq_length=data_args.max_seq_length,
mode=Split.dev,
data_dir=data_args.data_dir,
)
if training_args.do_eval
else None
)
def compute_metrics(p: EvalPrediction) -> Dict:
if output_mode == "classification":
preds = np.argmax(p.predictions, axis=1)
elif output_mode == "regression":
preds = np.squeeze(p.predictions)
return glue_compute_metrics(data_args.task_name, preds, p.label_ids)
# Initialize our Trainer
trainer = TFTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
results.update(result)
return results
if __name__ == "__main__":
main()
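# Example invocation (illustrative; the script file name is assumed):
#
#     python run_tf_glue.py \
#         --model_name_or_path bert-base-cased \
#         --task_name mrpc \
#         --do_train --do_eval \
#         --max_seq_length 128 \
#         --output_dir /tmp/mrpc_output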
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.actions.files."""
from unittest import mock
from absl.testing import absltest
from glazier.lib import buildinfo
from glazier.lib.actions import files
from pyfakefs import fake_filesystem
from pyfakefs import fake_filesystem_shutil
class FilesTest(absltest.TestCase):
def setUp(self):
super(FilesTest, self).setUp()
self.filesystem = fake_filesystem.FakeFilesystem()
files.open = fake_filesystem.FakeFileOpen(self.filesystem)
files.file_util.shutil = fake_filesystem_shutil.FakeShutilModule(
self.filesystem)
files.WindowsError = Exception
# TODO(b/152894756): Split into separate tests.
@mock.patch.object(files.execute, 'execute_binary', autospec=True)
@mock.patch.object(files.shlex, 'split', autospec=True)
@mock.patch.object(files.cache.Cache, 'CacheFromLine', autospec=True)
def testExecute(self, cache, split, eb):
bi = buildinfo.BuildInfo()
cache.side_effect = iter(['cmd.exe /c', 'explorer.exe'])
eb.return_value = 0
e = files.Execute([['cmd.exe /c', [0]], ['explorer.exe']], bi)
e.Run()
self.assertTrue(split.called)
# success codes
cache.side_effect = None
cache.return_value = 'cmd.exe /c script.bat'
e = files.Execute([['cmd.exe /c script.bat', [2, 4]]], bi)
with self.assertRaises(files.ActionError):
e.Run()
eb.return_value = 4
e.Run()
# reboot codes - no retry
e = files.Execute([['cmd.exe /c script.bat', [0], [2, 4]]], bi)
with self.assertRaises(files.RestartEvent) as cm:
e.Run()
exception = cm.exception
self.assertEqual(exception.retry_on_restart, False)
# reboot codes - retry
e = files.Execute([['cmd.exe /c #script.bat', [0], [2, 4], True]], bi)
with self.assertRaises(files.RestartEvent) as cm:
e.Run()
exception = cm.exception
self.assertEqual(exception.retry_on_restart, True)
cache.assert_called_with(mock.ANY, 'cmd.exe /c #script.bat', bi)
# Shell
files.Execute([['cmd.exe /c #script.bat', [4], [0], True, True]], bi).Run()
eb.assert_called_with(mock.ANY, mock.ANY, [4, 0], shell=True)
# KeyboardInterrupt
eb.side_effect = KeyboardInterrupt
with self.assertRaises(files.ActionError):
e.Run()
# Execute Error
eb.side_effect = files.execute.Error
with self.assertRaises(files.ActionError):
e.Run()
# ValueError
split.side_effect = ValueError
with self.assertRaises(files.ActionError):
e.Run()
# Cache error
cache.side_effect = files.cache.CacheError
with self.assertRaises(files.ActionError):
e.Run()
# TODO(b/152894756): Parameterize and add cm for these tests
# (go/python-tips/011).
def testExecuteValidation(self):
e = files.Execute([['cmd.exe', [0], [2], False], ['explorer.exe']], None)
e.Validate()
e = files.Execute([[]], None)
self.assertRaises(files.ValidationError, e.Validate)
e = files.Execute(['explorer.exe'], None)
self.assertRaises(files.ValidationError, e.Validate)
e = files.Execute('explorer.exe', None)
self.assertRaises(files.ValidationError, e.Validate)
e = files.Execute([['cmd.exe', [0]], ['explorer.exe', '0']], None)
self.assertRaises(files.ValidationError, e.Validate)
e = files.Execute([['cmd.exe', [0]], ['explorer.exe', ['0']]], None)
self.assertRaises(files.ValidationError, e.Validate)
e = files.Execute([['cmd.exe', [0], ['2']], ['explorer.exe']], None)
self.assertRaises(files.ValidationError, e.Validate)
e = files.Execute([['cmd.exe', [0], [2], 'True'], ['explorer.exe']], None)
self.assertRaises(files.ValidationError, e.Validate)
with self.assertRaises(files.ValidationError):
files.Execute([['cmd.exe', [0], [2], False, 'True'], ['explorer.exe']],
None).Validate()
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(buildinfo.BuildInfo, 'BinaryPath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetFromBin(self, down_file, bin_path, rel_path):
rel_path.return_value = 'https://glazier-server.example.com/'
bin_path.return_value = 'https://glazier-server.example.com/bin/'
test_sha256 = (
'58157bf41ce54731c0577f801035d47ec20ed16a954f10c29359b8adedcae800')
self.filesystem.create_file(
r'/tmp/autobuild.par.sha256', contents=test_sha256)
down_file.return_value = True
files.Get([['@glazier/1.0/autobuild.par', '/tmp/autobuild.par']],
buildinfo.BuildInfo()).Run()
down_file.assert_called_with(
mock.ANY,
'https://glazier-server.example.com/bin/glazier/1.0/autobuild.par',
'/tmp/autobuild.par',
show_progress=True)
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(buildinfo.BuildInfo, 'BinaryPath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetFromConf(self, down_file, bin_path, rel_path):
rel_path.return_value = 'https://glazier-server.example.com/'
bin_path.return_value = 'https://glazier-server.example.com/bin/'
down_file.return_value = True
files.Get([['#test/script.ps1', '/tmp/autobuild.par']],
buildinfo.BuildInfo()).Run()
down_file.assert_called_with(
mock.ANY,
'https://glazier-server.example.com/test/script.ps1',
'/tmp/autobuild.par',
show_progress=True)
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(buildinfo.BuildInfo, 'BinaryPath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetFromUntagged(self, down_file, bin_path, rel_path):
rel_path.return_value = 'https://glazier-server.example.com/'
bin_path.return_value = 'https://glazier-server.example.com/bin/'
down_file.return_value = True
files.Get([['test/script.ps1', '/tmp/autobuild.par']],
buildinfo.BuildInfo()).Run()
down_file.assert_called_with(
mock.ANY,
'https://glazier-server.example.com/test/script.ps1',
'/tmp/autobuild.par',
show_progress=True)
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(buildinfo.BuildInfo, 'BinaryPath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetFromLocal(self, down_file, bin_path, rel_path):
rel_path.return_value = 'C:/glazier/conf'
bin_path.return_value = 'https://glazier-server.example.com/bin/'
down_file.return_value = True
files.Get([['#script.ps1', '/tmp/autobuild.par']],
buildinfo.BuildInfo()).Run()
down_file.assert_called_with(
mock.ANY,
'C:/glazier/conf/script.ps1',
'/tmp/autobuild.par',
show_progress=True)
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetDownloadErr(self, down_file, r_path):
r_path.return_value = 'https://glazier-server.example.com/'
remote = '@glazier/1.0/autobuild.par'
local = r'/tmp/autobuild.par'
down_file.side_effect = files.download.DownloadError('Error')
with self.assertRaises(files.ActionError):
files.Get([[remote, local]], buildinfo.BuildInfo()).Run()
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetMkdirErr(self, down_file, r_path):
r_path.return_value = 'https://glazier-server.example.com/'
remote = '@glazier/1.0/autobuild.par'
self.filesystem.create_file('/directory')
down_file.side_effect = files.download.DownloadError('Error')
with self.assertRaises(files.ActionError):
files.Get([[remote, '/directory/file.txt']], buildinfo.BuildInfo()).Run()
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
def testGetRelativePath(self, down_file, r_path):
r_path.return_value = 'https://glazier-server.example.com/'
self.filesystem.create_file(
r'/tmp/autobuild.par.sha256',
contents='58157bf41ce54731c0577f801035d47ec20ed16a954f10c29359b8adedcae800'
)
down_file.return_value = True
files.Get([['autobuild.bat', '/tmp/autobuild.bat']],
buildinfo.BuildInfo()).Run()
down_file.assert_called_with(
mock.ANY,
'https://glazier-server.example.com/autobuild.bat',
'/tmp/autobuild.bat',
show_progress=True)
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
@mock.patch.object(files.download.Download, 'VerifyShaHash', autospec=True)
def testGetHashMatch(self, verify, down_file, r_path):
r_path.return_value = 'https://glazier-server.example.com/'
local = r'/tmp/autobuild.par'
test_sha256 = (
'58157bf41ce54731c0577f801035d47ec20ed16a954f10c29359b8adedcae800')
self.filesystem.create_file(
r'/tmp/autobuild.par.sha256', contents=test_sha256)
down_file.return_value = True
verify.return_value = True
files.Get([['@glazier/1.0/autobuild.par', local, test_sha256]],
buildinfo.BuildInfo()).Run()
verify.assert_called_with(mock.ANY, local, test_sha256)
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
@mock.patch.object(files.download.Download, 'VerifyShaHash', autospec=True)
def testGetHashMismatch(self, verify, down_file, r_path):
r_path.return_value = 'https://glazier-server.example.com/'
test_sha256 = (
'58157bf41ce54731c0577f801035d47ec20ed16a954f10c29359b8adedcae800')
self.filesystem.create_file(
r'/tmp/autobuild.par.sha256', contents=test_sha256)
down_file.return_value = True
verify.return_value = False
with self.assertRaises(files.ActionError):
files.Get(
[['@glazier/1.0/autobuild.par', r'/tmp/autobuild.par', test_sha256]],
buildinfo.BuildInfo()).Run()
@mock.patch.object(buildinfo.BuildInfo, 'ReleasePath', autospec=True)
@mock.patch.object(files.download.Download, 'DownloadFile', autospec=True)
@mock.patch.object(files.download.Download, 'VerifyShaHash', autospec=True)
def testGetNoHash(self, verify, down_file, r_path):
r_path.return_value = 'https://glazier-server.example.com/'
test_sha256 = (
'58157bf41ce54731c0577f801035d47ec20ed16a954f10c29359b8adedcae800')
self.filesystem.create_file(
r'/tmp/autobuild.par.sha256', contents=test_sha256)
down_file.return_value = True
files.Get([['@glazier/1.0/autobuild.par', r'/tmp/autobuild.par', '']],
buildinfo.BuildInfo()).Run()
self.assertFalse(verify.called)
def testGetValidate(self):
with self.assertRaises(files.ValidationError):
files.Get('String', None).Validate()
with self.assertRaises(files.ValidationError):
files.Get([[1, 2, 3]], None).Validate()
with self.assertRaises(files.ValidationError):
files.Get([[1, '/tmp/out/path']], None).Validate()
with self.assertRaises(files.ValidationError):
files.Get([['/tmp/src.zip', 2]], None).Validate()
files.Get([['https://glazier/bin/src.zip', '/tmp/out/src.zip']],
None).Validate()
files.Get([['https://glazier/bin/src.zip', '/tmp/out/src.zip', '12345']],
None).Validate()
with self.assertRaises(files.ValidationError):
files.Get([[
'https://glazier/bin/src.zip', '/tmp/out/src.zip', '12345', '67890'
]], None).Validate()
@mock.patch.object(files.file_util, 'CreateDirectories', autospec=True)
@mock.patch('glazier.lib.buildinfo.BuildInfo', autospec=True)
def testUnzip(self, build_info, create_dir):
src = '/tmp/input.zip'
dst = '/out/dir/path'
# bad args
un = files.Unzip([], build_info)
self.assertRaises(files.ActionError, un.Run)
un = files.Unzip([src], build_info)
self.assertRaises(files.ActionError, un.Run)
# bad path
un = files.Unzip([src, dst], build_info)
self.assertRaises(files.ActionError, un.Run)
# create error
create_dir.side_effect = files.file_util.Error
self.assertRaises(files.ActionError, un.Run)
# good
create_dir.side_effect = None
with mock.patch.object(files.zipfile, 'ZipFile', autospec=True) as z:
un = files.Unzip([src, dst], build_info)
un.Run()
z.assert_called_with(src)
z.return_value.extractall.assert_called_with(dst)
create_dir.assert_called_with(dst)
def testUnzipValidate(self):
un = files.Unzip('String', None)
self.assertRaises(files.ValidationError, un.Validate)
un = files.Unzip([1, 2, 3], None)
self.assertRaises(files.ValidationError, un.Validate)
un = files.Unzip([1, '/tmp/out/path'], None)
self.assertRaises(files.ValidationError, un.Validate)
un = files.Unzip(['/tmp/src.zip', 2], None)
self.assertRaises(files.ValidationError, un.Validate)
un = files.Unzip(['/tmp/src.zip', '/tmp/out/path'], None)
un.Validate()
if __name__ == '__main__':
absltest.main()
|
|
# coding: utf8
"""
Delphi Decision Maker - Controllers
"""
module = request.controller
if module not in deployment_settings.modules:
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
response.menu_options = [
[T("Active Problems"), False, URL(r=request, f="index")],
]
if s3_has_role(1):
response.menu_options.extend([
[T("Groups"), False, URL(r=request, f="group")],
])
if s3_has_role(1):
response.menu_options.extend([
[T("Group Memberships"), False, URL(r=request, f="user_to_group")],
])
if s3_has_role(1):
response.menu_options.extend([
[T("Problem Administration"), False, URL(r=request, f="problem")],
])
UNIT_NORMAL = (
( 0.0, .0, .01, .02, .03, .04, .05, .06, .07, .08, .09 ),
( .0, .5000, .5040, .5080, .5120, .5160, .5199, .5239, .5279, .5319, .5359 ),
( .1, .5398, .5438, .5478, .5517, .5557, .5596, .5636, .5675, .5714, .5753 ),
( .2, .5793, .5832, .5871, .5910, .5948, .5987, .6026, .6064, .6103, .6141 ),
( .3, .6179, .6217, .6255, .6293, .6331, .6368, .6406, .6443, .6480, .6517 ),
( .4, .6554, .6591, .6628, .6664, .6700, .6736, .6772, .6808, .6844, .6879 ),
( .5, .6915, .6950, .6985, .7019, .7054, .7088, .7123, .7157, .7190, .7224 ),
( .6, .7257, .7291, .7324, .7357, .7389, .7422, .7454, .7486, .7517, .7549 ),
( .7, .7580, .7611, .7642, .7673, .7703, .7734, .7764, .7794, .7823, .7852 ),
( .8, .7881, .7910, .7939, .7967, .7995, .8023, .8051, .8078, .8106, .8133 ),
( .9, .8159, .8186, .8212, .8238, .8264, .8289, .8315, .8340, .8365, .8389 ),
( 1.0, .8413, .8438, .8461, .8485, .8508, .8531, .8554, .8577, .8599, .8621 ),
( 1.1, .8643, .8665, .8686, .8708, .8729, .8749, .8770, .8790, .8810, .8830 ),
( 1.2, .8849, .8869, .8888, .8907, .8925, .8944, .8962, .8980, .8997, .90147 ),
( 1.3, .90320, .90490, .90658, .90824, .90988, .91149, .91309, .91466, .91621, .91774 ),
( 1.4, .91924, .92073, .92220, .92364, .92507, .92647, .92785, .92922, .93056, .93189 ),
( 1.5, .93319, .93448, .93574, .93699, .93822, .93943, .94062, .94179, .94295, .94408 ),
( 1.6, .94520, .94630, .94738, .94845, .94950, .95053, .95154, .95254, .95352, .95449 ),
( 1.7, .95543, .95637, .95728, .95818, .95907, .95994, .96080, .96164, .96246, .96327 ),
( 1.8, .96407, .96485, .96562, .96638, .96712, .96784, .96856, .96926, .96995, .97062 ),
( 1.9, .97128, .97193, .97257, .97320, .97381, .97441, .97500, .97558, .97615, .97670 ),
( 2.0, .97725, .97778, .97831, .97882, .97932, .97982, .98030, .98077, .98124, .98169 ),
( 2.1, .98214, .98257, .98300, .98341, .98382, .98422, .98461, .98500, .98537, .98574 ),
( 2.2, .98610, .98645, .98679, .98713, .98745, .98778, .98809, .98840, .98870, .98899 ),
( 2.3, .98928, .98956, .98983, .990097, .990358, .990613, .990863, .991106, .991344, .991576 ),
( 2.4, .991802, .992024, .992240, .992451, .992656, .992857, .993053, .993244, .993431, .993613 ),
( 2.5, .993790, .993963, .994132, .994297, .994457, .994614, .994766, .994915, .995060, .995201 ),
( 2.6, .995339, .995473, .995604, .995731, .995855, .995975, .996093, .996207, .996319, .996427 ),
( 2.7, .996533, .996636, .996736, .996833, .996928, .997020, .997110, .997197, .997282, .997365 ),
( 2.8, .997445, .997523, .997599, .997673, .997744, .997814, .997882, .997948, .998012, .998074 ),
( 2.9, .998134, .998193, .998250, .998305, .998359, .998411, .998460, .998511, .998559, .998605 ),
( 3.0, .998650, .998694, .998736, .998777, .998817, .998856, .998893, .998930, .998965, .998999 ),
( 3.1, .9990324, .9990646, .9990957, .9991260, .9991553, .9991836, .9992112, .9992378, .9992636, .9992886 ),
( 3.2, .9993129, .9993363, .9993590, .9993810, .9994024, .9994230, .9994429, .9994623, .9994810, .9994991 ),
( 3.3, .9995166, .9995335, .9995499, .9995658, .9995811, .9995959, .9996103, .9996242, .9996376, .9996505 ),
( 3.4, .9996631, .9996752, .9996869, .9996982, .9997091, .9997197, .9997299, .9997398, .9997493, .9997585 ),
( 3.5, .9997674, .9997759, .9997842, .9997922, .9997999, .9998074, .9998146, .9998215, .9998282, .9998347 ),
( 3.6, .9998409, .9998469, .9998527, .9998583, .9998637, .9998689, .9998739, .9998787, .9998834, .9998879 ),
( 3.7, .9998922, .9998964, .99990039, .99990426, .99990799, .99991158, .99991504, .99991838, .99992159, .99992468 ),
( 3.8, .99992765, .99993052, .99993327, .99993593, .99993848, .99994094, .99994331, .99994558, .99994777, .99994988 ),
( 3.9, .99995190, .99995385, .99995573, .99995753, .99995926, .99996092, .99996253, .99996406, .99996554, .99996696 ),
( 4.0, .99996833, .99996964, .99997090, .99997211, .99997327, .99997439, .99997546, .99997649, .99997748, .99997843 ),
( 4.1, .99997934, .99998022, .99998106, .99998186, .99998263, .99998338, .99998409, .99998477, .99998542, .99998605 ),
( 4.2, .99998665, .99998723, .99998778, .99998832, .99998882, .99998931, .99998978, .999990226, .999990655, .999991066 ),
( 4.3, .999991460, .999991837, .999992199, .999992545, .999992876, .999993193, .999993497, .999993788, .999994066, .999994332 ),
( 4.4, .999994587, .999994831, .999995065, .999995288, .999995502, .999995706, .999995902, .999996089, .999996268, .999996439 ),
( 4.5, .999996602, .999996759, .999996908, .999997051, .999997187, .999997318, .999997442, .999997561, .999997675, .999997784 ),
( 4.6, .999997888, .999997987, .999998081, .999998172, .999998258, .999998340, .999998419, .999998494, .999998566, .999998634 ),
( 4.7, .999998699, .999998761, .999998821, .999998877, .999998931, .999998983, .9999990320, .9999990789, .9999991235, .9999991661 ),
( 4.8, .9999992067, .9999992453, .9999992822, .9999993173, .9999993508, .9999993827, .9999994131, .9999994420, .9999994696, .9999994958 ),
( 4.9, .9999995208, .9999995446, .9999995673, .9999995889, .9999996094, .9999996289, .9999996475, .9999996652, .9999996821, .9999996981 )
)
MIN_COLOR = (0xfc, 0xaf, 0x3e)
MAX_COLOR = (0x4e, 0x9a, 0x06)
def __lookupTable(mp):
unitValue = 0.0
for j in range(1, 50):
if mp == UNIT_NORMAL[j][1]:
unitValue = UNIT_NORMAL[j][0]
elif (UNIT_NORMAL[j][1] < mp) and (mp < UNIT_NORMAL[j + 1][1]):
for i in range(2, 11):
if (UNIT_NORMAL[j][i - 1] < mp) and (mp <= UNIT_NORMAL[j][i]):
unitValue = UNIT_NORMAL[j][0] + UNIT_NORMAL[0][i]
if mp > UNIT_NORMAL[j][10]:
unitValue = UNIT_NORMAL[j + 1][0]
if (mp > UNIT_NORMAL[50][1]) and (mp < UNIT_NORMAL[50][10]):
for i in range(2, 11):
if (UNIT_NORMAL[50][i - 1] < mp) and (mp <= UNIT_NORMAL[50][i]):
unitValue = UNIT_NORMAL[50][0] + UNIT_NORMAL[0][i];
if mp > UNIT_NORMAL[50][10]:
unitValue = 5.0 # mp is beyond the table; treat the z-value as effectively infinite
return unitValue
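# __lookupTable() is an inverse lookup into the UNIT_NORMAL table above: given a
# cumulative probability mp it returns the corresponding z-value of the standard
# normal distribution. Illustrative example: __lookupTable(0.975) returns
# approximately 1.96, since Phi(1.96) = 0.9750.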
def __cal_votes(pr, i_ids):
num_voted = 0
votes = {}
for i1 in i_ids:
for i2 in i_ids:
votes[(i1, i2)] = 0
users = db(db.auth_user.id > 0).select()
for u in users:
query = (db.delphi_vote.problem_id == pr.id) & \
(db.delphi_vote.user_id == u.id) & \
(db.delphi_vote.rank < 9888)
u_votes = [v.solution_id for v in db(query).select(
db.delphi_vote.solution_id,
db.delphi_vote.rank,
orderby = db.delphi_vote.rank)]
if len(u_votes) > 1: num_voted += 1
for i1 in range(len(u_votes)):
for i2 in range(i1+1, len(u_votes)):
votes[(u_votes[i1], u_votes[i2])] += 1
return (votes, num_voted)
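# Note on the structure returned above: votes[(a, b)] counts how many users ranked
# solution a above solution b (a lower rank value means a stronger preference), and
# num_voted is the number of users who ranked at least two solutions.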
class DU:
def user(self):
return db.auth_user[self.user_id]
def __init__(self, group_id=None):
self.authorised = auth.has_membership(1)
self.user_id = auth.user.id if (auth.is_logged_in() and session.auth) else None
self.status = "guest"
self.membership = None
if self.authorised:
self.status = "moderator"
elif self.user_id != None and group_id != None:
self.membership = db((db.delphi_user_to_group.group_id == group_id) &
(db.delphi_user_to_group.user_id == self.user_id)).select()
if self.membership:
self.membership = self.membership[0]
self.status = self.membership.status
self.authorised = (self.status == "moderator")
self.can_vote = self.status in ("moderator", "participant")
self.can_add_item = self.status != "guest"
self.can_post = self.status != "guest"
def __get_commons(solution=None):
if solution:
problem_id = solution.problem_id
else:
problem_id = request.args(0)
pr = db.delphi_problem[problem_id]
if not pr:
raise HTTP(404)
user = DU(pr.group_id)
response.menu_options.extend([
[pr.group_id.name, False, URL(r=request, f="group_summary", args=pr.group_id.id)],
])
return (pr, user)
def index():
"Module Home Page"
module_name = deployment_settings.modules[module].name_nice
groups = db(db.delphi_group.active == True).select()
result = []
for group in groups:
actions = []
duser = DU(group)
if duser.authorised:
actions.append(("group/%d/update" % group.id, "Edit"))
actions.append(("new_problem/create/?group=%s&next=%s" % \
(group.id,
URL(r=request, f="group_summary", args=group.id)),
"Add New Problem"))
actions.append(("group_summary/%s/#request" % group.id, "Review Requests"))
else:
actions.append(("group_summary/%s/#request" % group.id,
"Role: %s%s" % (duser.status,
(duser.membership and duser.membership.req) and "*" or "")))
latest_problems = db((db.delphi_problem.group_id == group.id) &
(db.delphi_problem.active == True)). \
select(orderby =~ db.delphi_problem.last_modification)
result.append((group, latest_problems, actions))
response.title = module_name
return dict(groups_problems=result, name="Active Problems",
module_name=module_name)
def group_summary():
group_id = request.args(0)
group = db.delphi_group[group_id]
if not group:
raise HTTP(404)
duser = DU(group.id)
forms = []
table = db.delphi_user_to_group
table.req.default = True
table.user_id.writable = False
table.user_id.default = duser.user_id
table.group_id.default = group_id
table.group_id.writable = False
fields = ["user_id", "description", "status"]
if duser.authorised:
fields.append("req")
membership_requests = db((table.group_id==group.id) &
(table.req==True)).select()
for membership_req in membership_requests:
form = SQLFORM(table, record=membership_req.id,
fields=fields, labels={
"req": "Needs more review:"
})
ret = form.accepts(request.post_vars, session, dbio=True)
if form.errors:
session.error = T("There are errors")
forms.append(form)
elif duser.user_id:
table.status.writable = False
if duser.membership: fields.append("req")
form = SQLFORM(table, record=duser.membership,
fields=fields, labels={
"status": "Current status:",
"req": "Request for review:"
})
ret = form.accepts(request.post_vars, session, dbio=True)
if form.errors:
session.error = T("There are errors")
forms.append(form)
table = db.delphi_problem
latest_problems = db((table.group_id == group.id) &
(table.active == True)). \
                            select(orderby=~table.last_modification)
return dict(latest_problems=latest_problems, group=group, duser=duser,
name="Active Problems in %s" % group.name, forms=forms)
def new_problem():
group = db(db.delphi_group.id == request.get_vars["group"]).select()[0]
duser = DU(group)
if not duser.authorised:
raise HTTP(403)
response.menu_options.extend([
[group.name, False, URL(r=request, f="group_summary", args=group.id)],
])
table = db.delphi_problem
    table.group_id.default = request.get_vars["group"]
    table.group_id.writable = False
return problem()
def group():
if not s3_has_role(1):
raise HTTP(403)
return s3_rest_controller(module, "group")
def user_to_group():
if not s3_has_role(1):
raise HTTP(403)
return s3_rest_controller(module, "user_to_group")
def problem():
# TODO: access check
return s3_rest_controller(module, "problem")
def solution():
# TODO: access check
    table = db.delphi_solution
    table.problem_id.default = get_last_problem_id()
return s3_rest_controller(module, "solution")
def summary():
pr, duser = __get_commons()
user = duser.user()
if user:
voted = user.delphi_vote.select()
else:
voted = False
if duser.can_add_item and "item_name" in request.post_vars:
db.delphi_solution.insert(problem_id=pr,
name=request.post_vars["item_name"],
description=request.post_vars["item_description"])
return dict(problem=pr, items=pr.delphi_solution.select(),
voted=voted, name="Options", duser=duser)
def save_vote():
pr, duser = __get_commons()
if not duser.can_vote:
raise HTTP(403)
items = [i.id for i in pr.delphi_solution.select()]
ranks = {}
for item_id in items:
if str(item_id) in request.post_vars:
ranks[item_id] = request.post_vars[str(item_id)]
table = db.delphi_vote
    query = (table.problem_id == pr.id) & (table.user_id == duser.user_id)
if duser.user_id:
voted = db(query).select(orderby = table.rank)
else:
voted = False
if voted:
for old in voted:
del table[old.id]
for item_id, rank in ranks.items():
table.insert(problem_id=pr.id, solution_id=item_id, rank=rank)
return '"OK"'
def vote():
pr, duser = __get_commons()
items = dict([(i.id, i.name) for i in pr.delphi_solution.select()])
n = len(items)
if duser.user_id:
table = db.delphi_vote
query = (table.problem_id == pr.id) & (table.user_id == duser.user_id)
voted = db(query).select(orderby = table.rank)
else:
voted = False
# v.rank == 9999 -> user has selected not to vote on v.solution_id
# rank == 9998 -> the solution is new and the user hasn't voted on it yet
if voted:
sorted_items = [v.solution_id for v in voted]
ranks = dict([(v.solution_id, v.rank) for v in voted])
n = len(sorted_items)
last_enabled = -1
while ((-last_enabled) <= n) and (ranks[sorted_items[last_enabled]] == 9999):
last_enabled -= 1
for i in items.keys():
            if i not in ranks:
if last_enabled == -1:
sorted_items.append(i)
else:
sorted_items.insert(last_enabled + 1, i)
ranks[i] = 9998
else:
votes, num_voted = __cal_votes(pr, items.keys())
def cc1(i1, i2):
if votes[(i1, i2)] > votes[(i2, i1)]: return -1
if votes[(i1, i2)] < votes[(i2, i1)]: return +1
return 0
sorted_items = sorted(list(items.keys()), cc1)
ranks = dict([(i, 9998) for i in sorted_items])
return dict(problem=pr, items=items, sorted_items=sorted_items, ranks=ranks,
duser=duser, voted=voted, name="Vote")
def status():
pr, duser = __get_commons()
items = dict([(i.id, i.name) for i in pr.delphi_solution.select()])
i_ids = items.keys()
n = len(i_ids)
empty = dict(problem=pr, items=items, beans=[], duser=duser,
votes={}, scale={}, title=T("Scale of Results"), num_voted=0)
if n == 0:
return empty
votes, num_voted = __cal_votes(pr, i_ids)
scale = {}
if num_voted == 0:
return empty
for i1 in i_ids:
scale[i1] = 0
for i2 in i_ids:
if i1 == i2:
continue
tt2 = float(votes[(i1, i2)] + votes[(i2, i1)])
if votes[(i1, i2)] > votes[(i2, i1)]:
scale[i1] += __lookupTable(votes[(i1, i2)]/tt2)
elif votes[(i1, i2)] < votes[(i2, i1)]:
scale[i1] -= __lookupTable(votes[(i2, i1)]/tt2)
def cc2(i1, i2):
if scale[i1] > scale[i2]: return -1
if scale[i1] < scale[i2]: return +1
return 0
i_ids.sort(cc2)
beans_num = int((n+1) * 2)
bean_size = 10.0 * n / beans_num
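    # The accumulated scale values span roughly [-5n, 5n] (each pairwise win or
    # loss contributes at most 5.0 via __lookupTable), so that range is cut into
    # beans_num bins; each bin's color is interpolated between MAX_COLOR and
    # MIN_COLOR below.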
beans = []
i = 0
for j in range(beans_num):
color = "%02x%02x%02x" % tuple([int(((j*MIN_COLOR[k]) + ((beans_num-j)*MAX_COLOR[k])) / beans_num) for k in (0, 1, 2)])
limit = ((beans_num - j - 1) * bean_size) - (5 * n)
bean = []
while i < n and scale[i_ids[i]] >= limit:
bean.append(i_ids[i])
i += 1
beans.append((color, bean))
return dict(problem=pr, duser=duser, items=items, beans=beans, scale=scale,
title=T("Scale of Results"), votes=votes, num_voted=num_voted)
def discuss():
item_id = request.args(0)
item = db.delphi_solution[item_id]
if not item:
raise HTTP(404)
pr, duser = __get_commons(solution=item)
user = duser.user()
if user and duser.can_post:
form = SQLFORM(db.delphi_forum_post,
fields=["title", "post"],
labels=dict(post="%s %s:" % (user.first_name, user.last_name)))
else:
form = None
if form and form.accepts(request.post_vars, session):
post = request.post_vars["post"]
title = request.post_vars["title"]
post_html = ""
k = -1
for l in post.split("\n"):
l = l.strip()
old = k
k = 0
            while k < len(l) and l[k] == ">":
k += 1
if k != old:
if old > 0:
post_html += "</blockquote>"
else:
post_html += "</p>"
if k > 0:
post_html += "<blockquote class='delphi_q%d'>" % (((k - 1) % 6) + 1)
else:
post_html += "<p>"
else:
post_html += "<br/>"
post_html += l[k:]
if k > 0:
post_html += "</blockquote>"
db.delphi_forum_post.insert(title=title, solution_id=item, post=post, post_html=post_html)
session.flash = T("Your post was added successfully.")
elif form and form.errors:
session.error = T("There are errors")
return dict(item=item, problem=item.problem_id, duser=duser,
form=form, title=T("Discussion Forum"), authorised=False)
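# Worked illustration (comments only) of the quote-markup conversion performed
# inline in discuss() above: leading ">" characters select the blockquote
# nesting level, and consecutive lines at the same level are joined with <br/>.
# For example the post
#
#     >quoted text
#     reply text
#
# is converted to (class names follow the delphi_q1..delphi_q6 cycle used above):
#
#     </p><blockquote class='delphi_q1'>quoted text</blockquote><p>reply text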
|
|
# Copyright 2012 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api import converters
from neutron_lib.api import extensions
from neutron_lib.db import constants as db_const
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants
import six
from neutron.api.v2 import resource_helper
from neutron.conf import quota
# TODO(boden): remove these shims on l3 api def consumption
RouterNotFound = l3_exc.RouterNotFound
RouterInUse = l3_exc.RouterInUse
RouterInterfaceNotFound = l3_exc.RouterInterfaceNotFound
RouterInterfaceNotFoundForSubnet = l3_exc.RouterInterfaceNotFoundForSubnet
RouterInterfaceInUseByFloatingIP = l3_exc.RouterInterfaceInUseByFloatingIP
FloatingIPNotFound = l3_exc.FloatingIPNotFound
ExternalGatewayForFloatingIPNotFound = (
l3_exc.ExternalGatewayForFloatingIPNotFound)
FloatingIPPortAlreadyAssociated = l3_exc.FloatingIPPortAlreadyAssociated
RouterExternalGatewayInUseByFloatingIp = (
l3_exc.RouterExternalGatewayInUseByFloatingIp)
RouterInterfaceAttachmentConflict = l3_exc.RouterInterfaceAttachmentConflict
ROUTER = 'router'
ROUTERS = 'routers'
FLOATINGIP = 'floatingip'
FLOATINGIPS = '%ss' % FLOATINGIP
EXTERNAL_GW_INFO = 'external_gateway_info'
RESOURCE_ATTRIBUTE_MAP = {
ROUTERS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': db_const.NAME_FIELD_SIZE},
'is_visible': True, 'default': ''},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': converters.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {
'type:string': db_const.PROJECT_ID_FIELD_SIZE},
'is_visible': True},
EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'enforce_policy': True,
'validate': {
'type:dict_or_nodata': {
'network_id': {'type:uuid': None,
'required': True},
'external_fixed_ips': {
'convert_list_to':
converters.convert_kvp_list_to_dict,
'type:fixed_ips': None,
'default': None,
'required': False,
}
}
}}
},
FLOATINGIPS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'floating_ip_address': {'allow_post': True, 'allow_put': False,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None,
'enforce_policy': True},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': False, # Use False for input only attr
'default': None},
'floating_network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'router_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'port_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required_by_policy': True},
'fixed_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {
'type:string': db_const.PROJECT_ID_FIELD_SIZE},
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
}
# Register the configuration options
quota.register_quota_opts(quota.l3_quota_opts)
class L3(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron L3 Router"
@classmethod
def get_alias(cls):
return "router"
@classmethod
def get_description(cls):
return ("Router abstraction for basic L3 forwarding"
" between L2 Neutron networks and access to external"
" networks via a NAT gateway.")
@classmethod
def get_updated(cls):
return "2012-07-20T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
action_map = {'router': {'add_router_interface': 'PUT',
'remove_router_interface': 'PUT'}}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.L3,
action_map=action_map,
register_quota=True)
def update_attributes_map(self, attributes):
super(L3, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class RouterPluginBase(object):
@abc.abstractmethod
def create_router(self, context, router):
pass
@abc.abstractmethod
def update_router(self, context, id, router):
pass
@abc.abstractmethod
def get_router(self, context, id, fields=None):
pass
@abc.abstractmethod
def delete_router(self, context, id):
pass
@abc.abstractmethod
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
pass
@abc.abstractmethod
def add_router_interface(self, context, router_id, interface_info=None):
pass
@abc.abstractmethod
def remove_router_interface(self, context, router_id, interface_info):
pass
@abc.abstractmethod
def create_floatingip(self, context, floatingip):
pass
@abc.abstractmethod
def update_floatingip(self, context, id, floatingip):
pass
@abc.abstractmethod
def get_floatingip(self, context, id, fields=None):
pass
@abc.abstractmethod
def delete_floatingip(self, context, id):
pass
@abc.abstractmethod
def get_floatingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
def get_routers_count(self, context, filters=None):
raise NotImplementedError()
def get_floatingips_count(self, context, filters=None):
raise NotImplementedError()
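# --- Illustrative sketch (not part of the extension; assumes a development
# environment where neutron and neutron_lib are importable). Nothing below
# runs when Neutron itself loads this module. ---
if __name__ == "__main__":
    # The extension descriptor is consumed through its classmethods, so its
    # metadata can be inspected without instantiating L3.
    print(L3.get_name())
    print(L3.get_alias())
    print(L3.get_updated())
    # Attribute names advertised for the router resource defined above.
    print(sorted(RESOURCE_ATTRIBUTE_MAP[ROUTERS]))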
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "heatmapgl"
_path_str = "heatmapgl.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
        two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.heatmapgl.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.heatmapgl.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmapgl.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
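# --- Usage sketch (illustrative; assumes a standard plotly installation).
# Hoverlabel objects are usually created indirectly, e.g. via
# go.Heatmapgl(hoverlabel=dict(bgcolor="white")), but the class can also be
# constructed directly to inspect its serialized form. ---
if __name__ == "__main__":
    _example = Hoverlabel(bgcolor="white", bordercolor="black", namelength=20)
    print(_example.to_plotly_json())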
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional recurrent layers."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class ConvLSTMTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*testing_utils.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, sequence_len,
input_channel,
input_num_row, input_num_col)
else:
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
# test for return state:
x = keras.Input(batch_shape=inputs.shape)
kwargs = {'data_format': data_format,
'return_sequences': return_sequences,
'return_state': True,
'stateful': True,
'filters': filters,
'kernel_size': (num_row, num_col),
'padding': 'valid'}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
outputs = layer(x)
_, states = outputs[0], outputs[1:]
self.assertEqual(len(states), 2)
model = keras.models.Model(x, states[0])
state = model.predict(inputs)
self.assertAllClose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
# test for output shape:
testing_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={'data_format': data_format,
'return_sequences': return_sequences,
'filters': filters,
'kernel_size': (num_row, num_col),
'padding': 'valid'},
input_shape=inputs.shape)
def test_conv_lstm_statefulness(self):
# Tests for statefulness
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
with self.cached_session():
model = keras.models.Sequential()
kwargs = {'data_format': 'channels_last',
'return_sequences': False,
'filters': filters,
'kernel_size': (num_row, num_col),
'stateful': True,
'batch_input_shape': inputs.shape,
'padding': 'same'}
layer = keras.layers.ConvLSTM2D(**kwargs)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones_like(inputs))
# train once so that the states change
model.train_on_batch(np.ones_like(inputs),
np.random.random(out1.shape))
out2 = model.predict(np.ones_like(inputs))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out3.max(), out2.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones_like(inputs))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out4.max(), out5.max())
def test_conv_lstm_regularizers(self):
# check regularizers
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
with self.cached_session():
kwargs = {'data_format': 'channels_last',
'return_sequences': False,
'kernel_size': (num_row, num_col),
'stateful': True,
'filters': filters,
'batch_input_shape': inputs.shape,
'kernel_regularizer': keras.regularizers.L1L2(l1=0.01),
'recurrent_regularizer': keras.regularizers.L1L2(l1=0.01),
'activity_regularizer': 'l2',
'bias_regularizer': 'l2',
'kernel_constraint': 'max_norm',
'recurrent_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'padding': 'same'}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones(inputs.shape)))
self.assertEqual(len(layer.losses), 4)
def test_conv_lstm_dropout(self):
# check dropout
with self.cached_session():
testing_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={'data_format': 'channels_last',
'return_sequences': False,
'filters': 2,
'kernel_size': (3, 3),
'padding': 'same',
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(1, 2, 5, 5, 2))
def test_conv_lstm_cloning(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3)))
test_inputs = np.random.random((2, 4, 5, 5, 3))
reference_outputs = model.predict(test_inputs)
weights = model.get_weights()
# Use a new graph to clone the model
with self.cached_session():
clone = keras.models.clone_model(model)
clone.set_weights(weights)
outputs = clone.predict(test_inputs)
self.assertAllClose(reference_outputs, outputs, atol=1e-5)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping the test as OOM occurred with 1 GB budget.')
def test_conv_lstm_with_initial_state(self):
num_samples = 32
sequence_len = 5
encoder_inputs = keras.layers.Input((None, 32, 32, 3))
encoder = keras.layers.ConvLSTM2D(
filters=32, kernel_size=(3, 3), padding='same',
return_sequences=False, return_state=True)
_, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
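    # The encoder's final hidden and cell states seed the decoder below via
    # `initial_state`, the usual encoder-decoder wiring for ConvLSTM2D.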
decoder_inputs = keras.layers.Input((None, 32, 32, 4))
decoder_lstm = keras.layers.ConvLSTM2D(
filters=32, kernel_size=(3, 3), padding='same',
return_sequences=False, return_state=False)
decoder_outputs = decoder_lstm(decoder_inputs, initial_state=encoder_states)
output = keras.layers.Conv2D(
1, (3, 3), padding='same', activation='relu')(decoder_outputs)
model = keras.Model([encoder_inputs, decoder_inputs], output)
model.compile(
optimizer='sgd', loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
x_1 = np.random.rand(num_samples, sequence_len, 32, 32, 3)
x_2 = np.random.rand(num_samples, sequence_len, 32, 32, 4)
y = np.random.rand(num_samples, 32, 32, 1)
model.fit([x_1, x_2], y)
model.predict([x_1, x_2])
if __name__ == '__main__':
test.main()
|
|
"""
Support for ZWave HVAC devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/hvac.zwave/
"""
# Because we do not compile openzwave on CI
# pylint: disable=import-error
import logging
from blumate.components.hvac import DOMAIN
from blumate.components.hvac import HvacDevice
from blumate.components.zwave import (
ATTR_NODE_ID, ATTR_VALUE_ID, ZWaveDeviceEntity)
from blumate.components import zwave
from blumate.const import (TEMP_FAHRENHEIT, TEMP_CELSIUS)
_LOGGER = logging.getLogger(__name__)
CONF_NAME = 'name'
DEFAULT_NAME = 'ZWave Hvac'
REMOTEC = 0x5254
REMOTEC_ZXT_120 = 0x8377
REMOTEC_ZXT_120_THERMOSTAT = (REMOTEC, REMOTEC_ZXT_120, 0)
COMMAND_CLASS_SENSOR_MULTILEVEL = 0x31
COMMAND_CLASS_THERMOSTAT_MODE = 0x40
COMMAND_CLASS_THERMOSTAT_SETPOINT = 0x43
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 0x44
COMMAND_CLASS_CONFIGURATION = 0x70
WORKAROUND_ZXT_120 = 'zxt_120'
DEVICE_MAPPINGS = {
REMOTEC_ZXT_120_THERMOSTAT: WORKAROUND_ZXT_120
}
ZXT_120_SET_TEMP = {
'Heat': 1,
'Cool': 2,
'Dry Air': 8,
'Auto Changeover': 10
}
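# For the Remotec ZXT-120, each operation mode exposes its own setpoint value;
# the numbers above are the value indices that set_temperature() matches
# against so only the setpoint belonging to the active mode is written.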
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the ZWave Hvac devices."""
if discovery_info is None or zwave.NETWORK is None:
_LOGGER.debug("No discovery_info=%s or no NETWORK=%s",
discovery_info, zwave.NETWORK)
return
node = zwave.NETWORK.nodes[discovery_info[ATTR_NODE_ID]]
value = node.values[discovery_info[ATTR_VALUE_ID]]
value.set_change_verified(False)
add_devices([ZWaveHvac(value)])
_LOGGER.debug("discovery_info=%s and zwave.NETWORK=%s",
discovery_info, zwave.NETWORK)
# pylint: disable=too-many-arguments
class ZWaveHvac(ZWaveDeviceEntity, HvacDevice):
"""Represents a HeatControl hvac."""
# pylint: disable=too-many-public-methods, too-many-instance-attributes
def __init__(self, value):
"""Initialize the zwave hvac."""
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
ZWaveDeviceEntity.__init__(self, value, DOMAIN)
self._node = value.node
self._target_temperature = None
self._current_temperature = None
self._current_operation = None
self._operation_list = None
self._current_operation_state = None
self._current_fan_mode = None
self._fan_list = None
self._current_swing_mode = None
self._swing_list = None
self._unit = None
self._zxt_120 = None
self.update_properties()
# register listener
dispatcher.connect(
self.value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
# Make sure that we have values for the key before converting to int
if (value.node.manufacturer_id.strip() and
value.node.product_id.strip()):
specific_sensor_key = (int(value.node.manufacturer_id, 16),
int(value.node.product_id, 16),
value.index)
if specific_sensor_key in DEVICE_MAPPINGS:
if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZXT_120:
_LOGGER.debug("Remotec ZXT-120 Zwave Thermostat as HVAC")
self._zxt_120 = 1
def value_changed(self, value):
"""Called when a value has changed on the network."""
if self._value.node == value.node:
self.update_properties()
self.update_ha_state(True)
_LOGGER.debug("Value changed on network %s", value)
def update_properties(self):
"""Callback on data change for the registered node/value pair."""
# Set point
for value in self._node.get_values(
class_id=COMMAND_CLASS_THERMOSTAT_SETPOINT).values():
if int(value.data) != 0:
self._target_temperature = int(value.data)
# Operation Mode
for value in self._node.get_values(
class_id=COMMAND_CLASS_THERMOSTAT_MODE).values():
self._current_operation = value.data
self._operation_list = list(value.data_items)
_LOGGER.debug("self._operation_list=%s", self._operation_list)
# Current Temp
for value in self._node.get_values(
class_id=COMMAND_CLASS_SENSOR_MULTILEVEL).values():
self._current_temperature = int(value.data)
self._unit = value.units
# Fan Mode
for value in self._node.get_values(
class_id=COMMAND_CLASS_THERMOSTAT_FAN_MODE).values():
self._current_operation_state = value.data
self._fan_list = list(value.data_items)
_LOGGER.debug("self._fan_list=%s", self._fan_list)
_LOGGER.debug("self._current_operation_state=%s",
self._current_operation_state)
# Swing mode
if self._zxt_120 == 1:
for value in self._node.get_values(
class_id=COMMAND_CLASS_CONFIGURATION).values():
if value.command_class == 112 and value.index == 33:
self._current_swing_mode = value.data
self._swing_list = [0, 1]
_LOGGER.debug("self._swing_list=%s", self._swing_list)
@property
def should_poll(self):
"""No polling on ZWave."""
return False
@property
def current_fan_mode(self):
"""Return the fan speed set."""
return self._current_operation_state
@property
def fan_list(self):
"""List of available fan modes."""
return self._fan_list
@property
def current_swing_mode(self):
"""Return the swing mode set."""
return self._current_swing_mode
@property
def swing_list(self):
"""List of available swing modes."""
return self._swing_list
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
unit = self._unit
if unit == 'C':
return TEMP_CELSIUS
elif unit == 'F':
return TEMP_FAHRENHEIT
else:
_LOGGER.exception("unit_of_measurement=%s is not valid",
unit)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def current_operation(self):
"""Return the current operation mode."""
return self._current_operation
@property
def operation_list(self):
"""List of available operation modes."""
return self._operation_list
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
def set_temperature(self, temperature):
"""Set new target temperature."""
for value in self._node.get_values(
class_id=COMMAND_CLASS_THERMOSTAT_SETPOINT).values():
if value.command_class != 67:
continue
if self._zxt_120:
# ZXT-120 does not support get setpoint
self._target_temperature = temperature
if ZXT_120_SET_TEMP.get(self._current_operation) \
!= value.index:
continue
# ZXT-120 responds only to whole int
value.data = int(round(temperature, 0))
else:
value.data = int(temperature)
def set_fan_mode(self, fan):
"""Set new target fan mode."""
for value in self._node.get_values(
class_id=COMMAND_CLASS_THERMOSTAT_FAN_MODE).values():
if value.command_class == 68 and value.index == 0:
value.data = bytes(fan, 'utf-8')
def set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
for value in self._node.get_values(
class_id=COMMAND_CLASS_THERMOSTAT_MODE).values():
if value.command_class == 64 and value.index == 0:
value.data = bytes(operation_mode, 'utf-8')
def set_swing_mode(self, swing_mode):
"""Set new target swing mode."""
if self._zxt_120 == 1:
for value in self._node.get_values(
class_id=COMMAND_CLASS_CONFIGURATION).values():
if value.command_class == 112 and value.index == 33:
value.data = int(swing_mode)
|
|
from cheeseprism.utils import path
from cheeseprism.utils import resource_spec
from itertools import count
from mock import Mock
from mock import patch
from pprint import pformat as pprint
from stuf import stuf
import futures
import json
import logging
import subprocess
import textwrap
import unittest
logger = logging.getLogger(__name__)
here = path(__file__).parent
def test_data_from_path():
from cheeseprism import index
datafile = here / 'index.json'
assert index.IndexManager.data_from_path(datafile) == {}
datafile.write_text("{}")
assert index.IndexManager.data_from_path(datafile) == {}
class IndexTestCase(unittest.TestCase):
counter = count()
index_parent = here / "test-indexes"
dummy = here / "dummypackage/dist/dummypackage-0.0dev.tar.gz"
dum_whl = here / "dummypackage/dist/dummypackage-0.0dev-py27-none-any.whl"
@classmethod
def get_base(cls):
return path(resource_spec(cls.index_parent))
@property
def base(self): return self.get_base()
def make_one(self, pkg='dummy', index_path=None):
from cheeseprism import index
executor = futures.ThreadPoolExecutor(1)
if index_path is None:
index_path = self.new_path('test-index')
idx = index.IndexManager(index_path, executor=executor)
pkg = getattr(self, pkg)
pkg.copy(idx.path)
self.dummypath = idx.path / pkg.name
return idx
def new_path(self, index_name):
count = self.count = next(self.counter)
return self.base / ("%s-%s" %(count, index_name))
def test_index_init_baderrorfolder(self):
bfp = self.new_path('badfolder')
bfp.mkdir()
(bfp / '_errors').touch()
idx = self.make_one(index_path=bfp)
assert '_errors' in {x.name for x in idx.path.dirs()}
assert '_errors.bak' in {x.name for x in idx.path.files()}
def test_register_archive(self):
self.im = self.make_one()
pkgdata, md5 = self.im.register_archive(self.dummypath)
assert md5 == '3ac58d89cb7f7b718bc6d0beae85c282'
assert pkgdata
idxjson = self.im.data_from_path(self.im.datafile_path)
assert md5 in idxjson
assert idxjson[md5] == pkgdata
def test_write_datafile(self):
"""
create and write archive data to index.json
"""
self.im = self.make_one()
data = self.im.write_datafile(hello='computer')
assert 'hello' in data
assert self.im.datafile_path.exists()
assert 'hello' in self.im.data_from_path(self.im.datafile_path)
def test_write_datafile_w_existing_datafile(self):
"""
write data to an existing datafile
"""
self.im = self.make_one()
data = self.im.write_datafile(hello='computer')
assert self.im.datafile_path.exists()
data = self.im.write_datafile(hello='operator')
assert data['hello'] == 'operator'
assert self.im.data_from_path(self.im.datafile_path)['hello'] == 'operator'
def test_regenerate_index_write_html_false(self):
im = self.make_one()
im.write_html = False
home, leaves = im.regenerate_all()
pth = im.path
assert home is None
assert not (pth / im.root_index_file).exists()
def test_regenerate_index(self):
self.im = self.make_one()
home, leaves = self.im.regenerate_all()
pth = self.im.path
file_structure = [(x.parent.name, x.name) for x in pth.walk()]
index_name = u'%s-test-index' %self.count
expected = [(index_name, u'dummypackage'),
(u'dummypackage', u'index.html'),
(path(u'dummypackage'), path(u'index.json')),
(index_name, u'dummypackage-0.0dev.tar.gz'),
(index_name, u'index.html')]
assert len(leaves) == 1
assert leaves[0].exists()
assert leaves[0].name == 'index.json'
assert leaves[0].parent.name == 'dummypackage'
etxt = pprint(sorted(str(x) for x in expected))
fstxt = pprint(sorted(str(x) for x in file_structure))
assert set(expected).issubset(file_structure), \
textwrap.dedent("""
Expected not a subset of result::
expected:
%s
actual:
%s""") %(etxt, fstxt)
def test_leafdata(self):
self.im = self.make_one()
fpath = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
dist = Mock(name='dist')
distinfo = dist.name, dist.version = 'distname', 'version'
data = self.im.leafdata(fpath, dist)
assert distinfo == (data['name'], data['version'])
assert data['md5'] == fpath.md5hex
assert data['size'] == fpath.size
assert data['filename'] == fpath.name
assert 'mtime' in data
assert data['ctime'] == fpath.ctime
assert 'atime' in data
def test_rebuild_leaf_subscriber(self):
"""
Cover event subscriber
"""
from cheeseprism.event import PackageAdded
from cheeseprism.index import rebuild_leaf
self.im = self.make_one()
event = PackageAdded(self.im, here / path('dummypackage2/dist/dummypackage-0.1.tar.gz'))
with patch('cheeseprism.index.IndexManager.regenerate_leaf') as rl:
out = rebuild_leaf(event)
assert out is not None
assert rl.call_args == (('dummypackage',), {})
def test_rebuild_leaf_subscriber_existing_leaf(self):
from cheeseprism.event import PackageAdded
from cheeseprism.index import rebuild_leaf
self.im = self.make_one()
self.im.regenerate_leaf('dummypackage')
distpath = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
event = PackageAdded(self.im, path=distpath)
out = rebuild_leaf(event)
assert len(out) == 2
def test_html_free_remove_index(self):
idx = self.make_one()
home, leaves = idx.regenerate_all()
free_gen = idx._leaf_html_free(idx.path / 'dummypackage', idx.path.files("*.gz"))
linkout = next(free_gen)
assert 'index.html' not in {f.name for f in linkout.parent.files()}
def test__html_free_leafdir_disappears(self):
idx = self.make_one()
home, leaves = idx.regenerate_all()
leafdir = idx.path / 'dummypackage'
free_gen = idx._leaf_html_free(leafdir, idx.path.files("*.gz"))
leafdir.rmtree()
with self.assertRaises(StopIteration):
next(free_gen)
def test__html_free_version_disappears(self):
idx = self.make_one()
home, leaves = idx.regenerate_all()
leafdir = idx.path / 'dummypackage'
versions = idx.path.files('*.gz')
[x.remove() for x in versions]
free_gen = idx._leaf_html_free(leafdir, versions)
with self.assertRaises(StopIteration):
next(free_gen)
def test_add_version_to_leaf_html_free(self):
idx = self.make_one()
idx.write_html = False
name = 'dummypackage'
idx.regenerate_leaf(name)
distpath = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
distpath.copy(idx.path / distpath.name)
data = idx.add_version_to_leaf(distpath, name)
assert len(data) == 2
assert all(x.islink() for x in (idx.path / name).files("*.gz"))
def test_cleanup_links_version_removed(self):
idx = self.make_one()
idx.write_html = False
name = 'dummypackage'
idx.regenerate_leaf(name)
leafdir = idx.path / name
leafjson = leafdir / 'index.json'
rem, miss = idx.cleanup_links(leafdir, leafjson, [])
assert len(rem) == 1
assert rem[0].name == 'dummypackage-0.0dev.tar.gz'
def test_cleanup_links_archive_missing(self):
idx = self.make_one()
idx.write_html = False
name = 'dummypackage'
idx.regenerate_leaf(name)
leafdir = idx.path / name
leafjson = leafdir / 'index.json'
badpath = Mock()
badpath.name = 'dummypackage-0.0dev.tar.gz'
badpath.exists.return_value = False
rem, miss = idx.cleanup_links(leafdir, leafjson, [badpath])
assert len(miss) == 1
assert miss[0] == badpath.name
assert leafjson.text() == '[]'
def test_add_version_to_leaf_nodups(self):
idx = self.make_one()
name = 'dummypackage'
idx.regenerate_leaf(name)
distpath = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
distpath.copy(idx.path / distpath.name)
data = idx.add_version_to_leaf(distpath, name)
assert idx.path.exists()
data = idx.add_version_to_leaf(distpath, name)
assert len(data) == 2
def test_add_version_to_leaf_w_remove_file_cleans_up_leafdata(self):
idx = self.make_one()
idx.write_html = False
name = 'dummypackage'
leafpath = idx.path / name
distpath = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
idx.regenerate_leaf(name)
distpath.copy(idx.path)
assert len(leafpath.files()) == 2
distpath = idx.path / distpath.name
data = idx.add_version_to_leaf(distpath, name)
distpath2 = idx.path / self.dum_whl.name
self.dum_whl.copy(distpath2)
data = idx.add_version_to_leaf(distpath2, name)
assert len(data) == 3
assert (leafpath / distpath.name).exists()
distpath.remove()
leafdata = idx.cleanup_leafdata(leafpath, leafpath / 'index.json')
assert len(leafdata) == 2
def test_regenerate_leaf(self):
self.im = self.make_one()
[x for x in self.im.regenerate_all()]
leafindex = self.im.path / 'dummypackage/index.html'
new_arch = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
new_arch.copy(self.im.path)
added = self.im.path / new_arch.name
before_txt = leafindex.text()
info = self.im.pkginfo_from_file(added)
out = self.im.regenerate_leaf(info.name)
assert before_txt != out.text()
def test_no_leaf_index_write_html_false(self):
"""
leaf index should not exist if index.write_html is False
"""
self.im = self.make_one()
self.im.write_html = False
[x for x in self.im.regenerate_all()]
leafindex = self.im.path / 'dummypackage/index.html'
assert not leafindex.exists()
def test_regenerate_leaf_html_free(self):
self.im = self.make_one()
self.im.write_html = False
[x for x in self.im.regenerate_all()]
new_arch = here / path('dummypackage2/dist/dummypackage-0.1.tar.gz')
new_arch.copy(self.im.path)
added = self.im.path / new_arch.name
info = self.im.pkginfo_from_file(added)
out = self.im.regenerate_leaf(info.name)
with open(out) as fd:
data = json.load(fd)
assert new_arch.name in set([x['filename'] for x in data])
symlink = out.parent / new_arch.name
assert symlink.exists() is True
@patch('pyramid.threadlocal.get_current_registry')
def test_bulk_add_pkg(self, getreg):
from cheeseprism.index import bulk_add_pkgs
self.im = self.make_one()
pkg = stuf(name='dummypackage', version='0.1',
filename=self.dummy.name)
pkgs = pkg,
index = Mock(name='index')
index.path = self.im.path
leaves, archs = bulk_add_pkgs(index, pkgs)
assert len(archs) == 1
assert len(leaves) == 1
assert 'dummypackage' in leaves
assert archs[0].basename() == u'dummypackage-0.0dev.tar.gz'
assert index.regenerate_leaf.called
@patch('pyramid.threadlocal.get_current_registry')
def test_bulk_add_pkg_regen_error(self, getreg):
with patch('cheeseprism.index.IndexManager.regenerate_leaf') as rl:
rl.side_effect = ValueError('BAAAAAAD')
from cheeseprism.index import bulk_add_pkgs
idx = self.make_one()
pkg = stuf(name='dummypackage', version='0.1',
filename=self.dummy.name)
pkgs = pkg,
assert bulk_add_pkgs(idx, pkgs)
def tearDown(self):
logger.debug("teardown: %s", self.count)
if self.base.exists():
dirs = self.base.dirs()
logger.info(pprint(dirs))
logger.info(pprint([x.rmtree() for x in dirs]))
def test_group_by_magnitude():
from cheeseprism.index import IndexManager
fiver = range(5)
assert IndexManager.group_by_magnitude(fiver)[0] == fiver
assert next(IndexManager.group_by_magnitude(range(101))) == range(10)
assert next(IndexManager.group_by_magnitude(range(1001))) == range(100)
class ClassOrStaticMethods(unittest.TestCase):
def test_move_on_error(self):
from cheeseprism.index import ArchiveUtil
exc, path = Mock(), Mock()
path.basename.return_value = '_path_'
ArchiveUtil.move_on_error('errors', exc, path)
assert path.rename.called
assert path.rename.call_args[0][0] == 'errors/_path_'
def test_pkginfo_from_file_whl(self):
"""
.pkginfo_from_file: wheel
"""
from cheeseprism.index import IndexManager
with patch('pkginfo.wheel.Wheel', new=Mock(return_value=True)):
assert IndexManager.pkginfo_from_file('blah.whl') is True
@patch('pkginfo.bdist.BDist', new=Mock(return_value=True))
def test_pkginfo_from_file_egg(self):
"""
.pkginfo_from_file: bdist
"""
from cheeseprism.index import IndexManager
assert IndexManager.pkginfo_from_file('blah.egg') is True
@patch('pkginfo.sdist.SDist', new=Mock(return_value=True))
def test_pkginfo_from_file_sdist(self):
"""
.pkginfo_from_file: sdist
"""
from cheeseprism.index import IndexManager
for ext in ('.gz','.tgz', '.bz2', '.zip'):
assert IndexManager.pkginfo_from_file('blah.%s' %ext) is True
def test_pkginfo_from_bad_ext(self):
"""
.pkginfo_from_file with unrecognized extension
"""
from cheeseprism.index import IndexManager
with self.assertRaises(RuntimeError):
IndexManager.pkginfo_from_file('adfasdkfha.adkfhalsdk')
def test_pkginfo_from_bad_ext_handled(self):
"""
.pkginfo_from_file with unrecognized extension
"""
from cheeseprism.index import IndexManager
handler = Mock(name='handler')
IndexManager.pkginfo_from_file('adfasdkfha.adkfhalsdk', handle_error=handler)
assert handler.call_args[0][1] == 'adfasdkfha.adkfhalsdk'
assert isinstance(handler.call_args[0][0], RuntimeError)
def test_pkginfo_from_no_ext(self):
"""
.pkginfo_from_file with no extension
"""
from cheeseprism.index import IndexManager
with self.assertRaises(RuntimeError):
IndexManager.pkginfo_from_file('adfasdkfha')
def test_pkginfo_from_file_exc_and_handler(self):
"""
.pkginfo_from_file with exception and handler
"""
from cheeseprism.index import IndexManager
exc = Exception("BOOM")
with patch('pkginfo.bdist.BDist', side_effect=exc):
eh = Mock(name='error_handler')
IndexManager.pkginfo_from_file('bad.egg', handle_error=eh)
assert eh.called
assert eh.call_args[0] == (exc, 'bad.egg'), eh.call_args[0]
def test_pkginfo_from_file_exc(self):
"""
.pkginfo_from_file with exception and no handler
"""
from cheeseprism.index import IndexManager
exc = ValueError("BOOM")
with self.assertRaises(ValueError):
with patch('pkginfo.bdist.BDist', side_effect=exc):
IndexManager.pkginfo_from_file('bad.egg')
def test_cleanup():
assert not IndexTestCase.get_base().dirs()
def test_noop():
from cheeseprism.index import noop
assert noop() is None
def test_async_bulk_update_at_start():
from cheeseprism.index import async_bulk_update_at_start as func
event = Mock()
thread_ctor = Mock()
func(event, thread=thread_ctor)
def test_bulk_update_subscriber():
from cheeseprism.index import bulk_update_index
from cheeseprism.event import IndexUpdate
event = Mock(name='event', spec=IndexUpdate(Mock(), Mock()))
idx = event.index = Mock(name='idx')
idx.attach_mock(Mock(return_value=[]), 'update_data')
with patch('cheeseprism.index.bulk_add_pkgs', return_value=True):
assert bulk_update_index(event) == True
|
|
"""Includes the Validation and Validator classes."""
import idb.util as util
class Validation(object):
"""A simple validation mechanism, designed for use by idb.data.ModelBase."""
def __init__(self, callback, property_name=None, message=None, is_simple=True,
is_property_specific=True):
"""
is_simple - when True the validation requires no outside information to process, if False,
outside information (such as a database connection) is required.
is_property_specific - when True, the value of the property under question is passed to the
validation callback, when false the entire model is passed.
"""
self.property_name = property_name
self.callback = callback
self.message = message
self.is_simple = is_simple
self.filters = []
self.pretty_property_name = None
self.is_property_specific = is_property_specific
def add_filter(self, filter_):
"""Filters are methods that accept a model and return True if the model should be validated or
False if it should not. This makes it possible to exempt models from validations when
necessary."""
self.filters.append(filter_)
return self
def set_pretty_property_name(self, ppn):
"""Set the pretty property name for the validation and return the validation instance."""
self.pretty_property_name = ppn
return self
def is_valid(self, model, validator=None):
"""Returns true if the model passes the validation, and false if not. Validator must be
present_optional if validation is not 'simple'.
"""
if self.property_name and self.is_property_specific:
arg0 = getattr(model, self.property_name)
else:
arg0 = model
if self.is_simple:
is_valid = self.callback(arg0)
else:
is_valid = self.callback(arg0, validator)
return (is_valid, None if is_valid else (self.message or "is invalid"))
def validate(self, model, validator=None):
"""Checks the model against all filters, and if it shoud be validated, runs the validation. if
the model is invalid, an error is added to the model. Then the validity value is returned.
"""
        for filter_ in self.filters:
            if not filter_(model):
return True
is_valid, message = self.is_valid(model, validator)
if not is_valid:
model.add_error(self.pretty_property_name or self.property_name, message)
return is_valid
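    # Illustrative usage (a sketch only; `model` stands for any ModelBase-style
    # object exposing the named attribute and an add_error method):
    #
    #   v = Validation.is_present("name").set_pretty_property_name("Name")
    #   v.validate(model)  # records ("Name", "cannot be blank") when model.name is blank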
@staticmethod
def _is_present(v):
"""Returns true if the value is not None, and if it is either not a string, or a string with
length > 0.
"""
if v is None:
return False
if isinstance(v, str):
return len(v) > 0
return True
@staticmethod
def is_present(property_name):
"""Returns a validation that returns false when the property is None or an empty string."""
return Validation(Validation._is_present , property_name, "cannot be blank")
@staticmethod
def is_length(property_name, *, min_length=1, max_length=None, present_optional=False):
"""Returns a validation that checks the length of a string."""
def check(v):
if not v:
return present_optional
else:
if len(v) >= min_length:
                if max_length is None:
return True
else:
return len(v) <= max_length
else:
return False
        if max_length:
            message = "must be between {0} and {1} characters long".format(min_length, max_length)
        else:
            message = "must be at least {0} characters long".format(min_length)
return Validation(check, property_name, message)
@staticmethod
def matches(property_name, regex, *, present_optional=False, message=None):
"""Returns a validation that checks a property against a regex."""
def check(v):
if not v:
return present_optional
else:
return True if regex.search(v) else False
return Validation(check, property_name, message)
@staticmethod
def is_numeric(property_name, *, numtype="float", min=None, max=None, present_optional=False,
message=None):
"""Returns a validation that checks a property as a number, with optional range constraints."""
if numtype == "int":
cast = util.try_parse_int
elif numtype == "decimal":
cast = util.try_parse_decimal
elif numtype == "float":
cast = util.try_parse_float
else:
raise ValueError("numtype argument must be one of: int, decimal, float")
def check(v):
if v is None:
return present_optional
else:
isn, nv = cast(v)
if not isn:
return False
else:
if min is not None and nv < min:
return False
if max is not None and nv > max:
return False
return True
if not message:
p = ["must be a"]
if numtype == "int":
p.append("whole number")
else:
p.append("number")
if min is not None and max is not None:
p.append("between {0} and {1}".format(min, max))
elif min is not None:
p.append("greater than or equal to {0}".format(min))
elif max is not None:
p.append("less than or equal to {0}".format(max))
message = " ".join(p)
return Validation(check, property_name, message)
@staticmethod
def is_date(property_name, *, format=None, present_optional=False, message=None):
"""Returns a validation that checks a value as a date."""
# NOTE: Not currently using format param
def check(v):
if v is None:
return present_optional
else:
isd, dv = util.try_parse_date(v)
return isd
return Validation(check, property_name, message)
@staticmethod
def is_datetime(property_name, *, format=None, present_optional=False, message=None):
"""Returns a validation that checks a value as a datetime."""
# NOTE: Not currently using format param
def check(v):
if v is None:
return present_optional
else:
isd, dv = util.try_parse_datetime(v)
return isd
return Validation(check, property_name, message)
@staticmethod
def is_in(property_name, set_values, *, present_optional=False, message=None):
"""Returns a validation that checks that a value is contained within a given set."""
def check(v):
if v is None:
return present_optional
else:
return v in set_values
return Validation(check, property_name, message)
@staticmethod
def is_unique(keys, *, scope=None, comparison_operators=None, present_optional=False,
message=None):
"""Returns a validation that makes sure the given value is unique for a table and optionally a
scope."""
def check(pn, validator):
m = validator.model
da = validator.data_access
pkname = m.primary_key_name
pk = m.primary_key
if isinstance(keys, str):
k = getattr(m, keys)
if present_optional and k is None:
return True
if comparison_operators:
if isinstance(comparison_operators, str):
op = comparison_operators
else:
op = comparison_operators[0]
else:
op = " = "
constraints = [(keys, k, op)]
else:
if comparison_operators:
ops = comparison_operators
else:
ops = [" = "] * len(keys)
constraints = list(zip(keys, [getattr(m, key) for key in keys], ops))
if scope:
if comparison_operators:
ops = comparison_operators[len(constraints):]
else:
ops = [" = "] * len(scope)
constraints.extend(zip(scope, [getattr(m, s) for s in scope], ops))
dupe = da.find(m.table_name, constraints, columns=pkname)
if dupe is None:
return True
if isinstance(pkname, str):
return dupe[0] == pk
else:
return tuple(dupe) == tuple(pk)
return Validation(check, keys, message or "is already taken", is_simple=False)
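# Hypothetical usage sketch: Validation.is_unique("email", scope=["account_id"])
# builds a non-simple validation that asks the validator's data_access layer
# whether another row in the model's table already has the same email within
# the same account. A match on the model's own primary key does not count as a
# duplicate, so updating an existing row does not collide with itself.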
class Validator(object):
"""Holds a collection of validations and methods to work with them"""
def __init__(self, validations=None, *, fail_fast=False):
self.validations = validations or []
self.model = None
self.data_access = None
self.fail_fast = fail_fast
def add(self, validation):
"""Adds a validation to the collection."""
self.validations.append(validation)
def validate(self, model, data_access=None, *, fail_fast=None):
"""Validates a model against the collection of validations."""
if fail_fast is None:
fail_fast = self.fail_fast
self.model = model
self.data_access = data_access
is_valid = True
for validation in self.validations:
if not validation.validate(model, self):
is_valid = False
if fail_fast:
break
return is_valid
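# End-to-end sketch (model and field names here are illustrative only):
#
#   validator = Validator([
#       Validation.is_present("name"),
#       Validation.is_length("name", min_length=2, max_length=80),
#       Validation.is_numeric("age", numtype="int", min=0),
#   ])
#   if not validator.validate(model):
#       # model.add_error(...) has been called once per failed validation
#       ...
#
# Non-simple validations such as Validation.is_unique(...) additionally need a
# data_access object: validator.validate(model, data_access).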
|
|
from __future__ import unicode_literals
import unittest
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from operator import attrgetter, itemgetter
from uuid import UUID
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models import F, Q, Max, Min, Value
from django.db.models.expressions import Case, When
from django.test import TestCase
from django.utils import six
from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel
try:
from PIL import Image
except ImportError:
Image = None
class CaseExpressionTests(TestCase):
@classmethod
def setUpTestData(cls):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string='4')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
# GROUP BY on Oracle fails with TextField/BinaryField; see #24096.
cls.non_lob_fields = [
f.name for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created) and not isinstance(f, (models.BinaryField, models.TextField))
]
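# The seven CaseTestModel rows created above have integer values
# 1, 2, 3, 2, 3, 3, 4 (in pk order), which is why the expected result lists in
# the assertions below contain seven entries in that order.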
def test_annotate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=1),
When(integer=2, then=2),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer=1, then=F('o2o_rel__integer') + 1),
When(integer=2, then=F('o2o_rel__integer') + 3),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer2=F('o2o_rel__integer'), then=Value('equal')),
When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
f_plus_3=F('integer') + 3,
).annotate(
f_test=Case(
When(integer=1, then='f_plus_1'),
When(integer=2, then='f_plus_3'),
default='integer',
),
).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).annotate(
f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('f_plus_1'), then=Value('+1')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F('integer') - 2,
).annotate(
test=Case(
When(f_minus_2=-1, then=Value('negative one')),
When(f_minus_2=0, then=Value('zero')),
When(f_minus_2=1, then=Value('one')),
default=Value('other'),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)],
transform=itemgetter('integer', 'test', 'min', 'max')
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer2=F('min'), then=Value('min')),
When(integer2=F('max'), then=Value('max')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')],
transform=itemgetter('integer', 'integer2', 'test')
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(max=3, then=Value('max = 3')),
When(max=4, then=Value('max = 4')),
default=Value(''),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'),
(3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')],
transform=itemgetter('integer', 'max', 'test')
)
def test_combined_expression(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
) + 1,
).order_by('pk'),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter('integer', 'test')
)
if connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 7, 0):
# There is a bug in sqlite < 3.7.0, where placeholder order is lost.
# Thus, the above query returns <condition_value> + <result_value>
# for each matching case instead of <result_value> + 1 (#24148).
test_combined_expression = unittest.expectedFailure(test_combined_expression)
def test_in_subquery(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F('integer2'), then='pk'),
When(integer=4, then='pk'),
output_field=models.IntegerField(),
),
).values('test')).order_by('pk'),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter('integer', 'integer2')
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(
When(integer=1, then=1),
output_field=models.IntegerField(),
)),
two=models.Sum(Case(
When(integer=2, then=1),
output_field=models.IntegerField(),
)),
three=models.Sum(Case(
When(integer=3, then=1),
output_field=models.IntegerField(),
)),
four=models.Sum(Case(
When(integer=4, then=1),
output_field=models.IntegerField(),
)),
),
{'one': 1, 'two': 2, 'three': 3, 'four': 1}
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(When(integer=1, then='integer'))),
two=models.Sum(Case(When(integer=2, then=F('integer') - 1))),
three=models.Sum(Case(When(integer=3, then=F('integer') + 1))),
),
{'one': 1, 'two': 2, 'three': 12}
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=models.Sum(Case(
When(integer2=F('integer'), then=1),
output_field=models.IntegerField(),
)),
plus_one=models.Sum(Case(
When(integer2=F('integer') + 1, then=1),
output_field=models.IntegerField(),
)),
),
{'equal': 3, 'plus_one': 4}
)
def test_filter(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
default=1,
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('integer') + 1),
When(integer=3, then=F('integer')),
default='integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string=Case(
When(integer2=F('integer'), then=Value('2')),
When(integer2=F('integer') + 1, then=Value('3')),
output_field=models.CharField(),
)).order_by('pk'),
[(3, 4, '3'), (2, 2, '2'), (3, 4, '3')],
transform=attrgetter('integer', 'integer2', 'string')
)
def test_filter_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('o2o_rel__integer') + 1),
When(integer=3, then=F('o2o_rel__integer')),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(o2o_rel__integer=1, then=1),
When(o2o_rel__integer=2, then=3),
When(o2o_rel__integer=3, then=4),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f=F('integer'),
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(integer=2, then='f_plus_1'),
When(integer=3, then='f'),
),
).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer=Case(
When(integer2=F('integer'), then=2),
When(integer2=F('f_plus_1'), then=3),
output_field=models.IntegerField(),
),
).order_by('pk'),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(f_plus_1=3, then=3),
When(f_plus_1=4, then=4),
default=1,
output_field=models.IntegerField(),
),
).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer2=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(integer2=F('min'), then=2),
When(integer2=F('max'), then=3),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(max=3, then=2),
When(max=4, then=3),
),
).order_by('pk'),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter('integer', 'integer2', 'max')
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'string')
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'integer2')
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)],
transform=attrgetter('string', 'integer')
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'string')
)
def test_update_with_join_in_condition_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
output_field=models.IntegerField(),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'big_integer')
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
# fails on postgresql on Python 2.7 if output_field is not
# set explicitly
When(integer=1, then=Value(b'one', output_field=models.BinaryField())),
When(integer=2, then=Value(b'two', output_field=models.BinaryField())),
default=Value(b'', output_field=models.BinaryField()),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')],
transform=lambda o: (o.integer, six.binary_type(o.binary))
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=True),
When(integer=2, then=True),
default=False,
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)],
transform=attrgetter('integer', 'boolean')
)
def test_update_comma_separated_integer(self):
CaseTestModel.objects.update(
comma_separated_integer=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2,2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2,2'), (3, ''), (2, '2,2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'comma_separated_integer')
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=date(2015, 1, 1)),
When(integer=2, then=date(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date')
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=datetime(2015, 1, 1)),
When(integer=2, then=datetime(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date_time')
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Decimal('1.1')),
When(integer=2, then=Decimal('2.2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, Decimal('1.1')), (2, Decimal('2.2')), (3, None), (2, Decimal('2.2')), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'decimal')
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing timedeltas
When(integer=1, then=Value(timedelta(1), output_field=models.DurationField())),
When(integer=2, then=Value(timedelta(2), output_field=models.DurationField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'duration')
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value('1@example.com')),
When(integer=2, then=Value('2@example.com')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1@example.com'), (2, '2@example.com'), (3, ''), (2, '2@example.com'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'email')
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.file))
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'file_path')
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=1.1),
When(integer=2, then=2.2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'float')
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.image))
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
# fails on postgresql if output_field is not set explicitly
When(integer=1, then=Value('1.1.1.1')),
When(integer=2, then=Value('2.2.2.2')),
output_field=models.GenericIPAddressField(),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'generic_ip_address')
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'null_boolean')
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_integer')
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_small_integer')
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'slug')
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'small_integer')
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=['1', '2']).update(
string=Case(
When(integer=1, then=Value('1', output_field=models.CharField())),
When(integer=2, then=Value('2', output_field=models.CharField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'),
[(1, '1'), (2, '2'), (2, '2')],
transform=attrgetter('integer', 'string')
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'text')
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing times
When(integer=1, then=Value(time(1), output_field=models.TimeField())),
When(integer=2, then=Value(time(2), output_field=models.TimeField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'time')
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value('http://1.example.com/')),
When(integer=2, then=Value('http://2.example.com/')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'),
(3, ''), (3, ''), (4, '')
],
transform=attrgetter('integer', 'url')
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing UUIDs
When(integer=1, then=Value(
UUID('11111111111111111111111111111111'),
output_field=models.UUIDField(),
)),
When(integer=2, then=Value(
UUID('22222222222222222222222222222222'),
output_field=models.UUIDField(),
)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, UUID('11111111111111111111111111111111')), (2, UUID('22222222222222222222222222222222')), (3, None),
(2, UUID('22222222222222222222222222222222')), (3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'uuid')
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=obj1.pk),
When(integer=2, then=obj2.pk),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'fk_id')
)
def test_lookup_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value('less than 2')),
When(integer__gt=2, then=Value('greater than 2')),
default=Value('equal to 2'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'),
(3, 'greater than 2'), (4, 'greater than 2')
],
transform=attrgetter('integer', 'test')
)
def test_lookup_different_fields(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'),
(3, 3, 'default'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_combined_q_object(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'),
(3, 3, 'when'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_order_by_conditional_implicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
)).order_by('test', 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_order_by_conditional_explicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
)).order_by(F('test').asc(), 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
class CaseDocumentationExamples(TestCase):
@classmethod
def setUpTestData(cls):
Client.objects.create(
name='Jane Doe',
account_type=Client.REGULAR,
registered_on=date.today() - timedelta(days=36),
)
Client.objects.create(
name='James Smith',
account_type=Client.GOLD,
registered_on=date.today() - timedelta(days=5),
)
Client.objects.create(
name='Jack Black',
account_type=Client.PLATINUM,
registered_on=date.today() - timedelta(days=10 * 365),
)
def test_simple_example(self):
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(account_type=Client.GOLD, then=Value('5%')),
When(account_type=Client.PLATINUM, then=Value('10%')),
default=Value('0%'),
output_field=models.CharField(),
),
).order_by('pk'),
[('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_lookup_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(registered_on__lte=a_year_ago, then=Value('10%')),
When(registered_on__lte=a_month_ago, then=Value('5%')),
default=Value('0%'),
output_field=models.CharField(),
),
).order_by('pk'),
[('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_conditional_update_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
Client.objects.update(
account_type=Case(
When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)),
When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)),
default=Value(Client.REGULAR),
),
)
self.assertQuerysetEqual(
Client.objects.all().order_by('pk'),
[('Jane Doe', 'G'), ('James Smith', 'R'), ('Jack Black', 'P')],
transform=attrgetter('name', 'account_type')
)
def test_conditional_aggregation_example(self):
Client.objects.create(
name='Jean Grey',
account_type=Client.REGULAR,
registered_on=date.today(),
)
Client.objects.create(
name='James Bond',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
Client.objects.create(
name='Jane Porter',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
self.assertEqual(
Client.objects.aggregate(
regular=models.Sum(Case(
When(account_type=Client.REGULAR, then=1),
output_field=models.IntegerField(),
)),
gold=models.Sum(Case(
When(account_type=Client.GOLD, then=1),
output_field=models.IntegerField(),
)),
platinum=models.Sum(Case(
When(account_type=Client.PLATINUM, then=1),
output_field=models.IntegerField(),
)),
),
{'regular': 2, 'gold': 1, 'platinum': 3}
)
|
|
# -*- coding: utf-8 -*-
"""
Script to "calculate" the detector.
The script estimates the number of photons landing on the scintillator
from the source and the number of photons reaching the detector.
It also displays the geometrical situation depending on the chosen parameters.
You can run this script to produce several frames of output like so:
(or use the command at the end of the script to also start Fiji and do some
more stuff)
for f in {10..15..1};
do for o in {45..50..1};
do for s in {5..10..1};
do ./CalculateDetector.py -f $f -o $o -s $s -p;
done;
done;
done
"""
import numpy
from scipy import constants
from scipy import integrate
import matplotlib.pylab as plt
from matplotlib.patches import Wedge, Rectangle
from optparse import OptionParser
import sys
import os
# ##################### SETUP ######################
# Use Pythons Optionparser to define and read the options, and also
# give some help to the user
parser = OptionParser()
usage = "usage: %prog [options] arg"
parser.add_option('-s', '--ScreenSize', dest='FOV', type='float', default=4.5,
help='Field of view in centimeters, i.e. desired screen '
'size (default=4.5 cm)', metavar='43')
parser.add_option('-o', '--OpeningAngle', dest='OpeningAngle', default=90.0,
type='float',
help='Opening angle of the lens in degrees (default=90)',
metavar='45')
parser.add_option('-n', '--NumericalAperture', dest='NA', default=0.4,
type='float',
help='Numerical Aperture of the lens',
metavar='0.6')
parser.add_option('-f', '--FStop', dest='FStop', default=1.2, type='float',
help='F-Stop of the lens',
metavar='0.8')
parser.add_option('-c', '--CCDSize', dest='SensorSize', default=3.0,
type='float',
help='Size of the CCD/CMOS sensor (in millimeters!), '
'default=3 mm/0.3 cm', metavar='7')
parser.add_option('-e', '--Energy', dest='InputEnergy', default=50.4,
type='float',
help='Energy of the x-ray photons in kV (default=50.4 kV)',
metavar='120')
parser.add_option('-l', '--LinePairs', dest='LinePairs', default=5.0,
type='float',
help='Desired resolution in lp/mm (default=5 lp/mm)',
metavar='4')
parser.add_option('-p', '--print', dest='Output', default=False,
action='store_true',
help='Save/Print output files to disk', metavar=1)
(options, args) = parser.parse_args()
options.SensorSize /= 10
options.InputEnergy *= 1000
# show the help if some important parameters are not given
if options.FOV is None \
or options.OpeningAngle is None \
or options.SensorSize is None \
or options.InputEnergy is None \
or options.LinePairs is None:
parser.print_help()
print 'Example:'
print 'The command below shows you the configuration for a setup with a ' \
'screen size of 20.5 cm (half the required size), a lens with an ' \
'opening angle of 45 deg, a small sensor of 7 mm and an x-ray ' \
'energy of 50 kV:'
print ''
print sys.argv[0], '-s 20.5 -o 45 -c 7 -e 50'
print ''
sys.exit(1)
print 80 * '_'
# CALCULATE
# Intensifying screen
# http://www.sprawls.org/ppmi2/FILMSCR/:
# > Although the total energy of the light emitted by a screen is much less
# than the total x-ray energy the screen receives, the light energy is much
# more efficient in exposing film because it is "repackaged" into a much larger
# number of photons. If we assume a 5% energy conversion efficiency, then one
# 50-keV x-ray photon can produce 1,000 blue-green light photons with an energy
# of 2.5 eV each.
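# Sanity check of the quoted numbers: 5% of a 50 keV photon is 2500 eV, which,
# repackaged into 2.5 eV light photons, gives 2500 / 2.5 = 1000 photons, as the
# quote states. The factors below model this as absorption x conversion x
# emission.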
ScreenAbsorption = 0.1
ScreenConversion = 0.5
ScreenEmission = 1
ScreenOutput = ScreenAbsorption * ScreenConversion * ScreenEmission
# nm (green according to http://is.gd/AWmNpp)
Wavelength = 500e-9
# E = h * nu, nu = c / lambda
PhotonEnergyJ = constants.h * constants.c / Wavelength
PhotonEnergyeV = PhotonEnergyJ / constants.eV
# print 'Visible light photons with a wavelength of',int(Wavelength*1e9),\
# 'nm have an energy of',round(PhotonEnergyJ,22),'J or',\
# round(PhotonEnergyeV,3),'eV.'
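# For the 500 nm assumed above this evaluates to roughly 3.97e-19 J, i.e.
# about 2.48 eV per visible-light photon.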
PhotonsAfterScintillator = options.InputEnergy / PhotonEnergyeV * ScreenOutput
print 'For each', options.InputEnergy / 1000, 'kV x-ray photon'
print ' * we have', int(round(PhotonsAfterScintillator)), 'visible light', \
'photons after the scintillator (with a'
print ' conversion efficiency of', ScreenOutput * 100, '%).'
# Lens
LensReflectance = 0.02
LensAbsorption = 0.02
# Assume a set of double plano-convex lenses, with 4% loss per lens
LensTransmission = 1 - (2 * LensReflectance) - (2 * LensAbsorption)
PhotonsAfterLens = PhotonsAfterScintillator * LensTransmission
# ~ tan(\alpha/2) = (FOV/2) / Distance
# ~ Distance = (FOV/2)/tan(\alpha/2)
WorkingDistance = (options.FOV / 2) / numpy.tan(
numpy.deg2rad(options.OpeningAngle) / 2)
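# Worked example with the defaults (FOV = 4.5 cm, opening angle = 90 deg):
# WorkingDistance = 2.25 / tan(45 deg) = 2.25 cm.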
print ' * we have', int(round(PhotonsAfterLens)), 'visible light photons', \
'after the lens couple (with a'
print ' transmission of', LensTransmission * 100, '%).'
# Sensor
QESensor = 0.4
ProducedElectrons = PhotonsAfterLens * QESensor
Demagnification = options.FOV / options.SensorSize
SensorPosition = WorkingDistance / Demagnification
print ' * we get', int(round(ProducedElectrons)), 'electrons on the', \
'detector (with a QE of', str(QESensor) + ').'
# LinePairs
LinePairsScintillator = options.FOV * 10 * options.LinePairs
PixelsNeeded = LinePairsScintillator * 2
SensorPixelSize = options.SensorSize / PixelsNeeded
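# The factor of 10 converts the FOV from cm to mm to match the lp/mm figure,
# and two pixels are needed to resolve one line pair (Nyquist), hence the
# factor of 2 between line pairs and pixels.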
# Comparison with Flatpanel detectors
FlatPanelPixelSize = 0.194 # mm
ScintillatorThickness = 1.0 # mm
ConversionEfficiency = 1.0
NumericalApertureCalculated = FlatPanelPixelSize / (ScintillatorThickness / 2)
NumericalApertureAverage = \
integrate.quad(lambda x: numpy.arctan(FlatPanelPixelSize / (2 * x)),
0.01, 1)[0]
NumericalApertureDetermined = (SensorPosition * 10) / (
options.FStop * 2 * SensorPosition * 10 / (1 / Demagnification))
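# Note: the expression above simplifies algebraically to
# (1 / Demagnification) / (2 * options.FStop).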
FStopJBAG = 0.8
NumericalApertureJBAG = 1 / (2 * FStopJBAG)
# PLOT
# Plot optical configuration
# Draw the stuff we calculated above
fig = plt.figure(1, figsize=(32, 18))
Thickness = 1.0
SupportThickness = 0.5
XRaySourcePosition = 25
# Optical Configuration
plt.subplot(211)
plt.axis('equal')
# axes = plt.gca()
# axes.axes.get_yaxis().set_ticks([])
plt.title('Angular opening: ' + str('%.2f' % options.OpeningAngle) +
', Screen size: ' + str('%.2f' % options.FOV) +
'cm, Working Distance: ' + str('%.2f' % round(WorkingDistance, 2)) +
'cm\nScintillator Efficiency: ' + str(round(ScreenOutput, 2) * 100)
+ '%, Lens transmission: ' + str(round(LensTransmission, 2) * 100)
+ '%, QE sensor: ' + str(QESensor))
plt.xlabel('Distance [cm]')
plt.ylabel('Distance [cm]')
# Optical Axis
plt.axhline(color='k', linestyle='--')
# X-rays
x = numpy.arange(0, XRaySourcePosition - Thickness - SupportThickness, 0.1)
for yshift in numpy.arange(-options.FOV / 2,
options.FOV / 2,
options.FOV / 10.0):
plt.plot(-x - Thickness - SupportThickness, numpy.sin(x) + yshift, 'k')
# Scintillator
ScintillatorSupport = Rectangle(
(-Thickness - SupportThickness, (options.FOV / 2) + SupportThickness),
Thickness + SupportThickness, -options.FOV - SupportThickness * 2,
facecolor="black")
plt.gca().add_patch(ScintillatorSupport)
Scintillator = Rectangle((-Thickness, options.FOV / 2), Thickness,
-options.FOV, facecolor="lightgreen")
plt.gca().add_patch(Scintillator)
# Light-Cone
# Opening angle
wedgecolor = 'r'
OpeningAngleWedge = Wedge((WorkingDistance, 0), -WorkingDistance * .25,
-(options.OpeningAngle / 2), (options.OpeningAngle / 2),
fill=False, color=wedgecolor)
plt.gca().add_patch(OpeningAngleWedge)
# Light Beams
beamcolor = wedgecolor
# Scintillator - Lens
plt.plot([0, WorkingDistance], [options.FOV / 2, 0], beamcolor)
plt.plot([0, WorkingDistance], [-options.FOV / 2, 0], beamcolor)
# Lens - Sensor
plt.plot([WorkingDistance, WorkingDistance + SensorPosition],
[0, options.FOV / 2 / Demagnification], beamcolor)
plt.plot([WorkingDistance, WorkingDistance + SensorPosition],
[0, -options.FOV / 2 / Demagnification], beamcolor)
# Camera
Sensor = Rectangle((WorkingDistance + SensorPosition, options.SensorSize / 2),
Thickness / 4, -options.SensorSize, facecolor="black")
plt.gca().add_patch(Sensor)
Housing = Rectangle((WorkingDistance + SensorPosition + Thickness / 4,
options.SensorSize / 2 / .618), Thickness / 4 / .618,
-options.SensorSize / .618, facecolor="black")
plt.gca().add_patch(Housing)
# Text
step = options.FOV / 8.0
plt.text(1.618 * WorkingDistance, options.FOV / 2,
'- 1 ' + str(options.InputEnergy / 1000) + ' kV x-ray photon')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - step,
'- ' + str(int(PhotonsAfterScintillator)) + ' ' + str(
Wavelength * 1e9) + ' nm photons after scintillator')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 2 * step,
'- ' + str(int(PhotonsAfterLens)) + ' ' + str(
Wavelength * 1e9) + ' nm photons after lens')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 3 * step,
'- ' + str(int(ProducedElectrons)) + ' electrons on sensor')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 4 * step,
'- Opening Angle: ' + str(
options.OpeningAngle) + ' deg') # http://is.gd/pxodor
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 5 * step,
'- Sensorsize: ' + str(options.SensorSize) + ' cm')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 6 * step,
'- Demagnification: ' + str('%.2f' % Demagnification) + 'x')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 7 * step,
'- To achieve ' + str('%.2f' % options.LinePairs) + ' lp/mm, we need')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 8 * step,
' a sensor with ' + str(
round(PixelsNeeded ** 2 / 1e6, 2)) + ' Mpx (' + str(
int(PixelsNeeded)) + 'x' + str(int(PixelsNeeded)) + ' px)')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 9 * step,
' resulting in a pixelsize of ' + str(
'%.2f' % (SensorPixelSize * 1000)) + ' um.')
# Plot NA
plt.subplot(234)
plt.axis('equal')
Magnification = numpy.arange(0, 1.01, 0.01)
for FStop in [0.5, 0.8, 1, 1.2, 1.4, 2]:
plt.plot(Magnification, Magnification / (2 * FStop * (1 + Magnification)),
label='f/' + str('%0.2f' % FStop))
plt.plot(Magnification,
Magnification / (2 * options.FStop * (1 + Magnification)), 'g--',
linewidth=5, label='f/' + str('%0.2f' % options.FStop))
plt.legend(loc='upper left')
plt.hlines(NumericalApertureAverage, 0, 1)
plt.text(0.618, NumericalApertureAverage, 'NA flat panel')
plt.hlines(NumericalApertureDetermined, 0, 1)
plt.text(0.618, NumericalApertureDetermined, 'simulated NA of our lens')
plt.hlines(NumericalApertureJBAG, 0, 1)
plt.text(0.618, NumericalApertureJBAG, 'NA JBAG (?)')
plt.vlines(1 / Demagnification, 0, 1, 'g', '--')
plt.text(1 / Demagnification + 0.25, 0.8, 'Our calculated\nDemagnification: ' +
str(Demagnification) + 'x=' + str(round(1 / Demagnification, 3)))
plt.title('NA')
plt.xlabel('Magnification')
plt.ylabel('NA')
plt.xlim([0, 1])
# Plot X-ray spectra
plt.subplot(235)
# http://stackoverflow.com/a/11249430/323100
Spectra = [
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_040kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_046kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_053kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_060kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_070kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_080kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_090kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_100kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_120kV.txt'))
]
AnodeMaterial = [str(open(FileName).readlines()[1].split()[3]) for FileName in
Spectra]
Energy = [int(open(FileName).readlines()[2].split()[4]) for FileName in
Spectra]
Ripple = [float(open(FileName).readlines()[3].split()[4]) for FileName in
Spectra]
AirKerma = [float(open(FileName).readlines()[4].split()[3]) for FileName in
Spectra]
MeanEnergy = [float(open(FileName).readlines()[5].split()[3]) for FileName in
Spectra]
FilterMaterial = [str(open(FileName).readlines()[9].split()[1]) for FileName in
Spectra]
FilterThickness = [int(open(FileName).readlines()[9].split()[2]) for FileName
in Spectra]
Data = [(numpy.loadtxt(FileName)) for FileName in Spectra]
for i in range(len(Spectra)):
plt.plot(Data[i][:, 0], Data[i][:, 1],
label=str(Energy[i]) + 'kV, Mean=' + str(
round(MeanEnergy[i], 2)) + 'keV')
# plt.plot( Data[i][:,0], Data[i][:,1], label=str(Energy[i]) +'kV')
plt.legend(loc='best')
plt.title(
'X-ray spectra for ' + AnodeMaterial[0] + ' Anode,\n' + FilterMaterial[
0] + ' Filter with ' + str(FilterThickness[0]) + ' mm Thickness')
plt.xlabel('Energy [kV]')
plt.ylabel('Photons')
# Plot of Ball Lenses
plt.subplot(236)
Dia = numpy.arange(0, 15, 0.2)
NA = (0.918919 * (-1.0 + Dia)) / Dia
FNo = (0.544118 * Dia) / (-1.0 + Dia)
plt.plot(Dia, NA, 'r', label='NA')
plt.plot(Dia, FNo, 'g', label='FNo')
plt.legend(loc='best')
plt.xlim([1.5, 10])
plt.ylim([0.3, 1.2])
for i in (2, 8):
plt.axvline(i, color='k')
if i > 3:
plt.axhline(NA[numpy.where(Dia == i)], color='k')
plt.axhline(FNo[numpy.where(Dia == i)], color='k')
plt.savefig('CalculateDetector.png')
# OUTPUT
if options.Output:
Prefix = 'Config'
try:
os.mkdir(os.path.join(os.getcwd(), Prefix))
except OSError:
print 'Directory', os.path.join(os.getcwd(),
Prefix), 'already exists, did not ' \
'create it...'
print
# We should probably do something more clever with "print "%10.4f" %
# options" than the stuff below
SaveName = Prefix + str(options).replace('{', '_').replace('}', ''). \
replace("'", '').replace(': ', '_').replace(', ', '-'). \
replace('-Output_True', '').replace('9999999999999', '')
# getting the output of 'options' and doing some string-replacement to get
# a nice filename for the output.
# FIGURE
plt.savefig(os.path.join(Prefix, ''.join([SaveName, '.png'])),
dpi=fig.dpi)
print 'Figure saved to ' + os.path.join(Prefix,
''.join([SaveName, '.png']))
print
# LOGFILE
# Redirect console-output to a file according to
# http://stackoverflow.com/a/4829801/323100
# open the result file in write mode
logfile = open(os.path.join(Prefix, ''.join([SaveName, '.txt'])), 'w')
# store the default system handler to be able to restore it
old_stdout = sys.stdout
# Now your file is used by print as destination
sys.stdout = logfile
print 'Call the script with the commandline below to get the same result.'
print ' '.join(sys.argv)
print 80 * '-'
print 'If we define the intensifying screen:'
print '\t- to have an absorption of', 100 * ScreenAbsorption, '%'
print '\t- to convert', 100 * ScreenConversion, \
'% of the incoming x-rays to visible light'
print '\t- and to have an emittance of', 100 * ScreenEmission, \
'% of all converted photons'
print 'we have a total efficiency of the screen of ', 100 * ScreenOutput, \
'%.'
print
print 'One incoming', options.InputEnergy / 1000, \
'keV x-ray photon will thus produce:'
print '\t-', int(round(PhotonsAfterScintillator)), \
'photons with a wavelength of', \
int(Wavelength * 1e9), 'nm (or', round(PhotonEnergyeV, 3), 'eV).'
print '\t-', int(round(PhotonsAfterLens)), 'of these photons (' + \
str(
LensTransmission * 100) + \
' %) will arrive at the sensor'
print '\t- which will produce', int(round(ProducedElectrons)), \
'electrons on a sensor with a QE of', QESensor
print 'To achieve', options.LinePairs, 'lp/mm on a', options.FOV, \
'cm scintillator, we need a sensor with', \
round(int(PixelsNeeded) ** 2 / 1e6, 1), 'Mpx (' + \
str(int(PixelsNeeded)) + 'x' + str(int(PixelsNeeded)), \
'px), which results in pixels with a physical size of', \
round(SensorPixelSize * 1000, 2), 'um on a', options.SensorSize, \
'cm sensor.'
print 'For the chosen optical configuration of:'
print '\t- FOV =', '%.2f' % options.FOV, 'cm and'
print '\t- Opening angle =', '%.2f' % options.OpeningAngle + 'deg we get a'
print '\t- Working distance of', '%.2f' % WorkingDistance, 'cm'
print
print 'Numerical Aperture:'
print '\t- calculated NA:', NumericalApertureCalculated, \
'(central element in scintillator layer of FPD)'
print '\t- average NA:', NumericalApertureAverage, \
'(average NA on optical axis assuming 10 um distance between ' \
'scintillator and detector)'
print '\t- NA JBAG lenses:', NumericalApertureJBAG, \
'(assuming F=1/2NA -> NA = 1/2F, with F =', FStopJBAG, ')'
print '\t- NA for our sensor:', NumericalApertureDetermined, \
'(according to Rene = SensorDistance / (FStop * 2 * SensorDistance/' \
'Magnification)'
sys.stdout = old_stdout # here we restore the default behavior
logfile.close() # do not forget to close your file
print 'Logfile saved to ' + os.path.join(Prefix,
''.join([SaveName, '.txt']))
print
else:
plt.show()
print 'The options were:'
# getting the output of 'options' and doing some string-replacement to get a
# nice filename for the output.
print str(options).replace('{', '').replace('}', '').replace("'", '').replace(
', ', '\n')
print 80 * '_'
print 'Call the script with the commandline below to get the same result...'
print ' '.join(sys.argv)
if options.Output:
print
print 'use the command below to open all the generated .png images with ' \
'Fiji.'
viewcommand = '/home/scratch/Apps/Fiji.app/fiji-linux -eval \'run("Image ' \
'Sequence...", "open=' + os.getcwd() + \
' starting=1 increment=1 scale=100 file=png or=[] ' \
'sort");\' &'
print viewcommand
print 80 * '_'
# # kill all runnig fiji jobs
# killall fiji-linux;
# # remove all calculated images
# rm *.png;
# # calculate some stuff
# for f in {10..43..15}; # Field of View
# do echo FOV $f;
# for o in {10..150..15}; # Opening Angle
# do echo OpeningAngle $o;
# for s in {5..25..15}; # Sensor Size
# do echo SensorSize $s;
# ./CalculateDetector.py -f $f -o $o -s $s -p;
# done;
# done;
# done
# # open fiji
# /home/scratch/Apps/Fiji.app/fiji-linux -eval 'run("Image Sequence...",
# "open=/afs/psi.ch/project/EssentialMed/Dev starting=1 increment=1 scale=100
# file=png or=[] sort");' & # start fiji
|
|
"""
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse
from numbers import Integral
from smt.utils.linear_solvers import get_solver, LinearSolver, VALID_SOLVERS
from smt.utils.line_search import get_line_search_class, LineSearch, VALID_LINE_SEARCHES
from smt.utils.caching import cached_operation
from smt.surrogate_models.surrogate_model import SurrogateModel
class RMTS(SurrogateModel):
"""
Regularized Minimal-energy Tensor-product Spline interpolant base class for RMTC and RMTB.
"""
name = "RMTS"
def _initialize(self):
super(RMTS, self)._initialize()
declare = self.options.declare
supports = self.supports
declare(
"xlimits",
types=np.ndarray,
desc="Lower/upper bounds in each dimension - ndarray [nx, 2]",
)
declare(
"smoothness",
1.0,
types=(Integral, float, tuple, list, np.ndarray),
desc="Smoothness parameter in each dimension - length nx. None implies uniform",
)
declare(
"regularization_weight",
1e-14,
types=(Integral, float),
desc="Weight of the term penalizing the norm of the spline coefficients."
+ " This is useful as an alternative to energy minimization "
+ " when energy minimization makes the training time too long.",
)
declare(
"energy_weight",
1e-4,
types=(Integral, float),
desc="The weight of the energy minimization terms",
)
declare(
"extrapolate",
False,
types=bool,
desc="Whether to perform linear extrapolation for external evaluation points",
)
declare(
"min_energy",
True,
types=bool,
desc="Whether to perform energy minimization",
)
declare(
"approx_order", 4, types=Integral, desc="Exponent in the approximation term"
)
declare(
"solver",
"krylov",
values=VALID_SOLVERS,
types=LinearSolver,
desc="Linear solver",
)
declare(
"derivative_solver",
"krylov",
values=VALID_SOLVERS,
types=LinearSolver,
desc="Linear solver used for computing output derivatives (dy_dyt)",
)
declare(
"grad_weight",
0.5,
types=(Integral, float),
desc="Weight on gradient training data",
)
declare(
"solver_tolerance",
1e-12,
types=(Integral, float),
desc="Convergence tolerance for the nonlinear solver",
)
declare(
"nonlinear_maxiter",
10,
types=Integral,
desc="Maximum number of nonlinear solver iterations",
)
declare(
"line_search",
"backtracking",
values=VALID_LINE_SEARCHES,
types=LineSearch,
desc="Line search algorithm",
)
declare(
"save_energy_terms",
False,
types=bool,
desc="Whether to cache energy terms in the data_dir directory",
)
declare(
"data_dir",
None,
values=(None,),
types=str,
desc="Directory for loading / saving cached data; None means do not save or load",
)
declare(
"max_print_depth",
5,
types=Integral,
desc="Maximum depth (level of nesting) to print operation descriptions and times",
)
supports["training_derivatives"] = True
supports["derivatives"] = True
supports["output_derivatives"] = True
def _setup_hessian(self):
diag = np.ones(self.num["dof"])
arange = np.arange(self.num["dof"])
full_hess = scipy.sparse.csc_matrix((diag, (arange, arange)))
return full_hess
def _compute_jac(self, ix1, ix2, x):
data, rows, cols = self._compute_jac_raw(ix1, ix2, x)
n = x.shape[0]
full_jac = scipy.sparse.csc_matrix(
(data, (rows, cols)), shape=(n, self.num["coeff"])
)
if self.full_dof2coeff is not None:
full_jac = full_jac * self.full_dof2coeff
return full_jac
def _compute_approx_terms(self):
        # This computes the approximation terms for the training points.
        # We loop over kx: 0 is for values and kx > 0 represents
        # the 1-based index of the derivative given by the training point data.
num = self.num
xlimits = self.options["xlimits"]
full_jac_dict = {}
for kx in self.training_points[None]:
xt, yt = self.training_points[None][kx]
xmin = np.min(xt, axis=0)
xmax = np.max(xt, axis=0)
assert np.all(xlimits[:, 0] <= xmin), (
"Training points below min for %s" % kx
)
assert np.all(xlimits[:, 1] >= xmax), (
"Training points above max for %s" % kx
)
            if kx == 0:
                c = 1.0
            else:
                c = self.options["grad_weight"] / xlimits.shape[0]
full_jac = self._compute_jac(kx, 0, xt)
full_jac_dict[kx] = (full_jac, full_jac.T.tocsc(), c)
return full_jac_dict
def _compute_energy_terms(self):
# This computes the energy terms that are to be minimized.
# The quadrature points are the centroids of the multi-dimensional elements.
num = self.num
xlimits = self.options["xlimits"]
inputs = {}
inputs["nx"] = xlimits.shape[0]
inputs["elem_list"] = num["elem_list"]
if self.__class__.__name__ == "RMTB":
inputs["num_ctrl_list"] = num["ctrl_list"]
inputs["order_list"] = num["order_list"]
if self.options["save_energy_terms"]:
cache_dir = self.options["data_dir"]
else:
cache_dir = None
with cached_operation(inputs, cache_dir) as outputs:
if outputs:
sq_mtx = outputs["sq_mtx"]
else:
n = np.prod(2 * num["elem_list"])
x = np.empty(n * num["x"])
self.rmtsc.compute_quadrature_points(
n, np.array(2 * num["elem_list"], dtype=np.int32), x
)
x = x.reshape((n, num["x"]))
sq_mtx = [None] * num["x"]
for kx in range(num["x"]):
mtx = self._compute_jac(kx + 1, kx + 1, x)
sq_mtx[kx] = (
mtx.T.tocsc() * mtx * (xlimits[kx, 1] - xlimits[kx, 0]) ** 4
)
outputs["sq_mtx"] = sq_mtx
elem_vol = np.prod((xlimits[:, 1] - xlimits[:, 0]) / (2 * num["elem_list"]))
total_vol = np.prod(xlimits[:, 1] - xlimits[:, 0])
full_hess = scipy.sparse.csc_matrix((num["dof"], num["dof"]))
for kx in range(num["x"]):
full_hess += sq_mtx[kx] * (
elem_vol
/ total_vol
* self.options["smoothness"][kx]
/ (xlimits[kx, 1] - xlimits[kx, 0]) ** 4
)
return full_hess
def _opt_func(self, sol, p, yt_dict):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
func = 0.5 * np.dot(sol, full_hess * sol)
for kx in self.training_points[None]:
full_jac, full_jac_T, c = full_jac_dict[kx]
yt = yt_dict[kx]
func += 0.5 * c * np.sum((full_jac * sol - yt) ** p)
return func
def _opt_grad(self, sol, p, yt_dict):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
grad = full_hess * sol
for kx in self.training_points[None]:
full_jac, full_jac_T, c = full_jac_dict[kx]
yt = yt_dict[kx]
grad += 0.5 * c * full_jac_T * p * (full_jac * sol - yt) ** (p - 1)
return grad
def _opt_dgrad_dyt(self, sol, p, yt_dict, kx):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
full_jac, full_jac_T, c = full_jac_dict[kx]
yt = yt_dict[kx]
diag_vec = p * (p - 1) * (full_jac * sol - yt) ** (p - 2)
diag_mtx = scipy.sparse.diags(diag_vec, format="csc")
mtx = 0.5 * c * full_jac_T.dot(diag_mtx)
return -mtx.todense()
def _opt_hess(self, sol, p, yt_dict):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
hess = scipy.sparse.csc_matrix(full_hess)
for kx in self.training_points[None]:
full_jac, full_jac_T, c = full_jac_dict[kx]
yt = yt_dict[kx]
diag_vec = p * (p - 1) * (full_jac * sol - yt) ** (p - 2)
diag_mtx = scipy.sparse.diags(diag_vec, format="csc")
hess += 0.5 * c * full_jac_T * diag_mtx * full_jac
return hess
def _opt_norm(self, sol, p, yt_dict):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
grad = self._opt_grad(sol, p, yt_dict)
return np.linalg.norm(grad)
def _get_yt_dict(self, ind_y):
yt_dict = {}
for kx in self.training_points[None]:
xt, yt = self.training_points[None][kx]
yt_dict[kx] = yt[:, ind_y]
return yt_dict
def _run_newton_solver(self, sol):
num = self.num
options = self.options
solver = get_solver(options["solver"])
ls_class = get_line_search_class(options["line_search"])
total_size = int(num["dof"])
rhs = np.zeros((total_size, num["y"]))
d_sol = np.zeros((total_size, num["y"]))
p = self.options["approx_order"]
for ind_y in range(rhs.shape[1]):
with self.printer._timed_context("Solving for output %i" % ind_y):
yt_dict = self._get_yt_dict(ind_y)
norm = self._opt_norm(sol[:, ind_y], p, yt_dict)
fval = self._opt_func(sol[:, ind_y], p, yt_dict)
self.printer(
"Iteration (num., iy, grad. norm, func.) : %3i %3i %15.9e %15.9e"
% (0, ind_y, norm, fval)
)
iter_count = 0
while (
iter_count < options["nonlinear_maxiter"]
and norm > options["solver_tolerance"]
):
with self.printer._timed_context():
with self.printer._timed_context("Assembling linear system"):
mtx = self._opt_hess(sol[:, ind_y], p, yt_dict)
rhs[:, ind_y] = -self._opt_grad(sol[:, ind_y], p, yt_dict)
with self.printer._timed_context("Initializing linear solver"):
solver._setup(mtx, self.printer)
with self.printer._timed_context("Solving linear system"):
solver._solve(rhs[:, ind_y], d_sol[:, ind_y], ind_y=ind_y)
func = lambda x: self._opt_func(x, p, yt_dict)
grad = lambda x: self._opt_grad(x, p, yt_dict)
# sol[:, ind_y] += d_sol[:, ind_y]
ls = ls_class(sol[:, ind_y], d_sol[:, ind_y], func, grad)
with self.printer._timed_context("Performing line search"):
sol[:, ind_y] = ls(1.0)
norm = self._opt_norm(sol[:, ind_y], p, yt_dict)
fval = self._opt_func(sol[:, ind_y], p, yt_dict)
self.printer(
"Iteration (num., iy, grad. norm, func.) : %3i %3i %15.9e %15.9e"
% (iter_count, ind_y, norm, fval)
)
self.mtx = mtx
iter_count += 1
def _solve(self):
num = self.num
options = self.options
solver = get_solver(options["solver"])
ls_class = get_line_search_class(options["line_search"])
total_size = int(num["dof"])
rhs = np.zeros((total_size, num["y"]))
sol = np.zeros((total_size, num["y"]))
d_sol = np.zeros((total_size, num["y"]))
with self.printer._timed_context(
"Solving initial startup problem (n=%i)" % total_size
):
approx_order = options["approx_order"]
nonlinear_maxiter = options["nonlinear_maxiter"]
options["approx_order"] = 2
options["nonlinear_maxiter"] = 1
self._run_newton_solver(sol)
options["approx_order"] = approx_order
options["nonlinear_maxiter"] = nonlinear_maxiter
with self.printer._timed_context(
"Solving nonlinear problem (n=%i)" % total_size
):
self._run_newton_solver(sol)
return sol
def _new_train(self):
"""
Train the model
"""
with self.printer._timed_context("Pre-computing matrices", "assembly"):
with self.printer._timed_context("Computing dof2coeff", "dof2coeff"):
self.full_dof2coeff = self._compute_dof2coeff()
with self.printer._timed_context("Initializing Hessian", "init_hess"):
self.full_hess = (
self._setup_hessian() * self.options["regularization_weight"]
)
if self.options["min_energy"]:
with self.printer._timed_context("Computing energy terms", "energy"):
self.full_hess += (
self._compute_energy_terms() * self.options["energy_weight"]
)
with self.printer._timed_context("Computing approximation terms", "approx"):
self.full_jac_dict = self._compute_approx_terms()
with self.printer._timed_context(
"Solving for degrees of freedom", "total_solution"
):
self.sol = self._solve()
if self.full_dof2coeff is not None:
self.sol_coeff = self.full_dof2coeff * self.sol
else:
self.sol_coeff = self.sol
def _train(self):
"""
Train the model
"""
self._setup()
tmp = self.rmtsc
self.rmtsc = None
inputs = {"self": self}
with cached_operation(inputs, self.options["data_dir"]) as outputs:
self.rmtsc = tmp
if outputs:
self.sol_coeff = outputs["sol_coeff"]
self.sol = outputs["sol"]
self.mtx = outputs["mtx"]
self.full_dof2coeff = outputs["full_dof2coeff"]
self.full_hess = outputs["full_hess"]
self.full_jac_dict = outputs["full_jac_dict"]
else:
self._new_train()
outputs["sol_coeff"] = self.sol_coeff
outputs["sol"] = self.sol
outputs["mtx"] = self.mtx
outputs["full_dof2coeff"] = self.full_dof2coeff
outputs["full_hess"] = self.full_hess
outputs["full_jac_dict"] = self.full_jac_dict
def _predict_values(self, x):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
mtx = self._compute_prediction_mtx(x, 0)
y = mtx.dot(self.sol_coeff)
return y
def _predict_derivatives(self, x, kx):
"""
Evaluates the derivatives at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
kx : int
The 0-based index of the input variable with respect to which derivatives are desired.
Returns
-------
y : np.ndarray
Derivative values.
"""
mtx = self._compute_prediction_mtx(x, kx + 1)
y = mtx.dot(self.sol_coeff)
return y
def _compute_prediction_mtx(self, x, kx):
n = x.shape[0]
num = self.num
options = self.options
data, rows, cols = self._compute_jac_raw(kx, 0, x)
# In the explanation below, n is the number of dimensions, and
# a_k and b_k are the lower and upper bounds for x_k.
#
# A C1 extrapolation can get very tricky, so we implement a simple C0
        # extrapolation. We basically linearly extrapolate from the nearest
# domain point. For example, if n = 4 and x2 > b2 and x3 > b3:
# f(x1,x2,x3,x4) = f(x1,b2,b3,x4) + dfdx2 (x2-b2) + dfdx3 (x3-b3)
# where the derivatives are evaluated at x1,b2,b3,x4 (called b) and
# dfdx1|x = dfdx1|b + d2fdx1dx2|b (x2-b2) + d2fdx1dx3|b (x3-b3)
# dfdx2|x = dfdx2|b.
# The dfdx2|x derivative is what it is because f and all derivatives
# evaluated at x1,b2,b3,x4 are constant with respect to changes in x2.
# On the other hand, the dfdx1|x derivative is what it is because
# f and all derivatives evaluated at x1,b2,b3,x4 change with x1.
# The extrapolation function is non-differentiable at boundaries:
# i.e., where x_k = a_k or x_k = b_k for at least one k.
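        # As a concrete instance of the formula above (editor's illustrative
        # example, not from the original source): with n = 2 and only x2 > b2,
        # the extrapolated value reduces to
        #     f(x1, x2) = f(x1, b2) + dfdx2|(x1, b2) * (x2 - b2),
        # i.e. a single first-order step taken from the nearest point on the
        # domain boundary.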
if options["extrapolate"]:
            # First we evaluate the vector pointing to each evaluation point
            # from the nearest point on the domain, in a matrix called dx.
# If the ith evaluation point is not external, dx[i, :] = 0.
dx = np.empty(n * num["support"] * num["x"])
self.rmtsc.compute_ext_dist(n, num["support"], x.flatten(), dx)
dx = dx.reshape((n * num["support"], num["x"]))
isexternal = np.array(np.array(dx, bool), float)
for ix in range(num["x"]):
# Now we compute the first order term where we have a
# derivative times (x_k - b_k) or (x_k - a_k).
data_tmp, rows, cols = self._compute_jac_raw(kx, ix + 1, x)
data_tmp *= dx[:, ix]
# If we are evaluating a derivative (with index kx),
# we zero the first order terms for which dx_k = 0.
if kx != 0:
data_tmp *= 1 - isexternal[:, kx - 1]
data += data_tmp
mtx = scipy.sparse.csc_matrix((data, (rows, cols)), shape=(n, num["coeff"]))
return mtx
def _predict_output_derivatives(self, x):
# dy_dyt = dy_dw * (dR_dw)^{-1} * dR_dyt
n = x.shape[0]
nw = self.mtx.shape[0]
nx = x.shape[1]
ny = self.sol.shape[1]
p = self.options["approx_order"]
dy_dw = self._compute_prediction_mtx(x, 0)
if self.full_dof2coeff is not None:
dy_dw = dy_dw * self.full_dof2coeff
dy_dw = dy_dw.todense()
dR_dw = self.mtx
dy_dyt = {}
for kx in self.training_points[None]:
nt = self.training_points[None][kx][0].shape[0]
dR_dyt = np.zeros((nw, nt, ny))
for ind_y in range(ny):
yt_dict = self._get_yt_dict(ind_y)
dR_dyt[:, :, ind_y] = self._opt_dgrad_dyt(
self.sol[:, ind_y], p, yt_dict, kx
)
solver = get_solver(self.options["derivative_solver"])
solver._setup(dR_dw, self.printer)
dw_dyt = np.zeros((nw, nt, ny))
for ind_t in range(nt):
for ind_y in range(ny):
solver._solve(
dR_dyt[:, ind_t, ind_y], dw_dyt[:, ind_t, ind_y], ind_y=ind_y
)
dw_dyt[:, ind_t, ind_y] *= -1.0
if kx == 0:
dy_dyt[None] = np.einsum("ij,jkl->ikl", dy_dw, dw_dyt)
else:
dy_dyt[kx - 1] = np.einsum("ij,jkl->ikl", dy_dw, dw_dyt)
return dy_dyt
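# --- Editor's note: hedged usage sketch --------------------------------------
# RMTS itself is an abstract base; in practice one of its subclasses (RMTB or
# RMTC, both shipped with SMT) is trained and evaluated. A minimal sketch,
# assuming the RMTB spline variant and a 1-D training set:
#
#   import numpy as np
#   from smt.surrogate_models import RMTB
#
#   xt = np.linspace(0.0, 4.0, 20).reshape(-1, 1)
#   yt = np.sin(xt)
#
#   sm = RMTB(xlimits=np.array([[0.0, 4.0]]), order=3, num_ctrl_pts=10)
#   sm.set_training_values(xt, yt)
#   sm.train()
#   y = sm.predict_values(np.array([[1.5]]))
#   dy_dx = sm.predict_derivatives(np.array([[1.5]]), 0)
# ------------------------------------------------------------------------------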
|
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
import simplejson as json
except ImportError:
import json
from django.contrib.auth.models import User
from rest_framework import serializers
from rois_manager.models import Slice, Core, FocusRegion
from clinical_annotations_manager.models import SliceAnnotation, CoreAnnotation, \
FocusRegionAnnotation, GleasonElement
from rois_manager.serializers import SliceSerializer, CoreSerializer, FocusRegionSerializer
class SliceAnnotationSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
slug_field='username',
queryset=User.objects.all()
)
gleason_4_percentage = serializers.SerializerMethodField()
class Meta:
model = SliceAnnotation
fields = ('id', 'author', 'slice', 'annotation_step', 'action_start_time', 'action_complete_time',
'creation_date', 'high_grade_pin', 'pah', 'chronic_inflammation', 'acute_inflammation',
'periglandular_inflammation', 'intraglandular_inflammation', 'stromal_inflammation',
'gleason_4_percentage')
read_only_fields = ('id', 'creation_date', 'gleason_4_percentage')
write_only_fields = ('annotation_step',)
@staticmethod
def get_gleason_4_percentage(obj):
return obj.get_gleason_4_percentage()
class SliceAnnotationDetailsSerializer(SliceAnnotationSerializer):
slice = SliceSerializer(read_only=True)
class SliceAnnotationInfosSerializer(serializers.ModelSerializer):
class Meta:
model = SliceAnnotation
fields = ('id', 'annotation_step')
read_only_fields = ('id', 'annotation_step')
class CoreAnnotationSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
slug_field='username',
queryset=User.objects.all()
)
gleason_score = serializers.SerializerMethodField()
gleason_4_percentage = serializers.SerializerMethodField()
class Meta:
model = CoreAnnotation
fields = ('id', 'author', 'core', 'annotation_step', 'action_start_time', 'action_complete_time',
'creation_date', 'primary_gleason', 'secondary_gleason', 'gleason_score',
'gleason_4_percentage', 'gleason_group')
read_only_fields = ('id', 'creation_date', 'gleason_score', 'gleason_4_percentage')
write_only_fields = ('annotation_step',)
@staticmethod
def get_gleason_score(obj):
return '%d + %d' % (obj.primary_gleason, obj.secondary_gleason)
@staticmethod
def get_gleason_4_percentage(obj):
return obj.get_gleason_4_percentage()
class CoreAnnotationDetailsSerializer(CoreAnnotationSerializer):
core = CoreSerializer(read_only=True)
class CoreAnnotationInfosSerializer(serializers.ModelSerializer):
class Meta:
model = CoreAnnotation
fields = ('id', 'annotation_step')
read_only_fields = ('id', 'annotation_step')
class GleasonElementSerializer(serializers.ModelSerializer):
gleason_label = serializers.SerializerMethodField()
class Meta:
model = GleasonElement
fields = ('id', 'gleason_type', 'gleason_label', 'json_path', 'area',
'cellular_density_helper_json', 'cellular_density', 'cells_count',
'creation_date', 'action_start_time', 'action_complete_time')
read_only_fields = ('gleason_label',)
@staticmethod
def get_gleason_label(obj):
return obj.get_gleason_type_label()
@staticmethod
def validate_json_path(value):
try:
json.loads(value)
return value
except ValueError:
raise serializers.ValidationError('Not a valid JSON in \'json_path\' field')
@staticmethod
def validate_cellular_density_helper_json(value):
if value is None:
return value
try:
json.loads(value)
return value
except ValueError:
raise serializers.ValidationError('Not a valid JSON in \'cellular_density_helper_json\' field')
class FocusRegionAnnotationSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(
slug_field='username',
queryset=User.objects.all()
)
gleason_elements = GleasonElementSerializer(many=True)
class Meta:
model = FocusRegionAnnotation
fields = ('id', 'author', 'focus_region', 'annotation_step', 'action_start_time', 'action_complete_time',
'creation_date', 'perineural_involvement', 'intraductal_carcinoma', 'ductal_carcinoma',
'poorly_formed_glands', 'cribriform_pattern', 'small_cell_signet_ring', 'hypernephroid_pattern',
'mucinous', 'comedo_necrosis', 'inflammation', 'pah', 'atrophic_lesions', 'adenosis',
'cellular_density_helper_json', 'cellular_density', 'cells_count', 'gleason_elements')
read_only_fields = ('creation_date',)
write_only_fields = ('id', 'annotation_step', 'gleason_elements', 'author')
def create(self, validated_data):
gleason_elements_data = validated_data.pop('gleason_elements')
annotation = FocusRegionAnnotation.objects.create(**validated_data)
for element_data in gleason_elements_data:
GleasonElement.objects.create(focus_region_annotation=annotation, **element_data)
return annotation
class FocusRegionAnnotationDetailsSerializer(FocusRegionAnnotationSerializer):
focus_region = FocusRegionSerializer(read_only=True)
class FocusRegionAnnotationInfosSerializer(serializers.ModelSerializer):
class Meta:
model = FocusRegionAnnotation
fields = ('id', 'annotation_step')
read_only_fields = ('id', 'annotation_step')
class AnnotatedFocusRegionSerializer(serializers.ModelSerializer):
clinical_annotations = FocusRegionAnnotationInfosSerializer(many=True)
class Meta:
model = FocusRegion
fields = ('id', 'label', 'core', 'roi_json', 'length', 'area',
'tissue_status', 'clinical_annotations')
read_only_fields = fields
class AnnotatedCoreSerializer(serializers.ModelSerializer):
focus_regions = AnnotatedFocusRegionSerializer(many=True)
clinical_annotations = CoreAnnotationInfosSerializer(many=True)
positive = serializers.SerializerMethodField()
class Meta:
model = Core
fields = ('id', 'label', 'slice', 'roi_json', 'length', 'area', 'tumor_length',
'focus_regions', 'clinical_annotations', 'positive')
read_only_fields = fields
@staticmethod
def get_positive(obj):
for fr in obj.focus_regions.all():
if fr.is_cancerous_region():
return True
return False
class AnnotatedSliceSerializer(serializers.ModelSerializer):
cores = AnnotatedCoreSerializer(many=True)
clinical_annotations = SliceAnnotationInfosSerializer(many=True)
class Meta:
model = Slice
fields = ('id', 'label', 'slide', 'roi_json', 'cores', 'clinical_annotations')
read_only_fields = fields
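# --- Editor's note: hedged usage sketch --------------------------------------
# Typical use of these serializers inside a DRF view; the queryset access and
# the `request` object below are illustrative assumptions, not part of this
# module:
#
#   annotation = SliceAnnotation.objects.select_related('slice').first()
#   data = SliceAnnotationDetailsSerializer(annotation).data
#
#   serializer = FocusRegionAnnotationSerializer(data=request.data)
#   if serializer.is_valid():
#       serializer.save()   # create() also builds the nested GleasonElements
# ------------------------------------------------------------------------------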
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functionality related to notifications common to multiple layers of
the system.
"""
import datetime
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import timeutils
import six
import nova.context
from nova import exception
from nova.i18n import _LE
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import utils
LOG = log.getLogger(__name__)
notify_opts = [
cfg.StrOpt('notify_on_state_change',
help='If set, send compute.instance.update notifications on instance '
'state changes. Valid values are None for no notifications, '
'"vm_state" for notifications on VM state changes, or '
'"vm_and_task_state" for notifications on VM and task state '
'changes.'),
cfg.BoolOpt('notify_api_faults', default=False,
help='If set, send api.fault notifications on caught exceptions '
'in the API service.'),
cfg.StrOpt('default_notification_level',
default='INFO',
choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notify_opts)
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
    :param fn: function object to be decorated
:returns: fn -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = (common_context.get_context_from_function_and_args(
fn, args, kwarg) or
common_context.get_current() or
nova.context.RequestContext())
notifier = rpc.get_notifier('api',
publisher_id=(CONF.default_publisher_id
or CONF.host))
        method = getattr(notifier, CONF.default_notification_level.lower(),
                         notifier.info)
method(ctxt, name, body)
return fn(*args, **kwarg)
return wrapped_func
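# Editor's note: a minimal, illustrative sketch of how notify_decorator is
# applied by utils.monkey_patch(); the wrapped function and event name below
# are hypothetical:
#
#   def create_server(*args, **kwargs):
#       ...
#   create_server = notify_decorator('nova.api.create_server', create_server)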
def send_api_fault(url, status, exception):
"""Send an api.fault notification."""
if not CONF.notify_api_faults:
return
payload = {'url': url, 'exception': six.text_type(exception),
'status': status}
rpc.get_notifier('api').error(common_context.get_current() or
nova.context.get_admin_context(),
'api.fault',
payload)
def send_update(context, old_instance, new_instance, service="compute",
host=None):
"""Send compute.instance.update notification to report any changes occurred
in that instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
update_with_state_change = False
old_vm_state = old_instance["vm_state"]
new_vm_state = new_instance["vm_state"]
old_task_state = old_instance["task_state"]
new_task_state = new_instance["task_state"]
# we should check if we need to send a state change or a regular
# notification
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
update_with_state_change = True
elif (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
update_with_state_change = True
if update_with_state_change:
# send a notification with state changes
# value of verify_states need not be True as the check for states is
# already done here
send_update_with_states(context, new_instance, old_vm_state,
new_vm_state, old_task_state, new_task_state, service, host)
else:
try:
old_display_name = None
if new_instance["display_name"] != old_instance["display_name"]:
old_display_name = old_instance["display_name"]
_send_instance_update_notification(context, new_instance,
service=service, host=host,
old_display_name=old_display_name)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=new_instance)
except Exception:
LOG.exception(_LE("Failed to send state update notification"),
instance=new_instance)
def send_update_with_states(context, instance, old_vm_state, new_vm_state,
old_task_state, new_task_state, service="compute", host=None,
verify_states=False):
"""Send compute.instance.update notification to report changes if there
are any, in the instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
fire_update = True
# send update notification by default
if verify_states:
# check whether we need to send notification related to state changes
fire_update = False
# do not send notification if the conditions for vm and(or) task state
# are not satisfied
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
fire_update = True
elif (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
fire_update = True
if fire_update:
# send either a state change or a regular notification
try:
_send_instance_update_notification(context, instance,
old_vm_state=old_vm_state, old_task_state=old_task_state,
new_vm_state=new_vm_state, new_task_state=new_task_state,
service=service, host=host)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=instance)
except Exception:
LOG.exception(_LE("Failed to send state update notification"),
instance=instance)
def _compute_states_payload(instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None):
# If the states were not specified we assume the current instance
# states are the correct information. This is important to do for
# both old and new states because otherwise we create some really
    # confusing notifications like:
#
# None(None) => Building(none)
#
# When we really were just continuing to build
if new_vm_state is None:
new_vm_state = instance["vm_state"]
if new_task_state is None:
new_task_state = instance["task_state"]
if old_vm_state is None:
old_vm_state = instance["vm_state"]
if old_task_state is None:
old_task_state = instance["task_state"]
states_payload = {
"old_state": old_vm_state,
"state": new_vm_state,
"old_task_state": old_task_state,
"new_task_state": new_task_state,
}
return states_payload
def _send_instance_update_notification(context, instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None,
service="compute", host=None, old_display_name=None):
"""Send 'compute.instance.update' notification to inform observers
about instance state changes.
"""
payload = info_from_instance(context, instance, None, None)
# determine how we'll report states
payload.update(
_compute_states_payload(
instance, old_vm_state, old_task_state,
new_vm_state, new_task_state))
# add audit fields:
(audit_start, audit_end) = audit_period_bounds(current_period=True)
payload["audit_period_beginning"] = audit_start
payload["audit_period_ending"] = audit_end
# add bw usage info:
bw = bandwidth_usage(instance, audit_start)
payload["bandwidth"] = bw
# add old display name if it is changed
if old_display_name:
payload["old_display_name"] = old_display_name
rpc.get_notifier(service, host).info(context,
'compute.instance.update', payload)
def audit_period_bounds(current_period=False):
"""Get the start and end of the relevant audit usage period
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
"""
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end
return (audit_start, audit_end)
def bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data=True):
"""Get bandwidth usage information for the instance for the
specified audit period.
"""
admin_context = nova.context.get_admin_context(read_deleted='yes')
def _get_nwinfo_old_skool():
"""Support for getting network info without objects."""
if (instance_ref.get('info_cache') and
instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
if isinstance(cached_info, network_model.NetworkInfo):
return cached_info
return network_model.NetworkInfo.hydrate(cached_info)
try:
return network.API().get_instance_nw_info(admin_context,
instance_ref)
except Exception:
try:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to get nw_info'),
instance=instance_ref)
except Exception:
if ignore_missing_network_data:
return
raise
# FIXME(comstud): Temporary as we transition to objects.
if isinstance(instance_ref, obj_base.NovaObject):
nw_info = instance_ref.info_cache.network_info
if nw_info is None:
nw_info = network_model.NetworkInfo()
else:
nw_info = _get_nwinfo_old_skool()
macs = [vif['address'] for vif in nw_info]
uuids = [instance_ref["uuid"]]
bw_usages = objects.BandwidthUsageList.get_by_uuids(admin_context, uuids,
audit_start)
bw = {}
for b in bw_usages:
if b.mac in macs:
label = 'net-name-not-found-%s' % b.mac
for vif in nw_info:
if vif['address'] == b.mac:
label = vif['network']['label']
break
bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
return bw
def image_meta(system_metadata):
"""Format image metadata for use in notifications from the instance
system metadata.
"""
image_meta = {}
for md_key, md_value in six.iteritems(system_metadata):
if md_key.startswith('image_'):
image_meta[md_key[6:]] = md_value
return image_meta
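# Editor's note: an illustrative example of the transformation performed by
# image_meta() above (the sample keys are hypothetical):
#
#   image_meta({'image_min_ram': '0', 'image_disk_format': 'qcow2',
#               'instance_type_id': '5'})
#   # -> {'min_ram': '0', 'disk_format': 'qcow2'}   (key order may vary)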
def info_from_instance(context, instance, network_info,
system_metadata, **kw):
"""Get detailed instance information for an instance which is common to all
notifications.
:param:instance: nova.objects.Instance
:param:network_info: network_info provided if not None
:param:system_metadata: system_metadata DB entries for the instance,
if not None
.. note::
Currently unused here in trunk, but needed for potential custom
modifications.
"""
def null_safe_str(s):
return str(s) if s else ''
def null_safe_int(s):
return int(s) if s else ''
def null_safe_isotime(s):
if isinstance(s, datetime.datetime):
return timeutils.strtime(s)
else:
return str(s) if s else ''
image_ref_url = glance.generate_image_url(instance.image_ref)
instance_type = instance.get_flavor()
instance_type_name = instance_type.get('name', '')
instance_flavorid = instance_type.get('flavorid', '')
instance_info = dict(
# Owner properties
tenant_id=instance.project_id,
user_id=instance.user_id,
# Identity properties
instance_id=instance.uuid,
display_name=instance.display_name,
reservation_id=instance.reservation_id,
hostname=instance.hostname,
# Type properties
instance_type=instance_type_name,
instance_type_id=instance.instance_type_id,
instance_flavor_id=instance_flavorid,
architecture=instance.architecture,
# Capacity properties
memory_mb=instance.memory_mb,
disk_gb=instance.root_gb + instance.ephemeral_gb,
vcpus=instance.vcpus,
# Note(dhellmann): This makes the disk_gb value redundant, but
# we are keeping it for backwards-compatibility with existing
# users of notifications.
root_gb=instance.root_gb,
ephemeral_gb=instance.ephemeral_gb,
# Location properties
host=instance.host,
node=instance.node,
availability_zone=instance.availability_zone,
cell_name=null_safe_str(instance.cell_name),
# Date properties
created_at=str(instance.created_at),
# Terminated and Deleted are slightly different (although being
# terminated and not deleted is a transient state), so include
# both and let the recipient decide which they want to use.
terminated_at=null_safe_isotime(instance.get('terminated_at', None)),
deleted_at=null_safe_isotime(instance.get('deleted_at', None)),
launched_at=null_safe_isotime(instance.get('launched_at', None)),
# Image properties
image_ref_url=image_ref_url,
os_type=instance.os_type,
kernel_id=instance.kernel_id,
ramdisk_id=instance.ramdisk_id,
# Status properties
state=instance.vm_state,
state_description=null_safe_str(instance.task_state),
progress=null_safe_int(instance.progress),
# accessIPs
access_ip_v4=instance.access_ip_v4,
access_ip_v6=instance.access_ip_v6,
)
if network_info is not None:
fixed_ips = []
for vif in network_info:
for ip in vif.fixed_ips():
ip["label"] = vif["network"]["label"]
ip["vif_mac"] = vif["address"]
fixed_ips.append(ip)
instance_info['fixed_ips'] = fixed_ips
# add image metadata
image_meta_props = image_meta(instance.system_metadata)
instance_info["image_meta"] = image_meta_props
# add instance metadata
instance_info['metadata'] = instance.metadata
instance_info.update(kw)
return instance_info
|
|
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Gabor Dozsa
# System components used by the bigLITTLE.py configuration script
import m5
from m5.objects import *
m5.util.addToPath('../../')
from common.Caches import *
from common import CpuConfig
class L1I(L1_ICache):
tag_latency = 1
data_latency = 1
response_latency = 1
mshrs = 4
tgts_per_mshr = 8
size = '48kB'
assoc = 3
class L1D(L1_DCache):
tag_latency = 2
data_latency = 2
response_latency = 1
mshrs = 16
tgts_per_mshr = 16
size = '32kB'
assoc = 2
write_buffers = 16
class WalkCache(PageTableWalkerCache):
tag_latency = 4
data_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
class L2(L2Cache):
tag_latency = 12
data_latency = 12
response_latency = 5
mshrs = 32
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
clusivity='mostly_excl'
class L3(Cache):
size = '16MB'
assoc = 16
tag_latency = 20
data_latency = 20
response_latency = 20
mshrs = 20
tgts_per_mshr = 12
clusivity='mostly_excl'
class MemBus(SystemXBar):
badaddr_responder = BadAddr(warn_access="warn")
default = Self.badaddr_responder.pio
class CpuCluster(SubSystem):
def __init__(self, system, num_cpus, cpu_clock, cpu_voltage,
cpu_type, l1i_type, l1d_type, wcache_type, l2_type):
super(CpuCluster, self).__init__()
self._cpu_type = cpu_type
self._l1i_type = l1i_type
self._l1d_type = l1d_type
self._wcache_type = wcache_type
self._l2_type = l2_type
assert num_cpus > 0
self.voltage_domain = VoltageDomain(voltage=cpu_voltage)
self.clk_domain = SrcClockDomain(clock=cpu_clock,
voltage_domain=self.voltage_domain)
self.cpus = [ self._cpu_type(cpu_id=system.numCpus() + idx,
clk_domain=self.clk_domain)
for idx in range(num_cpus) ]
for cpu in self.cpus:
cpu.createThreads()
cpu.createInterruptController()
cpu.socket_id = system.numCpuClusters()
system.addCpuCluster(self, num_cpus)
def requireCaches(self):
return self._cpu_type.require_caches()
def memoryMode(self):
return self._cpu_type.memory_mode()
def addL1(self):
for cpu in self.cpus:
l1i = None if self._l1i_type is None else self._l1i_type()
l1d = None if self._l1d_type is None else self._l1d_type()
iwc = None if self._wcache_type is None else self._wcache_type()
dwc = None if self._wcache_type is None else self._wcache_type()
cpu.addPrivateSplitL1Caches(l1i, l1d, iwc, dwc)
def addL2(self, clk_domain):
if self._l2_type is None:
return
self.toL2Bus = L2XBar(width=64, clk_domain=clk_domain)
self.l2 = self._l2_type()
for cpu in self.cpus:
cpu.connectAllPorts(self.toL2Bus)
self.toL2Bus.master = self.l2.cpu_side
def connectMemSide(self, bus):
try:
self.l2.mem_side = bus.slave
except AttributeError:
for cpu in self.cpus:
cpu.connectAllPorts(bus)
class AtomicCluster(CpuCluster):
def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
cpu_config = [ CpuConfig.get("atomic"), None, None, None, None ]
super(AtomicCluster, self).__init__(system, num_cpus, cpu_clock,
cpu_voltage, *cpu_config)
def addL1(self):
pass
class SimpleSystem(LinuxArmSystem):
cache_line_size = 64
def __init__(self, **kwargs):
super(SimpleSystem, self).__init__(**kwargs)
self.voltage_domain = VoltageDomain(voltage="1.0V")
self.clk_domain = SrcClockDomain(clock="1GHz",
voltage_domain=Parent.voltage_domain)
self.realview = VExpress_GEM5_V1()
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
self.membus = MemBus()
self.intrctrl = IntrControl()
self.terminal = Terminal()
self.vncserver = VncServer()
self.iobus = IOXBar()
# CPUs->PIO
self.iobridge = Bridge(delay='50ns')
# Device DMA -> MEM
self.dmabridge = Bridge(delay='50ns',
ranges=self.realview._mem_regions)
self._pci_devices = 0
self._clusters = []
self._num_cpus = 0
def attach_pci(self, dev):
dev.pci_bus, dev.pci_dev, dev.pci_func = (0, self._pci_devices + 1, 0)
self._pci_devices += 1
self.realview.attachPciDevice(dev, self.iobus)
def connect(self):
self.iobridge.master = self.iobus.slave
self.iobridge.slave = self.membus.master
self.dmabridge.master = self.membus.slave
self.dmabridge.slave = self.iobus.master
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.realview.attachOnChipIO(self.membus, self.iobridge)
self.realview.attachIO(self.iobus)
self.system_port = self.membus.slave
def numCpuClusters(self):
return len(self._clusters)
def addCpuCluster(self, cpu_cluster, num_cpus):
assert cpu_cluster not in self._clusters
assert num_cpus > 0
self._clusters.append(cpu_cluster)
self._num_cpus += num_cpus
def numCpus(self):
return self._num_cpus
def addCaches(self, need_caches, last_cache_level):
if not need_caches:
# connect each cluster to the memory hierarchy
for cluster in self._clusters:
cluster.connectMemSide(self.membus)
return
cluster_mem_bus = self.membus
assert last_cache_level >= 1 and last_cache_level <= 3
for cluster in self._clusters:
cluster.addL1()
if last_cache_level > 1:
for cluster in self._clusters:
cluster.addL2(cluster.clk_domain)
if last_cache_level > 2:
max_clock_cluster = max(self._clusters,
key=lambda c: c.clk_domain.clock[0])
self.l3 = L3(clk_domain=max_clock_cluster.clk_domain)
self.toL3Bus = L2XBar(width=64)
self.toL3Bus.master = self.l3.cpu_side
self.l3.mem_side = self.membus.slave
cluster_mem_bus = self.toL3Bus
# connect each cluster to the memory hierarchy
for cluster in self._clusters:
cluster.connectMemSide(cluster_mem_bus)
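# --- Editor's note: hedged usage sketch --------------------------------------
# These classes are normally wired together by the accompanying bigLITTLE.py
# configuration script. A rough outline of that wiring (cluster parameters and
# the chosen cpu_type are illustrative assumptions):
#
#   system = SimpleSystem(mem_mode='timing')
#   big = CpuCluster(system, 2, "2GHz", "1.0V", CpuConfig.get("timing"),
#                    L1I, L1D, WalkCache, L2)
#   little = AtomicCluster(system, 4, "1GHz")
#   system.addCaches(need_caches=True, last_cache_level=2)
#   system.connect()
# ------------------------------------------------------------------------------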
|
|
#!/usr/bin/python
#coding:utf-8
###########################################################
### Utilities for Plexon data collection
### Written by Huangxin
###########################################################
import numpy as np
import logging
logger = logging.getLogger('SpikeRecord.Plexon')
from SpikeRecord import Plexon
def reconstruct_word_in_python(WORD_BITS,bits_num,unstrobed_bits,words_buffer,timestamps_buffer):
bits_indices = np.array([0]*WORD_BITS)
oldest_timestamps = np.array([unstrobed_bits[bit][0] for bit in xrange(WORD_BITS)])
# synonyms
bits_num = bits_num
unstrobed_bits = unstrobed_bits
words_buffer = words_buffer
timestamps_buffer = timestamps_buffer
where = np.where
left_shift = np.left_shift
timestamps_min = oldest_timestamps.min
words_count = 0
indices_sum = 0
while indices_sum < bits_num:
timestamp = timestamps_min()
word_bits = where(oldest_timestamps==timestamp)[0]
# construct word from bits
word = left_shift(1,word_bits).sum()
# increment the indices of previous word bits
bits_indices[word_bits] += 1
# increment indices sum so that the loop will run until all bits are processed
indices_sum += word_bits.size
# update oldest timestamp of previous word bits
oldest_timestamps[word_bits] = unstrobed_bits[word_bits,bits_indices[word_bits]]
# fill word and timestamp in buffers
words_buffer[words_count] = word
timestamps_buffer[words_count] = timestamp
words_count += 1
return words_count
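# Editor's note: a small worked example of the reconstruction above
# (illustrative numbers only). If bit 0 and bit 2 both have their oldest
# timestamp at t = 1.0 and bit 1 fires alone at t = 2.5, the loop emits
# word 0b101 = 5 at t = 1.0 followed by word 0b010 = 2 at t = 2.5.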
try:
import _unstrobed_word
reconstruct_word = _unstrobed_word.reconstruct_word_32
except (ImportError, ValueError):
    reconstruct_word = reconstruct_word_in_python
    logger.info("Cannot import C version of reconstruct_word. Building a C version is highly recommended. We will use the Python version this time.")
class PlexUtil(object):
"""
Utilities for data collection
"""
def __init__(self):
self.last_word = None
self.last_timestamp = None
def GetSpikesInfo(self,data):
"""
GetSpikesInfo(data) -> info
Return spike units collected in this period of time.
Parameters
----------
data: dict
{'type', 'channel', 'unit', 'timestamp'} dictionary from the return value of PlexClient.GetTimeStampArray().
        Returns
        -------
        info: list
            spike units for each channel on which spikes occurred, as
            [(channel, units)] pairs
"""
sorted_spikes = (data['type'] == Plexon.PL_SingleWFType) & (data['unit'] > 0)
info = []
for channel in np.unique(data['channel'][sorted_spikes]):
channel_units = map(chr, np.unique(data['unit'][sorted_spikes & (data['channel'] == channel)]) + (ord('a')-1))
info.append((channel, channel_units))
return info
def GetSpikeTrains(self,data):
spike_trains = {}
sorted_spikes = (data['type'] == Plexon.PL_SingleWFType) & (data['unit'] > 0)
for channel in np.unique(data['channel'][sorted_spikes]):
spike_trains[channel] = {}
for unit in map(chr,np.unique(data['unit'][sorted_spikes & (data['channel'] == channel)]) + (ord('a')-1)):
spike_trains[channel][unit] = self.GetSpikeTrain(data, channel=channel, unit=unit)
return spike_trains
def GetSpikeTrain(self, data, channel, unit):
"""
GetSpikeTrain(data) -> spike_train
Return sorted spikes of the specific unit in one channel.
Parameters
----------
data: dict
{'type', 'channel', 'unit', 'timestamp'} dictionary from the return value of PlexClient.GetTimeStampArray().
channel: int
currently 1-128
unit: str
a-z
Returns
-------
spiketrain: array
timestamp array of the specific unit
"""
unit_spikes = (data['type'] == Plexon.PL_SingleWFType) & \
(data['channel'] == channel) & \
(data['unit'] == ord(unit)-ord('a')+1)
return np.copy(data['timestamp'][unit_spikes])
def GetEventsNum(self, data):
return len(data['timestamp'])
#@profile
def GetExtEvents(self, data, event, bit=None, online=True):
"""
GetExtEvents(data) -> extevents
Return external events.
Parameters
----------
data: dict
{'type', 'channel', 'unit', 'timestamp'} dictionary from the return value of PlexClient.GetTimeStampArray().
event: string
event types: 'first_strobe_word','second_strobe_word','start','stop','pause','resume','unstrobed_bit'
bit: int
currently 1-32, used only in unstrobed_bit event
Returns
-------
        extevents: timestamp array for the 'start', 'stop', 'pause', 'resume' and 'unstrobed_bit' events.
            For strobe_word events the result is a dictionary whose key 'value' holds the strobed words and whose key 'timestamp' holds the event timestamps.
"""
ext_event_type = (data['type'] == Plexon.PL_ExtEventType)
#extevents = data['type'][ext_event_type]
channel = data['channel'][ext_event_type]
unit = data['unit'][ext_event_type]
timestamp = data['timestamp'][ext_event_type]
# for strobed word event
if event in ('first_strobe_word','second_strobe_word'):
strobed_events = (channel == Plexon.PL_StrobedExtChannel)
strobed_unit = unit[strobed_events]
strobed_timestamp = timestamp[strobed_events]
first_strobe = (strobed_unit & 0x8000) == 0
second_strobe = (strobed_unit & 0x8000) != 0
if event == 'first_strobe_word':
return {'value':strobed_unit[first_strobe] & 0x7FFF , 'timestamp':strobed_timestamp[first_strobe]}
else:
return {'value':strobed_unit[second_strobe] , 'timestamp':strobed_timestamp[second_strobe]}
# for start event
if event == 'start':
return timestamp[channel == Plexon.PL_StartExtChannel]
# for stop event
if event == 'stop':
return timestamp[channel == Plexon.PL_StopExtChannel]
# for pause event
if event == 'pause':
return timestamp[channel == Plexon.PL_Pause]
# for resume event
if event == 'resume':
return timestamp[channel == Plexon.PL_Resume]
# for unstrobed events
if event == 'unstrobed_bit':
return timestamp[channel == bit + 1 ]
# reconstruct unstrobed word from unstrobed bits
if event == 'unstrobed_word':
infinity = float('inf')
WORD_BITS = 32
            # append an additional infinity at the end of each bit array so that
            # indexing into unstrobed_bits never goes out of range
            unstrobed_bits_list = [timestamp[channel == bit+1] for bit in xrange(WORD_BITS)]
            bits_length = [len(unstrobed_bits_list[bit]) for bit in xrange(WORD_BITS)]  # actual bit lengths
max_length = max(bits_length)
bits_num = sum(bits_length)
# make 2d array of timestamp
unstrobed_bits = np.array([np.append(unstrobed_bits_list[bit], [infinity]*(max_length-bits_length[bit]+1)) \
for bit in xrange(WORD_BITS)],dtype=np.float32)
# create numpy buffer to hold words and timestamps
words_buffer = np.empty(bits_num,dtype=np.int32)
timestamps_buffer = np.empty(bits_num,dtype=np.float32)
words_count = reconstruct_word(WORD_BITS,bits_num,unstrobed_bits,words_buffer,timestamps_buffer)
words = words_buffer[:words_count]
timestamps = timestamps_buffer[:words_count]
if len(timestamps) and self.last_timestamp == timestamps[0]:
words[0] += self.last_word
elif self.last_word is not None:
words = np.append(self.last_word, words)
timestamps = np.append(self.last_timestamp, timestamps)
if len(timestamps)==1:
self.last_word = None
self.last_timestamp = None
return {'value': np.array(words), 'timestamp': np.array(timestamps)}
if online: # in offline mode all timestamps are read at once
if len(timestamps):
self.last_word = words[-1]
self.last_timestamp = timestamps[-1]
return {'value': np.array(words[:-1]), 'timestamp': np.array(timestamps[:-1])}
else:
return {'value': np.array(words), 'timestamp': np.array(timestamps)}
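# --- Editor's note: hedged usage sketch --------------------------------------
# Typical online use together with a PlexClient instance (the import path and
# instantiation details are assumptions, not part of this module):
#
#   from SpikeRecord.Plexon.PlexClient import PlexClient   # hypothetical path
#   pu = PlexUtil()
#   pc = PlexClient()
#   data = pc.GetTimeStampArray()      # as referenced in the docstrings above
#   train = pu.GetSpikeTrain(data, channel=1, unit='a')
#   words = pu.GetExtEvents(data, event='unstrobed_word')
# ------------------------------------------------------------------------------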
|
|
"""
Preprocess pipeline
"""
import datetime
import logging
import os.path
import numpy as np
from .. import read_config
from ..batch import BatchProcessorFactory
from .detect import threshold_detection
from .filter import whitening_matrix, whitening, localized_whitening_matrix, whitening_score, butterworth
from .score import get_score_pca, get_pca_suff_stat, get_pca_projection
from .standarize import standarize, sd
from ..neuralnetwork import NeuralNetDetector, NeuralNetTriage, nn_detection
# remove this
Q = None
Q_score = None
def run():
"""Execute preprocessing pipeline
Returns
-------
score: list
List of size n_channels, each list contains a (clear spikes x
number of features x number of channels) multidimensional array
score for every clear spike
clear_index: list
List of size n_channels, each list contains the indexes in
spike_times (first column) where the spike was clear
spike_times: list
List with n_channels elements, each element contains spike times
in the first column and [SECOND COLUMN?]
Examples
--------
.. literalinclude:: ../examples/preprocess.py
"""
logger = logging.getLogger(__name__)
start_time = datetime.datetime.now()
time = {'f': 0, 's': 0, 'd': 0, 'w': 0, 'b': 0, 'e': 0}
# FIXME: remove this
CONFIG = read_config()
whiten_file = open(os.path.join(CONFIG.data.root_folder, 'tmp/whiten.bin'), 'wb')
# initialize processor for raw data
path = os.path.join(CONFIG.data.root_folder, CONFIG.data.recordings)
dtype = CONFIG.recordings.dtype
# initialize factory
factory = BatchProcessorFactory(path_to_file=None,
dtype=None,
n_channels=CONFIG.recordings.n_channels,
max_memory=CONFIG.resources.max_memory,
buffer_size=None)
if CONFIG.preprocess.filter == 1:
_b = datetime.datetime.now()
        # make batch processor for raw data -> butterworth -> filtered
bp = factory.make(path_to_file=path, dtype=dtype,
buffer_size=0)
logger.info('Initialized butterworth batch processor: {}'
.format(bp))
# run filtering
path = os.path.join(CONFIG.data.root_folder, 'tmp/filtered.bin')
dtype = bp.process_function(butterworth,
path,
CONFIG.filter.low_pass_freq,
CONFIG.filter.high_factor,
CONFIG.filter.order,
CONFIG.recordings.sampling_rate)
time['f'] += (datetime.datetime.now()-_b).total_seconds()
# TODO: cache computations
# make batch processor for filtered -> standarize -> standarized
_b = datetime.datetime.now()
bp = factory.make(path_to_file=path, dtype=dtype, buffer_size=0)
# compute the standard deviation using the first batch only
batch1 = next(bp)
sd_ = sd(batch1, CONFIG.recordings.sampling_rate)
# make another batch processor
bp = factory.make(path_to_file=path, dtype=dtype, buffer_size=0)
logger.info('Initialized standarization batch processor: {}'
.format(bp))
# run standarization
path = os.path.join(CONFIG.data.root_folder, 'tmp/standarized.bin')
dtype = bp.process_function(standarize,
path,
sd_)
time['s'] += (datetime.datetime.now()-_b).total_seconds()
# create another batch processor for the rest of the pipeline
bp = factory.make(path_to_file=path, dtype=dtype,
buffer_size=CONFIG.BUFF)
logger.info('Initialized preprocess batch processor: {}'
.format(bp))
# initialize output variables
get_score = 1
spike_index_clear = None
spike_index_collision = None
score = None
pca_suff_stat = None
spikes_per_channel = None
for i, batch in enumerate(bp):
        # load neural net detector if necessary:
if CONFIG.spikes.detection == 'nn':
nnDetector = NeuralNetDetector(CONFIG.neural_network_detector.filename,
CONFIG.neural_network_autoencoder.filename)
nnTriage = NeuralNetTriage(CONFIG.neural_network_triage.filename)
else:
nnDetector = None
nnTriage = None
if i > CONFIG.nPortion:
get_score = 0
# process batch
# spike index is defined as a location in each minibatch
(si_clr_batch, score_batch, si_col_batch,
pss_batch, spc_batch,
time) = process_batch(batch, get_score, CONFIG.BUFF, time,
nnDetector=nnDetector,
nnTriage=nnTriage, whiten_file=whiten_file)
        # spike time w.r.t. the whole recording
si_clr_batch[:,0] = si_clr_batch[:,0] + i*CONFIG.batch_size - CONFIG.BUFF
si_col_batch[:,0] = si_col_batch[:,0] + i*CONFIG.batch_size - CONFIG.BUFF
if i == 0:
spike_index_clear = si_clr_batch
spike_index_collision = si_col_batch
score = score_batch
pca_suff_stat = pss_batch
spikes_per_channel = spc_batch
else:
spike_index_clear = np.vstack((spike_index_clear,
si_clr_batch))
spike_index_collision = np.vstack((spike_index_collision,
si_col_batch))
if get_score == 1 and CONFIG.spikes.detection == 'nn':
score = np.concatenate((score, score_batch), axis = 0)
pca_suff_stat += pss_batch
spikes_per_channel += spc_batch
whiten_file.close()
if CONFIG.spikes.detection != 'nn':
_b = datetime.datetime.now()
rot = get_pca_projection(pca_suff_stat, spikes_per_channel,
CONFIG.spikes.temporal_features, CONFIG.neighChannels)
score = get_score_pca(spike_index_clear, rot, CONFIG.neighChannels,
CONFIG.geom, CONFIG.batch_size,
CONFIG.BUFF, CONFIG.nBatches,
os.path.join(CONFIG.data.root_folder,'tmp/whiten.bin'),
CONFIG.scaleToSave)
time['e'] += (datetime.datetime.now()-_b).total_seconds()
# timing
current_time = datetime.datetime.now()
logger.info("Preprocessing done in {0} seconds.".format(
(current_time-start_time).seconds))
logger.info("\tfiltering:\t{0} seconds".format(time['f']))
logger.info("\tstandardization:\t{0} seconds".format(time['s']))
logger.info("\tdetection:\t{0} seconds".format(time['d']))
logger.info("\twhitening:\t{0} seconds".format(time['w']))
logger.info("\tsaving recording:\t{0} seconds".format(time['b']))
logger.info("\tgetting waveforms:\t{0} seconds".format(time['e']))
return score, spike_index_clear, spike_index_collision
def process_batch(rec, get_score, BUFF, time, nnDetector, nnTriage,
whiten_file):
logger = logging.getLogger(__name__)
CONFIG = read_config()
global Q
global Q_score
# nn detection
if CONFIG.spikes.detection == 'nn':
# detect spikes
_b = datetime.datetime.now()
(spike_index_clear,
spike_index_collision,
score) = nn_detection(rec, 10000, BUFF,
CONFIG.neighChannels,
CONFIG.geom,
CONFIG.spikes.temporal_features,
3,
CONFIG.neural_network_detector.threshold_spike,
CONFIG.neural_network_triage.threshold_collision,
nnDetector,
nnTriage
)
        # since we already have scores, no need to calculate sufficient
        # statistics for pca
pca_suff_stat = 0
spikes_per_channel = 0
time['d'] += (datetime.datetime.now()-_b).total_seconds()
        if get_score == 0:
spike_index_clear = np.zeros((0,2), 'int32')
spike_index_collision = np.vstack((spike_index_collision,
spike_index_clear))
score = None
else:
# whiten signal
_b = datetime.datetime.now()
            # get whitening matrix per batch or once in total
if CONFIG.preprocess.whiten_batchwise or Q is None:
Q_score = localized_whitening_matrix(rec,
CONFIG.neighChannels,
CONFIG.geom,
CONFIG.spikeSize)
score = whitening_score(score, spike_index_clear[:,1], Q_score)
time['w'] += (datetime.datetime.now()-_b).total_seconds()
# threshold detection
elif CONFIG.spikes.detection == 'threshold':
# detect spikes
_b = datetime.datetime.now()
spike_index = threshold_detection(rec,
CONFIG.neighChannels,
CONFIG.spikeSize,
CONFIG.stdFactor)
        # all spikes are considered clear spikes since no triage is done
        if get_score == 0:
spike_index_clear = np.zeros((0,2), 'int32')
spike_index_collision = spike_index
else:
spike_index_clear = spike_index
spike_index_collision = np.zeros((0,2), 'int32')
score = None
# get sufficient statistics for pca if we don't have projection matrix
pca_suff_stat, spikes_per_channel = get_pca_suff_stat(rec, spike_index,
CONFIG.spikeSize)
time['d'] += (datetime.datetime.now()-_b).total_seconds()
# whiten recording
_b = datetime.datetime.now()
if CONFIG.preprocess.whiten_batchwise or Q is None:
Q = whitening_matrix(rec, CONFIG.neighChannels,
CONFIG.spikeSize)
rec = whitening(rec, Q)
time['w'] += (datetime.datetime.now()-_b).total_seconds()
    # Remove spikes detected in the buffer area
spike_index_clear = spike_index_clear[np.logical_and(
spike_index_clear[:, 0] > BUFF,
spike_index_clear[:, 0] < (rec.shape[0] - BUFF))]
spike_index_collision = spike_index_collision[np.logical_and(
spike_index_collision[:, 0] > BUFF,
spike_index_collision[:, 0] < (rec.shape[0] - BUFF))]
_b = datetime.datetime.now()
# save whiten data
chunk = rec*CONFIG.scaleToSave
    chunk = chunk.reshape(chunk.shape[0]*chunk.shape[1])
chunk.astype('int16').tofile(whiten_file)
time['b'] += (datetime.datetime.now()-_b).total_seconds()
return (spike_index_clear, score, spike_index_collision,
pca_suff_stat, spikes_per_channel, time)
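# --- Editor's note: hedged usage sketch --------------------------------------
# The pipeline is configuration driven; a rough outline of how run() is
# typically invoked from the parent package. Both the package name `yass` and
# the set_config helper are assumptions inferred from the relative
# read_config import above:
#
#   import yass
#   yass.set_config('config.yaml')
#   from yass import preprocess
#   score, spike_index_clear, spike_index_collision = preprocess.run()
# ------------------------------------------------------------------------------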
|
|
# -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import string
import re
import dis
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# These constants are from Include/code.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, (type, types.ClassType))
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
im_func attribute (etc) when an object passes ismethod()."""
return (hasattr(object, "__get__")
and not hasattr(object, "__set__") # else it's a data descriptor
and not ismethod(object) # mutual exclusion
and not isfunction(object)
and not isclass(object))
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
return (hasattr(object, "__set__") and hasattr(object, "__get__"))
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions.
See help(isfunction) for a list of attributes."""
return bool((isfunction(object) or ismethod(object)) and
object.func_code.co_flags & CO_GENERATOR)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_exc_traceback traceback if raised in this frame, or None
f_exc_type exception type if raised in this frame, or None
f_exc_value exception value if raised in this frame, or None
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_restricted 0 or 1 if frame is in restricted execution mode
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
results = []
for key in dir(object):
try:
value = getattr(object, key)
except AttributeError:
continue
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
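# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).  A minimal
# sketch of getmembers() with a predicate; `_ExampleClass` is a made-up name.
class _ExampleClass(object):
    an_attribute = 42
    def a_method(self):
        pass
# Under Python 2, getmembers(_ExampleClass, ismethod) keeps only the unbound
# methods, e.g. [('a_method', <unbound method _ExampleClass.a_method>)];
# the plain data attribute `an_attribute` is filtered out by the predicate.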
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
"""
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
# Get the object associated with the name, and where it was defined.
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
# Furthermore, some objects may raise an Exception when fetched with
# getattr(). This is the case with some descriptors (bug #1785).
# Thus, we only use getattr() as a last resort.
homecls = None
for base in (cls,) + mro:
if name in base.__dict__:
obj = base.__dict__[name]
homecls = base
break
else:
obj = getattr(cls, name)
homecls = getattr(obj, "__objclass__", homecls)
# Classify the object.
if isinstance(obj, staticmethod):
kind = "static method"
elif isinstance(obj, classmethod):
kind = "class method"
elif isinstance(obj, property):
kind = "property"
elif ismethoddescriptor(obj):
kind = "method"
elif isdatadescriptor(obj):
kind = "data"
else:
obj_via_getattr = getattr(cls, name)
if (ismethod(obj_via_getattr) or
ismethoddescriptor(obj_via_getattr)):
kind = "method"
else:
kind = "data"
obj = obj_via_getattr
result.append(Attribute(name, kind, homecls, obj))
return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
if hasattr(cls, "__mro__"):
return cls.__mro__
else:
result = []
_searchbases(cls, result)
return tuple(result)
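# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).  The class
# names below are invented for the sketch.
class _Base(object):
    pass
class _Derived(_Base):
    pass
# getmro(_Derived) == (_Derived, _Base, object): new-style classes simply
# expose __mro__, while old-style classes fall back to _searchbases().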
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = string.expandtabs(line)
return len(expline) - len(string.lstrip(expline))
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if not isinstance(doc, types.StringTypes):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = string.split(string.expandtabs(doc), '\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxint
for line in lines[1:]:
content = len(string.lstrip(line))
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return string.join(lines, '\n')
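# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).  cleandoc()
# strips the uniform indentation a docstring inherits from its code block:
#   cleandoc("Summary line.\n    detail one\n    detail two")
#   returns 'Summary line.\ndetail one\ndetail two'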
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('{!r} is not a module, class, method, '
'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
filename = os.path.basename(path)
suffixes = map(lambda info:
(-len(info[0]), info[0], info[1], info[2]),
imp.get_suffixes())
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
info = getmoduleinfo(path)
if info: return info[0]
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
filename = filename[:-4] + '.py'
for suffix, mode, kind in imp.get_suffixes():
if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
# Looks like a binary file. We want to only return a text file.
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if hasattr(getmodule(object, filename), '__loader__'):
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in sys.modules.items():
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['__builtin__']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved."""
file = getfile(object)
sourcefile = getsourcefile(object)
if not sourcefile and file[:1] + file[-1:] != '<>':
raise IOError('source code not available')
file = sourcefile if sourcefile else file
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise IOError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (IOError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and string.strip(lines[start]) in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(string.expandtabs(lines[end]))
end = end + 1
return string.join(comments, '')
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [string.lstrip(string.expandtabs(lines[end]))]
if end > 0:
end = end - 1
comment = string.lstrip(string.expandtabs(lines[end]))
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = string.lstrip(string.expandtabs(lines[end]))
while comments and string.strip(comments[0]) == '#':
comments[:1] = []
while comments and string.strip(comments[-1]) == '#':
comments[-1:] = []
return string.join(comments, '')
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.last = 1
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
srow, scol = srow_scol
erow, ecol = erow_ecol
if not self.started:
# look for the first "def", "class" or "lambda"
if token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srow
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An IOError is
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
if ismodule(object): return lines, 0
else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return string.join(lines, '')
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=0):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
if c not in children[parent]:
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args varargs keywords')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
step = 0
# The following acrobatics are for anonymous (tuple) arguments.
for i in range(nargs):
if args[i][:1] in ('', '.'):
stack, remain, count = [], [], []
while step < len(co.co_code):
op = ord(co.co_code[step])
step = step + 1
if op >= dis.HAVE_ARGUMENT:
opname = dis.opname[op]
value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
step = step + 2
if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
remain.append(value)
count.append(value)
elif opname in ('STORE_FAST', 'STORE_DEREF'):
if opname == 'STORE_FAST':
stack.append(names[value])
else:
stack.append(co.co_cellvars[value])
# Special case for sublists of length 1: def foo((bar))
# doesn't generate the UNPACK_TUPLE bytecode, so if
# `remain` is empty here, we have such a sublist.
if not remain:
stack[0] = [stack[0]]
break
else:
remain[-1] = remain[-1] - 1
while remain[-1] == 0:
remain.pop()
size = count.pop()
stack[-size:] = [stack[-size:]]
if not remain: break
remain[-1] = remain[-1] - 1
if not remain: break
args[i] = stack[0]
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return Arguments(args, varargs, varkw)
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.im_func
if not isfunction(func):
raise TypeError('{!r} is not a Python function'.format(func))
args, varargs, varkw = getargs(func.func_code)
return ArgSpec(args, varargs, varkw, func.func_defaults)
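# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).  A hedged
# sketch of getargspec(); `_argspec_example` is a made-up function.
def _argspec_example(a, b=1, *args, **kw):
    return a, b, args, kw
# getargspec(_argspec_example) returns
#   ArgSpec(args=['a', 'b'], varargs='args', keywords='kw', defaults=(1,))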
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + string.join(seq, ', ') + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element."""
if type(object) in (list, tuple):
return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = strseq(arg, formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + string.join(specs, ', ') + ')'
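# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).
# formatargspec() renders the tuple produced by getargspec() back into
# source-like text, e.g. for the made-up `_argspec_example` above:
#   formatargspec(['a', 'b'], 'args', 'kw', (1,)) == '(a, b=1, *args, **kw)'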
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + string.join(specs, ', ') + ')'
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
args, varargs, varkw, defaults = getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg,subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg,str):
return arg in arg2value
return arg in assigned_tuple_params
if ismethod(func) and func.im_self is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.im_self,) + positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
for arg, value in zip(args, positional):
assign(arg, value)
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
if varkw:
if num_pos:
# XXX: We should use num_pos, but Python also uses num_total:
raise TypeError('%s() takes exactly 0 arguments '
'(%d given)' % (f_name, num_total))
else:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
if defaults: # fill in any missing values with the defaults
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
try:
unicode
except NameError:
pass
else:
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value
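# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).
# getcallargs() mirrors the interpreter's own argument binding; for the
# made-up `_argspec_example` above:
#   getcallargs(_argspec_example, 1, b=2) == {'a': 1, 'b': 2,
#                                             'args': (), 'kw': {}}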
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except IOError:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
tb = tb.tb_next
return framelist
if hasattr(sys, '_getframe'):
currentframe = sys._getframe
else:
currentframe = lambda _=None: None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
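# ------------------------------------------------------------------------
# Illustrative example (added; not part of the original module).  A minimal
# sketch of frame inspection: inside any function,
#   frame = currentframe()
#   info = getframeinfo(frame)
# yields a Traceback record whose filename and lineno describe the call
# site, and stack()[0][0] is that same frame object (the caller's frame).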
|
|
"""
Information-theoretic calculations
"""
import numpy as np
import pandas as pd
from sklearn import cross_validation
EPSILON = 100 * np.finfo(float).eps
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
    bins : list_like
        List of anything, usually values of bin edges
    fmt : str, optional
        Format specification (e.g. ':g') applied to each bin edge
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])]
def _check_prob_dist(x):
if np.any(x < 0):
raise ValueError('Each column of the input dataframes must be '
'**non-negative** probability distributions')
try:
if np.any(np.abs(x.sum() - np.ones(x.shape[1])) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
except IndexError:
if np.any(np.abs(x.sum() - 1) > EPSILON):
raise ValueError('Each column of the input dataframe must be '
'probability distributions that **sum to 1**')
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned
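# Illustrative example (added; not part of the original module).  A hedged
# sketch of binify(); the column name 'feature1' is made up.
# >>> df = pd.DataFrame({'feature1': [0.1, 0.2, 0.6, 0.9]})
# >>> binify(df, bins=(0, 0.5, 1))
#        feature1
# 0-0.5       0.5
# 0.5-1       0.5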
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
        Kullback-Leibler divergence of the common columns between the
        dataframes, e.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
    # Use the convention 0 * log 0 = 0: zero-probability bins are replaced
    # with NaN so that the sum below skips their contribution
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0)
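# Illustrative worked example (added; not part of the original module).
# kld() computes D_KL(p || q) = sum_i p_i * log2(p_i / q_i) for each column,
# using the 0 * log 0 = 0 convention.  For two bins:
# >>> p = pd.Series([0.5, 0.5])
# >>> q = pd.Series([0.9, 0.1])
# >>> kld(p, q)   # 0.5*log2(0.5/0.9) + 0.5*log2(0.5/0.1) ~= 0.737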
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
    Jensen-Shannon divergence of two probability distributions stored as pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result
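# Illustrative worked example (added; not part of the original module).
# jsd() is symmetric and, with base-2 logarithms, bounded by 1:
# >>> p = pd.Series([1.0, 0.0])
# >>> q = pd.Series([0.0, 1.0])
# >>> jsd(p, q)   # maximally different distributions -> 1.0
# >>> jsd(p, p)   # identical distributions -> 0.0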
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
    entropy : pandas.Series
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
        return np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0)
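# Illustrative worked example (added; not part of the original module).
# A uniform distribution over four bins has an entropy of 2 bits; the
# column name 'feature1' is made up.
# >>> binned = pd.DataFrame({'feature1': [0.25, 0.25, 0.25, 0.25]})
# >>> entropy(binned)
# feature1    2.0
# dtype: float64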
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
    bins : array-like
        Bin edges used to binify each feature into a probability
        distribution
    n_iter : int
        Number of bootstrap resampling iterations to perform for the
        within-group comparisons
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
                        binify_and_jsd(df1_subset, df2_subset, bins, pair))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
                series = binify_and_jsd(df1, df2, bins, pair)
jsds.append(series)
return pd.concat(jsds, axis=1)
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1)
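# Illustrative usage (added; not part of the original module).  A hedged
# end-to-end sketch; `data_df` and `sample_to_phenotype` are made-up names
# for a (samples x features) dataframe and a sample-to-group mapping.
# >>> bins = np.linspace(0, 1, 11)        # ten equal-width bins on [0, 1]
# >>> jsd_df = cross_phenotype_jsd(data_df, groupby=sample_to_phenotype,
# ...                              bins=bins, n_iter=100)
# >>> jsd_2d = jsd_df_to_2d(jsd_df)       # phenotype x phenotype mean JSDs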
|
|
"""Miscellaneous inheritance-related tests, many very old.
These are generally tests derived from specific user issues.
"""
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import util
from sqlalchemy.orm import *
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy import testing
from sqlalchemy.testing.util import function_named
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing.schema import Table, Column
class AttrSettable(object):
def __init__(self, **kwargs):
[setattr(self, k, v) for k, v in kwargs.items()]
def __repr__(self):
return self.__class__.__name__ + "(%s)" % (hex(id(self)))
class RelationshipTest1(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers
people = Table('people', metadata,
Column('person_id', Integer, Sequence('person_id_seq',
optional=True),
primary_key=True),
Column('manager_id', Integer,
ForeignKey('managers.person_id',
use_alter=True, name="mpid_fq")),
Column('name', String(50)),
Column('type', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
Column('manager_name', String(50))
)
def teardown(self):
people.update(values={people.c.manager_id:None}).execute()
super(RelationshipTest1, self).teardown()
def test_parent_refs_descendant(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
mapper(Person, people, properties={
'manager':relationship(Manager, primaryjoin=(
people.c.manager_id ==
managers.c.person_id),
uselist=False, post_update=True)
})
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==managers.c.person_id)
eq_(class_mapper(Person).get_property('manager').synchronize_pairs,
[(managers.c.person_id,people.c.manager_id)])
session = create_session()
p = Person(name='some person')
m = Manager(name='some manager')
p.manager = m
session.add(p)
session.flush()
session.expunge_all()
p = session.query(Person).get(p.person_id)
m = session.query(Manager).get(m.person_id)
assert p.manager is m
def test_descendant_refs_parent(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
mapper(Person, people)
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
properties={
'employee':relationship(Person, primaryjoin=(
people.c.manager_id ==
managers.c.person_id),
foreign_keys=[people.c.manager_id],
uselist=False, post_update=True)
})
session = create_session()
p = Person(name='some person')
m = Manager(name='some manager')
m.employee = p
session.add(m)
session.flush()
session.expunge_all()
p = session.query(Person).get(p.person_id)
m = session.query(Manager).get(m.person_id)
assert m.employee is p
class RelationshipTest2(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('manager_id', Integer, ForeignKey('people.person_id')),
Column('status', String(30)),
)
data = Table('data', metadata,
Column('person_id', Integer, ForeignKey('managers.person_id'),
primary_key=True),
Column('data', String(30))
)
def testrelationshiponsubclass_j1_nodata(self):
self.do_test("join1", False)
def testrelationshiponsubclass_j2_nodata(self):
self.do_test("join2", False)
def testrelationshiponsubclass_j1_data(self):
self.do_test("join1", True)
def testrelationshiponsubclass_j2_data(self):
self.do_test("join2", True)
def testrelationshiponsubclass_j3_nodata(self):
self.do_test("join3", False)
def testrelationshiponsubclass_j3_data(self):
self.do_test("join3", True)
def do_test(self, jointype="join1", usedata=False):
class Person(AttrSettable):
pass
class Manager(Person):
pass
if jointype == "join1":
poly_union = polymorphic_union({
'person':people.select(people.c.type=='person'),
'manager':join(people, managers,
people.c.person_id==managers.c.person_id)
}, None)
polymorphic_on=poly_union.c.type
elif jointype == "join2":
poly_union = polymorphic_union({
'person':people.select(people.c.type=='person'),
'manager':managers.join(people,
people.c.person_id==managers.c.person_id)
}, None)
polymorphic_on=poly_union.c.type
elif jointype == "join3":
poly_union = None
polymorphic_on = people.c.type
if usedata:
class Data(object):
def __init__(self, data):
self.data = data
mapper(Data, data)
mapper(Person, people,
with_polymorphic=('*', poly_union),
polymorphic_identity='person',
polymorphic_on=polymorphic_on)
if usedata:
mapper(Manager, managers,
inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
polymorphic_identity='manager',
properties={
'colleague':relationship(
Person,
primaryjoin=managers.c.manager_id==
people.c.person_id,
lazy='select', uselist=False),
'data':relationship(Data, uselist=False)
}
)
else:
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
polymorphic_identity='manager',
properties={
'colleague':relationship(Person,
primaryjoin=managers.c.manager_id==
people.c.person_id,
lazy='select', uselist=False)
}
)
sess = create_session()
p = Person(name='person1')
m = Manager(name='manager1')
m.colleague = p
if usedata:
m.data = Data('ms data')
sess.add(m)
sess.flush()
sess.expunge_all()
p = sess.query(Person).get(p.person_id)
m = sess.query(Manager).get(m.person_id)
assert m.colleague is p
if usedata:
assert m.data.data == 'ms data'
class RelationshipTest3(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('colleague_id', Integer, ForeignKey('people.person_id')),
Column('name', String(50)),
Column('type', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
)
data = Table('data', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('data', String(30))
)
def _generate_test(jointype="join1", usedata=False):
def do_test(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
if usedata:
class Data(object):
def __init__(self, data):
self.data = data
if jointype == "join1":
poly_union = polymorphic_union({
'manager':managers.join(people,
people.c.person_id==managers.c.person_id),
'person':people.select(people.c.type=='person')
}, None)
elif jointype =="join2":
poly_union = polymorphic_union({
'manager':join(people, managers,
people.c.person_id==managers.c.person_id),
'person':people.select(people.c.type=='person')
}, None)
elif jointype == 'join3':
poly_union = people.outerjoin(managers)
elif jointype == "join4":
poly_union=None
if usedata:
mapper(Data, data)
if usedata:
mapper(Person, people,
with_polymorphic=('*', poly_union),
polymorphic_identity='person',
polymorphic_on=people.c.type,
properties={
'colleagues':relationship(Person,
primaryjoin=people.c.colleague_id==
people.c.person_id,
remote_side=people.c.colleague_id,
uselist=True),
'data':relationship(Data, uselist=False)
}
)
else:
mapper(Person, people,
with_polymorphic=('*', poly_union),
polymorphic_identity='person',
polymorphic_on=people.c.type,
properties={
'colleagues':relationship(Person,
primaryjoin=people.c.colleague_id==people.c.person_id,
remote_side=people.c.colleague_id, uselist=True)
}
)
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
polymorphic_identity='manager')
sess = create_session()
p = Person(name='person1')
p2 = Person(name='person2')
p3 = Person(name='person3')
m = Manager(name='manager1')
p.colleagues.append(p2)
m.colleagues.append(p3)
if usedata:
p.data = Data('ps data')
m.data = Data('ms data')
sess.add(m)
sess.add(p)
sess.flush()
sess.expunge_all()
p = sess.query(Person).get(p.person_id)
p2 = sess.query(Person).get(p2.person_id)
p3 = sess.query(Person).get(p3.person_id)
m = sess.query(Person).get(m.person_id)
assert len(p.colleagues) == 1
assert p.colleagues == [p2]
assert m.colleagues == [p3]
if usedata:
assert p.data.data == 'ps data'
assert m.data.data == 'ms data'
do_test = function_named(
do_test, 'test_relationship_on_base_class_%s_%s' % (
                jointype, usedata and "data" or "nodata"))
return do_test
for jointype in ["join1", "join2", "join3", "join4"]:
for data in (True, False):
func = _generate_test(jointype, data)
setattr(RelationshipTest3, func.__name__, func)
del func
class RelationshipTest4(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('longer_status', String(70)))
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('owner', Integer, ForeignKey('people.person_id')))
def test_many_to_one_polymorphic(self):
"""in this test, the polymorphic union is between two subclasses, but
does not include the base table by itself in the union. however, the
primaryjoin condition is going to be against the base table, and its a
many-to-one relationship (unlike the test in polymorph.py) so the
column in the base table is explicit. Can the ClauseAdapter figure out
how to alias the primaryjoin to the polymorphic union ?"""
# class definitions
class Person(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, status %s" % \
(self.name, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, status %s" % \
(self.name, self.longer_status)
class Car(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Car number %d" % self.car_id
# create a union that represents both types of joins.
employee_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, "type", 'employee_join')
person_mapper = mapper(Person, people,
with_polymorphic=('*', employee_join),
polymorphic_on=employee_join.c.type,
polymorphic_identity='person')
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
car_mapper = mapper(Car, cars,
properties= {'employee':
relationship(person_mapper)})
session = create_session()
        # create four managers named M1 through M4
for i in range(1,5):
session.add(Manager(name="M%d" % i,
longer_status="YYYYYYYYY"))
        # create four engineers named E1 through E4
for i in range(1,5):
session.add(Engineer(name="E%d" % i,status="X"))
session.flush()
engineer4 = session.query(Engineer).\
filter(Engineer.name=="E4").first()
manager3 = session.query(Manager).\
filter(Manager.name=="M3").first()
car1 = Car(employee=engineer4)
session.add(car1)
car2 = Car(employee=manager3)
session.add(car2)
session.flush()
session.expunge_all()
def go():
testcar = session.query(Car).options(
joinedload('employee')
).get(car1.car_id)
assert str(testcar.employee) == "Engineer E4, status X"
self.assert_sql_count(testing.db, go, 1)
car1 = session.query(Car).get(car1.car_id)
usingGet = session.query(person_mapper).get(car1.owner)
usingProperty = car1.employee
assert str(engineer4) == "Engineer E4, status X"
assert str(usingGet) == "Engineer E4, status X"
assert str(usingProperty) == "Engineer E4, status X"
session.expunge_all()
# and now for the lightning round, eager !
def go():
testcar = session.query(Car).options(
joinedload('employee')
).get(car1.car_id)
assert str(testcar.employee) == "Engineer E4, status X"
self.assert_sql_count(testing.db, go, 1)
session.expunge_all()
s = session.query(Car)
c = s.join("employee").filter(Person.name=="E4")[0]
assert c.car_id==car1.car_id
class RelationshipTest5(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('longer_status', String(70)))
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('owner', Integer, ForeignKey('people.person_id')))
def test_eager_empty(self):
"""test parent object with child relationship to an inheriting mapper,
using eager loads, works when there are no child objects present"""
class Person(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, status %s" % \
(self.name, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, status %s" % \
(self.name, self.longer_status)
class Car(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Car number %d" % self.car_id
person_mapper = mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
car_mapper = mapper(Car, cars, properties= {
'manager':relationship(
manager_mapper, lazy='joined')})
sess = create_session()
car1 = Car()
car2 = Car()
car2.manager = Manager()
sess.add(car1)
sess.add(car2)
sess.flush()
sess.expunge_all()
carlist = sess.query(Car).all()
assert carlist[0].manager is None
assert carlist[1].manager.person_id == car2.manager.person_id
class RelationshipTest6(fixtures.MappedTest):
"""test self-referential relationships on a single joined-table
inheritance mapper"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('colleague_id', Integer,
ForeignKey('managers.person_id')),
Column('status', String(30)),
)
def test_basic(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
mapper(Person, people)
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==\
managers.c.person_id,
properties={
'colleague':relationship(Manager,
primaryjoin=managers.c.colleague_id==\
managers.c.person_id,
lazy='select', uselist=False)
}
)
sess = create_session()
m = Manager(name='manager1')
m2 =Manager(name='manager2')
m.colleague = m2
sess.add(m)
sess.flush()
sess.expunge_all()
m = sess.query(Manager).get(m.person_id)
m2 = sess.query(Manager).get(m2.person_id)
assert m.colleague is m2
class RelationshipTest7(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars, offroad_cars
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(30)))
offroad_cars = Table('offroad_cars', metadata,
Column('car_id',Integer, ForeignKey('cars.car_id'),
nullable=False,primary_key=True))
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('car_id', Integer, ForeignKey('cars.car_id'),
nullable=False),
Column('name', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('field', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('category', String(70)))
def test_manytoone_lazyload(self):
"""test that lazy load clause to a polymorphic child mapper generates
correctly [ticket:493]"""
class PersistentObject(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class Status(PersistentObject):
def __repr__(self):
return "Status %s" % self.name
class Person(PersistentObject):
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, field %s" % (self.name,
self.field)
class Manager(Person):
def __repr__(self):
return "Manager %s, category %s" % (self.name,
self.category)
class Car(PersistentObject):
def __repr__(self):
return "Car number %d, name %s" % \
(self.car_id, self.name)
        class Offroad_Car(Car):
def __repr__(self):
return "Offroad Car number %d, name %s" % \
(self.car_id,self.name)
employee_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, "type", 'employee_join')
car_join = polymorphic_union(
{
'car' : cars.outerjoin(offroad_cars).\
select(offroad_cars.c.car_id == None).reduce_columns(),
'offroad' : cars.join(offroad_cars)
}, "type", 'car_join')
car_mapper = mapper(Car, cars,
with_polymorphic=('*', car_join) ,polymorphic_on=car_join.c.type,
polymorphic_identity='car',
)
        offroad_car_mapper = mapper(Offroad_Car, offroad_cars,
inherits=car_mapper, polymorphic_identity='offroad')
person_mapper = mapper(Person, people,
with_polymorphic=('*', employee_join),
polymorphic_on=employee_join.c.type,
polymorphic_identity='person',
properties={
'car':relationship(car_mapper)
})
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
session = create_session()
basic_car=Car(name="basic")
        offroad_car=Offroad_Car(name="offroad")
for i in range(1,4):
if i%2:
car=Car()
else:
                car=Offroad_Car()
session.add(Manager(name="M%d" % i,
category="YYYYYYYYY",car=car))
session.add(Engineer(name="E%d" % i,field="X",car=car))
session.flush()
session.expunge_all()
r = session.query(Person).all()
for p in r:
assert p.car_id == p.car.car_id
class RelationshipTest8(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global taggable, users
taggable = Table('taggable', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30)),
Column('owner_id', Integer, ForeignKey('taggable.id')),
)
users = Table ('users', metadata,
Column('id', Integer, ForeignKey('taggable.id'),
primary_key=True),
Column('data', String(50)),
)
def test_selfref_onjoined(self):
class Taggable(fixtures.ComparableEntity):
pass
class User(Taggable):
pass
mapper( Taggable, taggable,
polymorphic_on=taggable.c.type,
polymorphic_identity='taggable',
properties = {
'owner' : relationship (User,
primaryjoin=taggable.c.owner_id ==taggable.c.id,
remote_side=taggable.c.id
),
})
mapper(User, users, inherits=Taggable,
polymorphic_identity='user',
inherit_condition=users.c.id == taggable.c.id,
)
u1 = User(data='u1')
t1 = Taggable(owner=u1)
sess = create_session()
sess.add(t1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Taggable).order_by(Taggable.id).all(),
[User(data='u1'), Taggable(owner=User(data='u1'))]
)
class GenerativeTest(fixtures.TestBase, AssertsExecutionResults):
@classmethod
def setup_class(cls):
# cars---owned by--- people (abstract) --- has a --- status
# | ^ ^ |
# | | | |
# | engineers managers |
# | |
# +--------------------------------------- has a ------+
global metadata, status, people, engineers, managers, cars
metadata = MetaData(testing.db)
# table definitions
status = Table('status', metadata,
Column('status_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(20)))
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('status_id', Integer, ForeignKey('status.status_id'),
nullable=False),
Column('name', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('field', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('category', String(70)))
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('status_id', Integer, ForeignKey('status.status_id'),
nullable=False),
Column('owner', Integer, ForeignKey('people.person_id'),
nullable=False))
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def teardown(self):
clear_mappers()
for t in reversed(metadata.sorted_tables):
t.delete().execute()
def testjointo(self):
# class definitions
class PersistentObject(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class Status(PersistentObject):
def __repr__(self):
return "Status %s" % self.name
class Person(PersistentObject):
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, field %s, status %s" % (
self.name, self.field, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, category %s, status %s" % (
self.name, self.category, self.status)
class Car(PersistentObject):
def __repr__(self):
return "Car number %d" % self.car_id
# create a union that represents both types of joins.
employee_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, "type", 'employee_join')
status_mapper = mapper(Status, status)
person_mapper = mapper(Person, people,
with_polymorphic=('*', employee_join),
polymorphic_on=employee_join.c.type,
polymorphic_identity='person',
properties={'status':relationship(status_mapper)})
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
car_mapper = mapper(Car, cars, properties= {
'employee':relationship(person_mapper),
'status':relationship(status_mapper)})
session = create_session()
active = Status(name="active")
dead = Status(name="dead")
session.add(active)
session.add(dead)
session.flush()
# TODO: we haven't created assertions for all
# the data combinations created here
        # creating 4 managers named M1 through M4
        # and 4 engineers named E1 through E4
        # M4 and E4 are dead
for i in range(1,5):
if i<4:
st=active
else:
st=dead
session.add(Manager(name="M%d" % i,
category="YYYYYYYYY",status=st))
session.add(Engineer(name="E%d" % i,field="X",status=st))
session.flush()
# get E4
engineer4 = session.query(engineer_mapper).\
filter_by(name="E4").one()
# create 2 cars for E4, one active and one dead
car1 = Car(employee=engineer4,status=active)
car2 = Car(employee=engineer4,status=dead)
session.add(car1)
session.add(car2)
session.flush()
# this particular adapt used to cause a recursion overflow;
# added here for testing
e = exists([Car.owner], Car.owner==employee_join.c.person_id)
Query(Person)._adapt_clause(employee_join, False, False)
r = session.query(Person).filter(Person.name.like('%2')).\
join('status').\
filter_by(name="active").\
order_by(Person.person_id)
eq_(str(list(r)), "[Manager M2, category YYYYYYYYY, status "
"Status active, Engineer E2, field X, "
"status Status active]")
r = session.query(Engineer).join('status').\
filter(Person.name.in_(
['E2', 'E3', 'E4', 'M4', 'M2', 'M1']) &
(status.c.name=="active")).order_by(Person.name)
eq_(str(list(r)), "[Engineer E2, field X, status Status "
"active, Engineer E3, field X, status "
"Status active]")
r = session.query(Person).filter(exists([1],
Car.owner==Person.person_id))
eq_(str(list(r)), "[Engineer E4, field X, status Status dead]")
class MultiLevelTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global table_Employee, table_Engineer, table_Manager
table_Employee = Table( 'Employee', metadata,
Column( 'name', type_= String(100), ),
Column( 'id', primary_key= True, type_= Integer,
test_needs_autoincrement=True),
Column( 'atype', type_= String(100), ),
)
table_Engineer = Table( 'Engineer', metadata,
Column( 'machine', type_= String(100), ),
Column( 'id', Integer, ForeignKey( 'Employee.id', ),
primary_key= True),
)
table_Manager = Table( 'Manager', metadata,
Column( 'duties', type_= String(100), ),
Column( 'id', Integer, ForeignKey( 'Engineer.id', ),
primary_key= True, ),
)
def test_threelevels(self):
class Employee( object):
def set( me, **kargs):
for k,v in kargs.items(): setattr( me, k, v)
return me
def __str__(me):
return str(me.__class__.__name__)+':'+str(me.name)
__repr__ = __str__
class Engineer(Employee):
pass
class Manager(Engineer):
pass
pu_Employee = polymorphic_union( {
'Manager': table_Employee.join(
table_Engineer).join( table_Manager),
'Engineer': select([table_Employee,
table_Engineer.c.machine],
table_Employee.c.atype == 'Engineer',
from_obj=[
table_Employee.join(table_Engineer)]),
'Employee': table_Employee.select(
table_Employee.c.atype == 'Employee'),
}, None, 'pu_employee', )
mapper_Employee = mapper( Employee, table_Employee,
polymorphic_identity= 'Employee',
polymorphic_on= pu_Employee.c.atype,
with_polymorphic=('*', pu_Employee),
)
pu_Engineer = polymorphic_union( {
'Manager': table_Employee.join( table_Engineer).
join( table_Manager),
'Engineer': select([table_Employee,
table_Engineer.c.machine],
table_Employee.c.atype == 'Engineer',
from_obj=[
table_Employee.join(table_Engineer)
]),
}, None, 'pu_engineer', )
mapper_Engineer = mapper( Engineer, table_Engineer,
inherit_condition= table_Engineer.c.id == \
table_Employee.c.id,
inherits= mapper_Employee,
polymorphic_identity= 'Engineer',
polymorphic_on= pu_Engineer.c.atype,
with_polymorphic=('*', pu_Engineer),
)
mapper_Manager = mapper( Manager, table_Manager,
inherit_condition= table_Manager.c.id == \
table_Engineer.c.id,
inherits= mapper_Engineer,
polymorphic_identity= 'Manager',
)
a = Employee().set( name= 'one')
        b = Engineer().set( name= 'two', machine= 'any')
c = Manager().set( name= 'head', machine= 'fast',
duties= 'many')
session = create_session()
session.add(a)
session.add(b)
session.add(c)
session.flush()
assert set(session.query(Employee).all()) == set([a,b,c])
assert set(session.query( Engineer).all()) == set([b,c])
assert session.query( Manager).all() == [c]
class ManyToManyPolyTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global base_item_table, item_table, base_item_collection_table, \
collection_table
base_item_table = Table(
'base_item', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('child_name', String(255), default=None))
item_table = Table(
'item', metadata,
Column('id', Integer, ForeignKey('base_item.id'),
primary_key=True),
Column('dummy', Integer, default=0))
base_item_collection_table = Table(
'base_item_collection', metadata,
Column('item_id', Integer, ForeignKey('base_item.id')),
Column('collection_id', Integer, ForeignKey('collection.id')))
collection_table = Table(
'collection', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', Unicode(255)))
def test_pjoin_compile(self):
"""test that remote_side columns in the secondary join table
        aren't attempted to be matched to the target polymorphic
selectable"""
class BaseItem(object): pass
class Item(BaseItem): pass
class Collection(object): pass
item_join = polymorphic_union( {
'BaseItem':base_item_table.select(
base_item_table.c.child_name=='BaseItem'),
'Item':base_item_table.join(item_table),
}, None, 'item_join')
mapper(
BaseItem, base_item_table,
with_polymorphic=('*', item_join),
polymorphic_on=base_item_table.c.child_name,
polymorphic_identity='BaseItem',
properties=dict(collections=relationship(Collection,
secondary=base_item_collection_table,
backref="items")))
mapper(
Item, item_table,
inherits=BaseItem,
polymorphic_identity='Item')
mapper(Collection, collection_table)
class_mapper(BaseItem)
class CustomPKTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global t1, t2
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30), nullable=False),
Column('data', String(30)))
# note that the primary key column in t2 is named differently
t2 = Table('t2', metadata,
Column('t2id', Integer, ForeignKey('t1.id'), primary_key=True),
Column('t2data', String(30)))
def test_custompk(self):
"""test that the primary_key attribute is propagated to the
polymorphic mapper"""
class T1(object):pass
class T2(T1):pass
# create a polymorphic union with the select against the base table first.
# with the join being second, the alias of the union will
# pick up two "primary key" columns. technically the alias should have a
# 2-col pk in any case but the leading select has a NULL for the "t2id" column
d = util.OrderedDict()
d['t1'] = t1.select(t1.c.type=='t1')
d['t2'] = t1.join(t2)
pjoin = polymorphic_union(d, None, 'pjoin')
mapper(T1, t1, polymorphic_on=t1.c.type,
polymorphic_identity='t1',
with_polymorphic=('*', pjoin),
primary_key=[pjoin.c.id])
mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
ot1 = T1()
ot2 = T2()
sess = create_session()
sess.add(ot1)
sess.add(ot2)
sess.flush()
sess.expunge_all()
# query using get(), using only one value.
# this requires the select_table mapper
# has the same single-col primary key.
assert sess.query(T1).get(ot1.id).id == ot1.id
ot1 = sess.query(T1).get(ot1.id)
ot1.data = 'hi'
sess.flush()
def test_pk_collapses(self):
"""test that a composite primary key attribute formed by a join
is "collapsed" into its minimal columns"""
class T1(object):pass
class T2(T1):pass
# create a polymorphic union with the select against the base table first.
# with the join being second, the alias of the union will
# pick up two "primary key" columns. technically the alias should have a
# 2-col pk in any case but the leading select has a NULL for the "t2id" column
d = util.OrderedDict()
d['t1'] = t1.select(t1.c.type=='t1')
d['t2'] = t1.join(t2)
pjoin = polymorphic_union(d, None, 'pjoin')
mapper(T1, t1, polymorphic_on=t1.c.type,
polymorphic_identity='t1',
with_polymorphic=('*', pjoin))
mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
assert len(class_mapper(T1).primary_key) == 1
ot1 = T1()
ot2 = T2()
sess = create_session()
sess.add(ot1)
sess.add(ot2)
sess.flush()
sess.expunge_all()
# query using get(), using only one value. this requires the
# select_table mapper
# has the same single-col primary key.
assert sess.query(T1).get(ot1.id).id == ot1.id
ot1 = sess.query(T1).get(ot1.id)
ot1.data = 'hi'
sess.flush()
class InheritingEagerTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, employees, tags, peopleTags
people = Table('people', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('_type', String(30), nullable=False),
)
employees = Table('employees', metadata,
Column('id', Integer, ForeignKey('people.id'),
primary_key=True),
)
tags = Table('tags', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('label', String(50), nullable=False),
)
peopleTags = Table('peopleTags', metadata,
Column('person_id', Integer,
ForeignKey('people.id')),
Column('tag_id', Integer,
ForeignKey('tags.id')),
)
def test_basic(self):
"""test that Query uses the full set of mapper._eager_loaders
when generating SQL"""
class Person(fixtures.ComparableEntity):
pass
class Employee(Person):
def __init__(self, name='bob'):
self.name = name
class Tag(fixtures.ComparableEntity):
def __init__(self, label):
self.label = label
mapper(Person, people, polymorphic_on=people.c._type,
polymorphic_identity='person', properties={
'tags': relationship(Tag,
secondary=peopleTags,
backref='people', lazy='joined')
})
mapper(Employee, employees, inherits=Person,
polymorphic_identity='employee')
mapper(Tag, tags)
session = create_session()
bob = Employee()
session.add(bob)
tag = Tag('crazy')
bob.tags.append(tag)
tag = Tag('funny')
bob.tags.append(tag)
session.flush()
session.expunge_all()
# query from Employee with limit, query needs to apply eager limiting subquery
instance = session.query(Employee).\
filter_by(id=1).limit(1).first()
assert len(instance.tags) == 2
class MissingPolymorphicOnTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
tablea = Table('tablea', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('adata', String(50)),
)
tableb = Table('tableb', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('aid', Integer, ForeignKey('tablea.id')),
Column('data', String(50)),
)
tablec = Table('tablec', metadata,
Column('id', Integer, ForeignKey('tablea.id'),
primary_key=True),
Column('cdata', String(50)),
)
tabled = Table('tabled', metadata,
Column('id', Integer, ForeignKey('tablec.id'),
primary_key=True),
Column('ddata', String(50)),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C(A):
pass
class D(C):
pass
def test_polyon_col_setsup(self):
tablea, tableb, tablec, tabled = self.tables.tablea, \
self.tables.tableb, self.tables.tablec, self.tables.tabled
A, B, C, D = self.classes.A, self.classes.B, self.classes.C, \
self.classes.D
poly_select = select(
[tablea, tableb.c.data.label('discriminator')],
from_obj=tablea.join(tableb)).alias('poly')
mapper(B, tableb)
mapper(A, tablea,
with_polymorphic=('*', poly_select),
polymorphic_on=poly_select.c.discriminator,
properties={
'b':relationship(B, uselist=False)
})
mapper(C, tablec, inherits=A,polymorphic_identity='c')
mapper(D, tabled, inherits=C, polymorphic_identity='d')
c = C(cdata='c1', adata='a1', b=B(data='c'))
d = D(cdata='c2', adata='a2', ddata='d2', b=B(data='d'))
sess = create_session()
sess.add(c)
sess.add(d)
sess.flush()
sess.expunge_all()
eq_(
sess.query(A).all(),
[
C(cdata='c1', adata='a1'),
D(cdata='c2', adata='a2', ddata='d2')
]
)
class JoinedInhAdjacencyTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('people', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30)),
)
Table('users', metadata,
Column('id', Integer, ForeignKey('people.id'),
primary_key=True),
Column('supervisor_id', Integer, ForeignKey('people.id')),
)
Table('dudes', metadata,
Column('id', Integer, ForeignKey('users.id'),
primary_key=True),
)
@classmethod
def setup_classes(cls):
class Person(cls.Comparable):
pass
class User(Person):
pass
class Dude(User):
pass
def _roundtrip(self):
Person, User = self.classes.Person, self.classes.User
sess = Session()
u1 = User()
u2 = User()
u2.supervisor = u1
sess.add_all([u1, u2])
sess.commit()
assert u2.supervisor is u1
def _dude_roundtrip(self):
Dude, User = self.classes.Dude, self.classes.User
sess = Session()
u1 = User()
d1 = Dude()
d1.supervisor = u1
sess.add_all([u1, d1])
sess.commit()
assert d1.supervisor is u1
def test_joined_to_base(self):
people, users = self.tables.people, self.tables.users
Person, User = self.classes.Person, self.classes.User
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
)
mapper(User, users, inherits=Person,
polymorphic_identity='user',
inherit_condition=(users.c.id == people.c.id),
properties = {
'supervisor': relationship(Person,
primaryjoin=users.c.supervisor_id==people.c.id,
),
}
)
assert User.supervisor.property.direction is MANYTOONE
self._roundtrip()
def test_joined_to_same_subclass(self):
people, users = self.tables.people, self.tables.users
Person, User = self.classes.Person, self.classes.User
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
)
mapper(User, users, inherits=Person,
polymorphic_identity='user',
inherit_condition=(users.c.id == people.c.id),
properties = {
'supervisor': relationship(User,
primaryjoin=users.c.supervisor_id==people.c.id,
remote_side=people.c.id,
foreign_keys=[users.c.supervisor_id]
),
}
)
assert User.supervisor.property.direction is MANYTOONE
self._roundtrip()
def test_joined_subclass_to_superclass(self):
people, users, dudes = self.tables.people, self.tables.users, \
self.tables.dudes
Person, User, Dude = self.classes.Person, self.classes.User, \
self.classes.Dude
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
)
mapper(User, users, inherits=Person,
polymorphic_identity='user',
inherit_condition=(users.c.id == people.c.id),
)
mapper(Dude, dudes, inherits=User,
polymorphic_identity='dude',
inherit_condition=(dudes.c.id==users.c.id),
properties={
'supervisor': relationship(User,
primaryjoin=users.c.supervisor_id==people.c.id,
remote_side=people.c.id,
foreign_keys=[users.c.supervisor_id]
),
}
)
assert Dude.supervisor.property.direction is MANYTOONE
self._dude_roundtrip()
class Ticket2419Test(fixtures.DeclarativeMappedTest):
"""Test [ticket:2419]'s test case."""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
ds = relationship("D")
es = relationship("E")
class C(A):
__tablename__ = "c"
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
b_id = Column(Integer, ForeignKey('b.id'))
b = relationship("B", primaryjoin=b_id==B.id)
class D(Base):
__tablename__ = "d"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
b_id = Column(Integer, ForeignKey('b.id'))
class E(Base):
__tablename__ = 'e'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
b_id = Column(Integer, ForeignKey('b.id'))
@testing.fails_on("oracle",
"seems like oracle's query engine can't "
"handle this, not clear if there's an "
"expression-level bug on our end though")
def test_join_w_eager_w_any(self):
A, B, C, D, E = self.classes.A, self.classes.B, \
self.classes.C, self.classes.D, \
self.classes.E
s = Session(testing.db)
b = B(ds=[D()])
s.add_all([
C(
b=b
)
])
s.commit()
q = s.query(B, B.ds.any(D.id==1)).options(joinedload_all("es"))
q = q.join(C, C.b_id==B.id)
q = q.limit(5)
eq_(
q.all(),
[(b, True)]
)
|
|
"""
test all other .agg behavior
"""
import datetime as dt
from functools import partial
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
date_range,
period_range,
)
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.io.formats.printing import pprint_thing
def test_agg_api():
# GH 6337
# https://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame(
{
"data1": np.random.randn(5),
"data2": np.random.randn(5),
"key1": ["a", "a", "b", "b", "a"],
"key2": ["one", "two", "one", "two", "one"],
}
)
grouped = df.groupby("key1")
def peak_to_peak(arr):
return arr.max() - arr.min()
with tm.assert_produces_warning(
FutureWarning,
match=r"\['key2'\] did not aggregate successfully",
):
expected = grouped.agg([peak_to_peak])
expected.columns = ["data1", "data2"]
with tm.assert_produces_warning(
FutureWarning,
match=r"\['key2'\] did not aggregate successfully",
):
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
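# Hedged aside (not part of the original suite): a minimal sketch of the shape
# difference the test above works around -- ``agg(func)`` keeps flat columns,
# while ``agg([func])`` returns a (column, function-name) MultiIndex, which the
# test flattens by reassigning ``expected.columns``. The helper name
# ``_agg_func_vs_list_sketch`` is invented for illustration.
def _agg_func_vs_list_sketch():
    df = DataFrame({"key": ["a", "a", "b"], "data": [1.0, 3.0, 5.0]})
    grouped = df.groupby("key")
    def spread(arr):
        return arr.max() - arr.min()
    flat = grouped.agg(spread)  # columns: Index(["data"])
    nested = grouped.agg([spread])  # columns: MultiIndex([("data", "spread")])
    return flat, nested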
def test_agg_datetimes_mixed():
data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
df1 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
data = [
[
row[0],
(dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
row[2],
]
for row in data
]
df2 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
df1["weights"] = df1["value"] / df1["value"].sum()
gb1 = df1.groupby("date").aggregate(np.sum)
df2["weights"] = df1["value"] / df1["value"].sum()
gb2 = df2.groupby("date").aggregate(np.sum)
assert len(gb1) == len(gb2)
def test_agg_period_index():
prng = period_range("2012-1-1", freq="M", periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start="1999-01", periods=5, freq="M")
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
df = DataFrame.from_dict({"s1": s1, "s2": s2})
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
# GH 12821
df = DataFrame(
{
"class": ["A", "A", "B", "B", "C", "C", "D", "D"],
"time": date_range("1/1/2011", periods=8, freq="H"),
}
)
df.loc[[0, 1, 2, 5], "time"] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index("class")
grouped = df.groupby("class")
tm.assert_frame_equal(grouped.first(), exp)
tm.assert_frame_equal(grouped.agg("first"), exp)
tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
tm.assert_series_equal(grouped.time.first(), exp["time"])
tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index("class")
grouped = df.groupby("class")
tm.assert_frame_equal(grouped.last(), exp)
tm.assert_frame_equal(grouped.agg("last"), exp)
tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
tm.assert_series_equal(grouped.time.last(), exp["time"])
tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
# count
exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
# similar to GH12821
# xref #11444
u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
v = list("aaabbbbbbccd")
df = DataFrame({"X": v, "Y": u})
result = df.groupby("X")["Y"].agg(len)
expected = df.groupby("X")["Y"].count()
tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
grouped = df.groupby(["A", "B"])
c_mean = grouped["C"].mean()
c_sum = grouped["C"].sum()
d_mean = grouped["D"].mean()
d_sum = grouped["D"].sum()
result = grouped["D"].agg(["sum", "mean"])
expected = pd.concat([d_sum, d_mean], axis=1)
expected.columns = ["sum", "mean"]
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped[["D", "C"]].agg([np.sum, np.mean])
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({"C": "mean", "D": "sum"})
expected = pd.concat([d_sum, c_mean], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
msg = r"Column\(s\) \['r', 'r2'\] do not exist"
with pytest.raises(KeyError, match=msg):
grouped[["D", "C"]].agg({"r": np.sum, "r2": np.mean})
def test_agg_dict_renaming_deprecation():
# 15931
df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
df.groupby("A").agg(
{"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
)
msg = r"Column\(s\) \['ma'\] do not exist"
with pytest.raises(KeyError, match=msg):
df.groupby("A")[["B", "C"]].agg({"ma": "max"})
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
df.groupby("A").B.agg({"foo": "count"})
def test_agg_compat():
# GH 12334
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
g = df.groupby(["A", "B"])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
g["D"].agg({"C": ["sum", "std"]})
with pytest.raises(SpecificationError, match=msg):
g["D"].agg({"C": "sum", "D": "std"})
def test_agg_nested_dicts():
# API change for disallowing these types of nested dicts
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
g = df.groupby(["A", "B"])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
with pytest.raises(SpecificationError, match=msg):
g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
# same name as the original column
# GH9052
with pytest.raises(SpecificationError, match=msg):
g["D"].agg({"result1": np.sum, "result2": np.mean})
with pytest.raises(SpecificationError, match=msg):
g["D"].agg({"D": np.sum, "result2": np.mean})
def test_agg_item_by_item_raise_typeerror():
df = DataFrame(np.random.randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing("----------------------------------------")
pprint_thing(df.to_string())
raise TypeError("test")
with pytest.raises(TypeError, match="test"):
with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
def bad(x):
assert len(x.values.base) > 0
return "foo"
result = data.groupby(["A", "B"]).agg(bad)
expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
tm.assert_frame_equal(result, expected)
def test_agg_consistency():
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
return np.percentile(a.dropna(), q=1)
df = DataFrame(
{
"col1": [1, 2, 3, 4],
"col2": [10, 25, 26, 31],
"date": [
dt.date(2013, 2, 10),
dt.date(2013, 2, 10),
dt.date(2013, 2, 11),
dt.date(2013, 2, 11),
],
}
)
g = df.groupby("date")
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
tm.assert_frame_equal(result, expected)
def test_agg_callables():
# GH 7929
df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
class fn_class:
def __call__(self, x):
return sum(x)
equiv_callables = [
sum,
np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum),
fn_class(),
]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby("foo").agg(ecall)
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) columns with ndarrays
def test_agg_over_numpy_arrays():
# GH 3788
df = DataFrame(
[
[1, np.array([10, 20, 30])],
[1, np.array([40, 50, 60])],
[2, np.array([20, 30, 40])],
],
columns=["category", "arraydata"],
)
gb = df.groupby("category")
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
expected_index = Index([1, 2], name="category")
expected_column = ["arraydata"]
expected = DataFrame(expected_data, index=expected_index, columns=expected_column)
alt = gb.sum(numeric_only=False)
tm.assert_frame_equal(alt, expected)
result = gb.agg("sum", numeric_only=False)
tm.assert_frame_equal(result, expected)
# FIXME: the original version of this test called `gb.agg(sum)`
# and that raises TypeError if `numeric_only=False` is passed
@pytest.mark.parametrize("as_period", [True, False])
def test_agg_tzaware_non_datetime_result(as_period):
# discussed in GH#29589, fixed in GH#29641, operating on tzaware values
# with function that is not dtype-preserving
dti = date_range("2012-01-01", periods=4, tz="UTC")
if as_period:
dti = dti.tz_localize(None).to_period("D")
df = DataFrame({"a": [0, 0, 1, 1], "b": dti})
gb = df.groupby("a")
# Case that _does_ preserve the dtype
result = gb["b"].agg(lambda x: x.iloc[0])
expected = Series(dti[::2], name="b")
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# Cases that do _not_ preserve the dtype
result = gb["b"].agg(lambda x: x.iloc[0].year)
expected = Series([2012, 2012], name="b")
expected.index.name = "a"
tm.assert_series_equal(result, expected)
result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0])
expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b")
expected.index.name = "a"
if as_period:
expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b")
expected.index.name = "a"
tm.assert_series_equal(result, expected)
def test_agg_timezone_round_trip():
# GH 15426
ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})
result1 = df.groupby("a")["b"].agg(np.min).iloc[0]
result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
result3 = df.groupby("a")["b"].min().iloc[0]
assert result1 == ts
assert result2 == ts
assert result3 == ts
dates = [
pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)
]
df = DataFrame({"A": ["a", "b"] * 2, "B": dates})
grouped = df.groupby("A")
ts = df["B"].iloc[0]
assert ts == grouped.nth(0)["B"].iloc[0]
assert ts == grouped.head(1)["B"].iloc[0]
assert ts == grouped.first()["B"].iloc[0]
# GH#27110 applying iloc should return a DataFrame
assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]
ts = df["B"].iloc[2]
assert ts == grouped.last()["B"].iloc[0]
# GH#27110 applying iloc should return a DataFrame
assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]
def test_sum_uint64_overflow():
# see gh-14758
# Convert to uint64 and don't overflow
df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
index = Index(
[9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
)
expected = DataFrame(
{1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
index=index,
)
expected.index.name = 0
result = df.groupby(0).sum(numeric_only=False)
tm.assert_frame_equal(result, expected)
# out column is non-numeric, so with numeric_only=True it is dropped
result2 = df.groupby(0).sum(numeric_only=True)
expected2 = expected[[]]
tm.assert_frame_equal(result2, expected2)
@pytest.mark.parametrize(
"structure, expected",
[
(tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
(list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
(
lambda x: tuple(x),
DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
),
(
lambda x: list(x),
DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
),
],
)
def test_agg_structs_dataframe(structure, expected):
df = DataFrame(
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
)
result = df.groupby(["A", "B"]).aggregate(structure)
expected.index.names = ["A", "B"]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"structure, expected",
[
(tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
(list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
(lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
(lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
],
)
def test_agg_structs_series(structure, expected):
# Issue #18079
df = DataFrame(
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
)
result = df.groupby("A")["C"].aggregate(structure)
expected.index.name = "A"
tm.assert_series_equal(result, expected)
def test_agg_category_nansum(observed):
categories = ["a", "b", "c"]
df = DataFrame(
{"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
)
result = df.groupby("A", observed=observed).B.agg(np.nansum)
expected = Series(
[3, 3, 0],
index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
name="B",
)
if observed:
expected = expected[expected != 0]
tm.assert_series_equal(result, expected)
def test_agg_list_like_func():
# GH 18473
df = DataFrame({"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]})
grouped = df.groupby("A", as_index=False, sort=False)
result = grouped.agg({"B": lambda x: list(x)})
expected = DataFrame(
{"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
)
tm.assert_frame_equal(result, expected)
def test_agg_lambda_with_timezone():
# GH 23683
df = DataFrame(
{
"tag": [1, 1],
"date": [
pd.Timestamp("2018-01-01", tz="UTC"),
pd.Timestamp("2018-01-02", tz="UTC"),
],
}
)
result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
expected = DataFrame(
[pd.Timestamp("2018-01-01", tz="UTC")],
index=Index([1], name="tag"),
columns=["date"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"err_cls",
[
NotImplementedError,
RuntimeError,
KeyError,
IndexError,
OSError,
ValueError,
ArithmeticError,
AttributeError,
],
)
def test_groupby_agg_err_catching(err_cls):
# make sure we suppress anything other than TypeError or AssertionError
# in _python_agg_general
# Use a non-standard EA to make sure we don't go down ndarray paths
from pandas.tests.extension.decimal.array import (
DecimalArray,
make_data,
to_decimal,
)
data = make_data()[:5]
df = DataFrame(
{"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
)
expected = Series(to_decimal([data[0], data[3]]))
def weird_func(x):
        # weird function that raises something other than TypeError or IndexError
# in _python_agg_general
if len(x) == 0:
raise err_cls
return x.iloc[0]
result = df["decimals"].groupby(df["id1"]).agg(weird_func)
tm.assert_series_equal(result, expected, check_names=False)
|
|
"""Commands part of Websocket API."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_READ
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.const import EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, MATCH_ALL
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import (
HomeAssistantError,
ServiceNotFound,
TemplateError,
Unauthorized,
)
from homeassistant.helpers import config_validation as cv, entity
from homeassistant.helpers.event import TrackTemplate, async_track_template_result
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.loader import IntegrationNotFound, async_get_integration
from . import const, decorators, messages
_LOGGER = logging.getLogger(__name__)
# mypy: allow-untyped-calls, allow-untyped-defs
@callback
def async_register_commands(hass, async_reg):
"""Register commands."""
async_reg(hass, handle_subscribe_events)
async_reg(hass, handle_unsubscribe_events)
async_reg(hass, handle_call_service)
async_reg(hass, handle_get_states)
async_reg(hass, handle_get_services)
async_reg(hass, handle_get_config)
async_reg(hass, handle_ping)
async_reg(hass, handle_render_template)
async_reg(hass, handle_manifest_list)
async_reg(hass, handle_manifest_get)
async_reg(hass, handle_entity_source)
async_reg(hass, handle_subscribe_trigger)
async_reg(hass, handle_test_condition)
def pong_message(iden):
"""Return a pong message."""
return {"id": iden, "type": "pong"}
@callback
@decorators.websocket_command(
{
vol.Required("type"): "subscribe_events",
vol.Optional("event_type", default=MATCH_ALL): str,
}
)
def handle_subscribe_events(hass, connection, msg):
"""Handle subscribe events command."""
# Circular dep
# pylint: disable=import-outside-toplevel
from .permissions import SUBSCRIBE_WHITELIST
event_type = msg["event_type"]
if event_type not in SUBSCRIBE_WHITELIST and not connection.user.is_admin:
raise Unauthorized
if event_type == EVENT_STATE_CHANGED:
@callback
def forward_events(event):
"""Forward state changed events to websocket."""
if not connection.user.permissions.check_entity(
event.data["entity_id"], POLICY_READ
):
return
connection.send_message(messages.event_message(msg["id"], event))
else:
@callback
def forward_events(event):
"""Forward events to websocket."""
if event.event_type == EVENT_TIME_CHANGED:
return
connection.send_message(messages.event_message(msg["id"], event.as_dict()))
connection.subscriptions[msg["id"]] = hass.bus.async_listen(
event_type, forward_events
)
connection.send_message(messages.result_message(msg["id"]))
@callback
@decorators.websocket_command(
{
vol.Required("type"): "unsubscribe_events",
vol.Required("subscription"): cv.positive_int,
}
)
def handle_unsubscribe_events(hass, connection, msg):
"""Handle unsubscribe events command."""
subscription = msg["subscription"]
if subscription in connection.subscriptions:
connection.subscriptions.pop(subscription)()
connection.send_message(messages.result_message(msg["id"]))
else:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_NOT_FOUND, "Subscription not found."
)
)
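# Hedged, framework-free sketch (not part of the Home Assistant API): the two
# handlers above follow a simple pattern -- subscribing stores an unsubscribe
# callable keyed by the message id, and unsubscribing pops that callable and
# invokes it. The class name ``_SubscriptionRegistrySketch`` is invented here.
class _SubscriptionRegistrySketch:
    """Illustrative stand-in for ``connection.subscriptions``."""
    def __init__(self):
        self._unsubs = {}
    def subscribe(self, msg_id, unsub):
        # ``unsub`` plays the role of the callable returned by
        # ``hass.bus.async_listen`` in handle_subscribe_events above.
        self._unsubs[msg_id] = unsub
    def unsubscribe(self, msg_id):
        # Mirrors ``connection.subscriptions.pop(subscription)()``.
        unsub = self._unsubs.pop(msg_id, None)
        if unsub is None:
            return False
        unsub()
        return True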
@decorators.websocket_command(
{
vol.Required("type"): "call_service",
vol.Required("domain"): str,
vol.Required("service"): str,
vol.Optional("service_data"): dict,
}
)
@decorators.async_response
async def handle_call_service(hass, connection, msg):
"""Handle call service command."""
blocking = True
if msg["domain"] == HASS_DOMAIN and msg["service"] in ["restart", "stop"]:
blocking = False
try:
await hass.services.async_call(
msg["domain"],
msg["service"],
msg.get("service_data"),
blocking,
connection.context(msg),
)
connection.send_message(
messages.result_message(msg["id"], {"context": connection.context(msg)})
)
except ServiceNotFound as err:
if err.domain == msg["domain"] and err.service == msg["service"]:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_NOT_FOUND, "Service not found."
)
)
else:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_HOME_ASSISTANT_ERROR, str(err)
)
)
except HomeAssistantError as err:
connection.logger.exception(err)
connection.send_message(
messages.error_message(msg["id"], const.ERR_HOME_ASSISTANT_ERROR, str(err))
)
except Exception as err: # pylint: disable=broad-except
connection.logger.exception(err)
connection.send_message(
messages.error_message(msg["id"], const.ERR_UNKNOWN_ERROR, str(err))
)
@callback
@decorators.websocket_command({vol.Required("type"): "get_states"})
def handle_get_states(hass, connection, msg):
"""Handle get states command."""
if connection.user.permissions.access_all_entities("read"):
states = hass.states.async_all()
else:
entity_perm = connection.user.permissions.check_entity
states = [
state
for state in hass.states.async_all()
if entity_perm(state.entity_id, "read")
]
connection.send_message(messages.result_message(msg["id"], states))
@decorators.websocket_command({vol.Required("type"): "get_services"})
@decorators.async_response
async def handle_get_services(hass, connection, msg):
"""Handle get services command."""
descriptions = await async_get_all_descriptions(hass)
connection.send_message(messages.result_message(msg["id"], descriptions))
@callback
@decorators.websocket_command({vol.Required("type"): "get_config"})
def handle_get_config(hass, connection, msg):
"""Handle get config command."""
connection.send_message(messages.result_message(msg["id"], hass.config.as_dict()))
@decorators.websocket_command({vol.Required("type"): "manifest/list"})
@decorators.async_response
async def handle_manifest_list(hass, connection, msg):
"""Handle integrations command."""
integrations = await asyncio.gather(
*[
async_get_integration(hass, domain)
for domain in hass.config.components
# Filter out platforms.
if "." not in domain
]
)
connection.send_result(
msg["id"], [integration.manifest for integration in integrations]
)
@decorators.websocket_command(
{vol.Required("type"): "manifest/get", vol.Required("integration"): str}
)
@decorators.async_response
async def handle_manifest_get(hass, connection, msg):
"""Handle integrations command."""
try:
integration = await async_get_integration(hass, msg["integration"])
connection.send_result(msg["id"], integration.manifest)
except IntegrationNotFound:
connection.send_error(msg["id"], const.ERR_NOT_FOUND, "Integration not found")
@callback
@decorators.websocket_command({vol.Required("type"): "ping"})
def handle_ping(hass, connection, msg):
"""Handle ping command."""
connection.send_message(pong_message(msg["id"]))
@callback
@decorators.websocket_command(
{
vol.Required("type"): "render_template",
vol.Required("template"): cv.template,
vol.Optional("entity_ids"): cv.entity_ids,
vol.Optional("variables"): dict,
}
)
def handle_render_template(hass, connection, msg):
"""Handle render_template command."""
template = msg["template"]
template.hass = hass
variables = msg.get("variables")
info = None
@callback
def _template_listener(event, updates):
nonlocal info
track_template_result = updates.pop()
result = track_template_result.result
if isinstance(result, TemplateError):
_LOGGER.error(
"TemplateError('%s') " "while processing template '%s'",
result,
track_template_result.template,
)
result = None
connection.send_message(
messages.event_message(
msg["id"], {"result": result, "listeners": info.listeners} # type: ignore
)
)
info = async_track_template_result(
hass, [TrackTemplate(template, variables)], _template_listener
)
connection.subscriptions[msg["id"]] = info.async_remove
connection.send_result(msg["id"])
hass.loop.call_soon_threadsafe(info.async_refresh)
@callback
@decorators.websocket_command(
{vol.Required("type"): "entity/source", vol.Optional("entity_id"): [cv.entity_id]}
)
def handle_entity_source(hass, connection, msg):
"""Handle entity source command."""
raw_sources = entity.entity_sources(hass)
entity_perm = connection.user.permissions.check_entity
if "entity_id" not in msg:
if connection.user.permissions.access_all_entities("read"):
sources = raw_sources
else:
sources = {
entity_id: source
for entity_id, source in raw_sources.items()
if entity_perm(entity_id, "read")
}
connection.send_message(messages.result_message(msg["id"], sources))
return
sources = {}
for entity_id in msg["entity_id"]:
if not entity_perm(entity_id, "read"):
raise Unauthorized(
context=connection.context(msg),
permission=POLICY_READ,
perm_category=CAT_ENTITIES,
)
source = raw_sources.get(entity_id)
if source is None:
connection.send_error(msg["id"], ERR_NOT_FOUND, "Entity not found")
return
sources[entity_id] = source
connection.send_result(msg["id"], sources)
@callback
@decorators.websocket_command(
{
vol.Required("type"): "subscribe_trigger",
vol.Required("trigger"): cv.TRIGGER_SCHEMA,
vol.Optional("variables"): dict,
}
)
@decorators.require_admin
@decorators.async_response
async def handle_subscribe_trigger(hass, connection, msg):
"""Handle subscribe trigger command."""
# Circular dep
# pylint: disable=import-outside-toplevel
from homeassistant.helpers import trigger
trigger_config = await trigger.async_validate_trigger_config(hass, msg["trigger"])
@callback
def forward_triggers(variables, context=None):
"""Forward events to websocket."""
connection.send_message(
messages.event_message(
msg["id"], {"variables": variables, "context": context}
)
)
connection.subscriptions[msg["id"]] = (
await trigger.async_initialize_triggers(
hass,
trigger_config,
forward_triggers,
const.DOMAIN,
const.DOMAIN,
connection.logger.log,
variables=msg.get("variables"),
)
) or (
# Some triggers won't return an unsub function. Since the caller expects
# a subscription, we're going to fake one.
lambda: None
)
connection.send_result(msg["id"])
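# Hedged aside (illustration only): the ``or (lambda: None)`` fallback above is
# the plain "callable or no-op" idiom, so a subscription entry always holds
# something invokable. Sketched here outside the framework; the helper name
# ``_unsub_or_noop`` is invented.
def _unsub_or_noop(maybe_unsub):
    # Return the real unsubscribe callable when the trigger provided one,
    # otherwise a do-nothing stand-in that is still safe to call.
    return maybe_unsub if maybe_unsub is not None else (lambda: None)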
@decorators.websocket_command(
{
vol.Required("type"): "test_condition",
vol.Required("condition"): cv.CONDITION_SCHEMA,
vol.Optional("variables"): dict,
}
)
@decorators.require_admin
@decorators.async_response
async def handle_test_condition(hass, connection, msg):
"""Handle test condition command."""
# Circular dep
# pylint: disable=import-outside-toplevel
from homeassistant.helpers import condition
check_condition = await condition.async_from_config(hass, msg["condition"])
connection.send_result(
msg["id"], {"result": check_condition(hass, msg.get("variables"))}
)
|
|
##########################################################################
#
# Copyright (c) 2012-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import weakref
import threading
import functools
import IECore
import Gaffer
import GafferUI
class BrowserEditor( GafferUI.Editor ) :
def __init__( self, scriptNode, **kw ) :
self.__column = GafferUI.ListContainer( borderWidth = 8, spacing = 6 )
GafferUI.Editor.__init__( self, self.__column, scriptNode, **kw )
with self.__column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 6 ) :
GafferUI.Label( "Mode" )
modeMenu = GafferUI.MultiSelectionMenu(
allowMultipleSelection = False,
allowEmptySelection = False,
)
for mode in self.__modes :
modeMenu.append( mode[0] )
modeMenu.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__modeChanged ), scoped = False )
self.__pathChooser = GafferUI.PathChooserWidget( Gaffer.DictPath( {}, "/" ), previewTypes=GafferUI.PathPreviewWidget.types() )
self.__pathChooser.pathWidget().setVisible( False )
self.__modeInstances = {}
self.__currentModeInstance = None
modeMenu.setSelection( [ self.__modes[0][0] ] )
## Returns the PathChooserWidget which forms the majority of this ui.
def pathChooser( self ) :
return self.__pathChooser
def __repr__( self ) :
return "GafferUI.BrowserEditor( scriptNode )"
def __modeChanged( self, modeMenu ) :
label = modeMenu.getSelection()[0]
if label not in self.__modeInstances :
for mode in self.__modes :
if mode[0] == label :
self.__modeInstances[label] = mode[1]( self )
break
if self.__currentModeInstance is not None :
self.__currentModeInstance.disconnect()
self.__currentModeInstance = self.__modeInstances[label]
self.__currentModeInstance.connect()
class Mode( object ) :
def __init__( self, browser, splitPosition = 0.5 ) :
self.__browser = weakref.ref( browser ) # avoid circular references
self.__directoryPath = None
self.__displayMode = None
self.__splitPosition = splitPosition
# create the op matcher on a separate thread, as it may take a while to trawl
# through all the available ops.
self.__opMatcher = "__loading__"
threading.Thread( target = self.__createOpMatcher ).start()
def browser( self ) :
return self.__browser()
def connect( self ) :
if self.__directoryPath is None :
self.__directoryPath = self._initialPath()
self.__displayMode = self._initialDisplayMode()
self.__columns = self._initialColumns()
# we need a little bit of jiggery pokery, because the PathChooserWidget edits
# the main path as a leaf path, and we're more interested in setting the current
# directory.
pathElements = self.__directoryPath[:]
self.browser().pathChooser().setPath( self.__directoryPath )
self.browser().pathChooser().directoryPathWidget().getPath()[:] = pathElements
self.browser().pathChooser().pathListingWidget().setDisplayMode( self.__displayMode )
self.browser().pathChooser().pathListingWidget().setColumns( self.__columns )
# we've potentially changed to an entirely different type of path, so we must make
# sure that the bookmarks we use are suitable for that. we do this here in the base
# class to make sure that the path and bookmarks never get out of sync, but derived
# classes can still modify the bookmarks if they know better.
self.browser().pathChooser().setBookmarks(
GafferUI.Bookmarks.acquire(
self.browser().scriptNode(),
pathType = self.__directoryPath.__class__
)
)
self.__contextMenuConnection = self.browser().pathChooser().pathListingWidget().contextMenuSignal().connect(
Gaffer.WeakMethod( self.__contextMenu ), scoped = True
)
splitContainer = self.browser().pathChooser().pathListingWidget().ancestor( GafferUI.SplitContainer )
splitContainer.setSizes( ( self.__splitPosition, 1.0 - self.__splitPosition ) )
def disconnect( self ) :
self.__directoryPath[:] = self.browser().pathChooser().directoryPathWidget().getPath()[:]
self.__displayMode = self.browser().pathChooser().pathListingWidget().getDisplayMode()
self.__contextMenuConnection = None
# store current split position so we can restore it in connect()
splitContainer = self.browser().pathChooser().pathListingWidget().ancestor( GafferUI.SplitContainer )
sizes = splitContainer.getSizes()
self.__splitPosition = float( sizes[0] ) / sum( sizes )
## Must be implemented by derived classes to return the initial directory path to be viewed.
def _initialPath( self ) :
raise NotImplementedError
## May be reimplemented by derived classes to change the initial display mode of the path listing
def _initialDisplayMode( self ) :
return GafferUI.PathListingWidget.DisplayMode.List
## Must be reimplemented by derived classes to specify the columns to be displayed in the PathListingWidget.
def _initialColumns( self ) :
raise NotImplementedError
## May be reimplemented by derived classes to return a custom OpMatcher to be used
# to provide action menu items for the ui.
def _createOpMatcher( self ) :
try :
import GafferCortex
except ImportError :
return None
## \todo Remove dependency on GafferCortex. Consider removing OpMatcher
# entirely and introducing mechanism for matching TaskNodes to files
# instead.
return GafferCortex.OpMatcher.defaultInstance()
def __contextMenu( self, pathListing ) :
if self.__opMatcher is None :
return False
menuDefinition = IECore.MenuDefinition()
if self.__opMatcher == "__loading__" :
menuDefinition.append( "/Loading actions...", { "active" : False } )
else :
selectedPaths = pathListing.getSelectedPaths()
if len( selectedPaths ) == 1 :
parameterValue = selectedPaths[0]
else :
parameterValue = selectedPaths
menuDefinition.append( "/Actions", { "subMenu" : functools.partial( Gaffer.WeakMethod( self.__actionsSubMenu ), parameterValue ) } )
self.__menu = GafferUI.Menu( menuDefinition )
if len( menuDefinition.items() ) :
self.__menu.popup( parent = pathListing.ancestor( GafferUI.Window ) )
return True
def __actionsSubMenu( self, parameterValue ) :
menuDefinition = IECore.MenuDefinition()
ops = self.__opMatcher.matches( parameterValue )
if len( ops ) :
for op, parameter in ops :
menuDefinition.append( "/%s (%s)" % ( op.typeName(), parameter.name ), { "command" : self.__opDialogueCommand( op ) } )
else :
menuDefinition.append( "/None available", { "active" : False } )
return menuDefinition
def __createOpMatcher( self ) :
self.__opMatcher = self._createOpMatcher()
def __opDialogueCommand( self, op ) :
def showDialogue( menu ) :
## \todo Remove dependency on GafferCortexUI. See `_createOpMatcher()`.
import GafferCortexUI
dialogue = GafferCortexUI.OpDialogue(
op,
postExecuteBehaviour = GafferCortexUI.OpDialogue.PostExecuteBehaviour.Close,
executeInBackground=True
)
dialogue.waitForResult( parentWindow = menu.ancestor( GafferUI.Window ) )
return showDialogue
__modes = []
@classmethod
def registerMode( cls, label, modeCreator ) :
# first remove any existing modes of the same label
cls.__modes = [ m for m in cls.__modes if m[0] != label ]
cls.__modes.append( ( label, modeCreator ) )
GafferUI.Editor.registerType( "Browser", BrowserEditor )
class FileSystemMode( BrowserEditor.Mode ) :
def __init__( self, browser ) :
BrowserEditor.Mode.__init__( self, browser )
def _initialPath( self ) :
return Gaffer.FileSystemPath(
os.getcwd(),
filter = Gaffer.FileSystemPath.createStandardFilter(),
)
def _initialColumns( self ) :
return list( GafferUI.PathListingWidget.defaultFileSystemColumns )
BrowserEditor.registerMode( "Files", FileSystemMode )
BrowserEditor.FileSystemMode = FileSystemMode
class FileSequenceMode( BrowserEditor.Mode ) :
def __init__( self, browser ) :
BrowserEditor.Mode.__init__( self, browser )
def connect( self ) :
BrowserEditor.Mode.connect( self )
# we want to share our bookmarks with the non-sequence filesystem paths
self.browser().pathChooser().setBookmarks(
GafferUI.Bookmarks.acquire(
self.browser().scriptNode(),
pathType = Gaffer.FileSystemPath
)
)
def _initialPath( self ) :
return Gaffer.SequencePath(
Gaffer.FileSystemPath( os.getcwd() ),
filter = Gaffer.FileSystemPath.createStandardFilter(),
)
def _initialColumns( self ) :
return list( GafferUI.PathListingWidget.defaultFileSystemColumns )
BrowserEditor.registerMode( "File Sequences", FileSequenceMode )
BrowserEditor.FileSequenceMode = FileSequenceMode
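# Illustrative sketch (not part of the original file) : a hypothetical custom mode
# follows the same pattern as FileSystemMode above - subclass BrowserEditor.Mode,
# implement _initialPath() and _initialColumns(), then register it under a label.
# The class name and root path below are placeholders.
#
#	class MyCustomMode( BrowserEditor.Mode ) :
#
#		def _initialPath( self ) :
#			return Gaffer.FileSystemPath( "/some/root", filter = Gaffer.FileSystemPath.createStandardFilter() )
#
#		def _initialColumns( self ) :
#			return list( GafferUI.PathListingWidget.defaultFileSystemColumns )
#
#	BrowserEditor.registerMode( "My Custom Mode", MyCustomMode )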
|
|
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import subprocess
# # # # # # # # # # # # #
# Function definitions. #
# # # # # # # # # # # # #
# Range function for floats.
def frange(start, end=None, inc=None):
"A range function, that does accept floats"
if end is None:
end = start + 0.0
start = 0.0
else: start += 0.0 # force it to be a float
if inc is None:
inc = 1.0
count = int((end - start) / inc)
if start + count * inc != end + inc:
count += 1
L = [None,] * count
for i in range(count):
L[i] = start + i * inc
return L
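# Illustrative examples (not in the original script). Unlike range(), the end point
# is included whenever it falls exactly on the grid:
#
#   >>> frange(0.0, 1.0, 0.25)
#   [0.0, 0.25, 0.5, 0.75, 1.0]
#   >>> frange(3)
#   [0.0, 1.0, 2.0, 3.0]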
# Write text generated into the figure. Used in add_text.
def write_text(loc_x, loc_y, text, ha, va):
return plt.text(loc_x, loc_y, text,
ha=ha,
va=va,
transform = ax.transAxes,
fontsize = 18)
# Add all other parameters than the primary and secondary variable into the text.
def add_text(text_location):
if text_location == "center left":
loc_x = 0.03
loc_y = 0.56
ha='left'
va='center'
elif text_location == "upper right":
loc_x = 0.97
loc_y = 0.94
ha='right'
va='center'
elif text_location == "lower right":
loc_x = 0.97
loc_y = 0.18
ha = 'right'
va = 'center'
elif text_location == "lower left":
loc_x = 0.03
loc_y = 0.18
ha = 'left'
va = 'center'
elif text_location == "center right":
loc_x = 0.97
loc_y = 0.56
ha = 'right'
va = 'center'
elif text_location == "upper left":
loc_x = 0.03
loc_y = 0.94
ha = 'left'
va = 'center'
elif text_location == "upper center":
loc_x = 0.50
loc_y = 0.94
ha = 'center'
va = 'center'
elif text_location == "lower center":
loc_x = 0.50
loc_y = 0.18
ha = 'center'
va = 'center'
elif text_location != "none":
sys.exit("Please set the text_location parameter.")
else:
return
parameter_dict = {"phase": r'$\phi = ' + "{0:.2f}".format(phase) + '\pi$', "T": '$T = ' + "{0:.2f}".format(T) + 'E_T$', "delta": r'$\Delta = ' + "{0:.0f}".format(delta) + ' E_T$', "exchange": '$h = ' + "{0:.2f}".format(exchange) + ' E_T$', "exchange_angle": r'$\theta = ' + "{0:.2f}".format(exchange_angle) + '^\circ$', "alpha_1": "{0:.2f}".format(alpha_1), "alpha_2": "{0:.2f}".format(alpha_2), "alpha_3": "{0:.2f}".format(alpha_3), "alpha_4": "{0:.2f}".format(alpha_4)}
del parameter_dict[primary_variable]
del parameter_dict[secondary_variable]
line1, line2, line3 = ("" for i in range(3))
for parameter in ["phase", "T", "delta"]:
if parameter in parameter_dict:
if len(line1) > 0:
line1 += '$,$ '
line1 += parameter_dict[parameter]
if suppress_zeros == "alphas" or suppress_zeros == "both":
non_zero = []
for parameter in ["alpha_1", "alpha_2", "alpha_3", "alpha_4"]:
if parameter in parameter_dict:
if float(parameter_dict[parameter]) != 0.00:
non_zero.append(parameter)
parameters = non_zero
elif suppress_zeros != "False" and suppress_zeros != "h_angle":
sys.exit("Please set suppress_zeros parameter to 'alphas', 'h_angle', 'both' or 'False'.")
else:
parameters = ["alpha_1", "alpha_2", "alpha_3", "alpha_4"]
if len(parameters) == 1:
line2 += r'$' + '\\' + non_zero[0] + ' = ' + parameter_dict[non_zero[0]] + '/L$'
else:
line2 += '$('
for parameter in parameters:
if parameter in parameter_dict:
if len(line2) > 3:
line2 += ', '
line2 += '\\' + parameter
line2 += ') = ('
k = 0
for parameter in parameters:
if parameter in parameter_dict:
if k > 0:
line2 += ', '
line2 += parameter_dict[parameter]
k += 1
line2 += ') /L$'
if suppress_zeros == "h_angle" or suppress_zeros == "both":
for parameter in ["exchange"]:
if parameter in parameter_dict:
line3 += parameter_dict[parameter]
else:
for parameter in ["exchange", "exchange_angle"]:
if parameter in parameter_dict:
if len(line3) > 0:
line3 += '$,$ '
line3 += parameter_dict[parameter]
if text_location == "lower left" or text_location == "lower center" or text_location == "lower right":
if len(line2) == 0:
write_text(loc_x, loc_y - 0.06, line1, ha, va)
write_text(loc_x, loc_y - 0.12, line3, ha, va)
elif len(line3) == 0:
write_text(loc_x, loc_y - 0.06, line1, ha, va)
write_text(loc_x, loc_y - 0.12, line2, ha, va)
else:
write_text(loc_x, loc_y, line1, ha, va)
write_text(loc_x, loc_y - 0.06, line2, ha, va)
write_text(loc_x, loc_y - 0.12, line3, ha, va)
elif text_location == "center left" or text_location == "center right":
if len(line2) == 0:
write_text(loc_x, loc_y - 0.03, line1, ha, va)
write_text(loc_x, loc_y - 0.09, line3, ha, va)
elif len(line3) == 0:
write_text(loc_x, loc_y - 0.03, line1, ha, va)
write_text(loc_x, loc_y - 0.09, line2, ha, va)
else:
write_text(loc_x, loc_y, line1, ha, va)
write_text(loc_x, loc_y - 0.06, line2, ha, va)
write_text(loc_x, loc_y - 0.12, line3, ha, va)
else:
if len(line2) == 0:
write_text(loc_x, loc_y, line1, ha, va)
write_text(loc_x, loc_y - 0.06, line3, ha, va)
elif len(line3) == 0:
write_text(loc_x, loc_y, line1, ha, va)
write_text(loc_x, loc_y - 0.06, line2, ha, va)
else:
write_text(loc_x, loc_y, line1, ha, va)
write_text(loc_x, loc_y - 0.06, line2, ha, va)
write_text(loc_x, loc_y - 0.12, line3, ha, va)
# # # # # # # # # # # # # # #
# Function definitions end. #
# # # # # # # # # # # # # # #
L = 1 # Length of the nanowire.
# Get all the parameters from the run script.
phase = float(sys.argv[1])
alpha_1 = float(sys.argv[2])
alpha_2 = float(sys.argv[3])
alpha_3 = float(sys.argv[4])
alpha_4 = float(sys.argv[5])
exchange = float(sys.argv[6])
exchange_angle = float(sys.argv[7])
T = float(sys.argv[8])
delta = float(sys.argv[9])
primary_variable = sys.argv[10]
primary_range_start = float(sys.argv[11])
primary_range_stop = float(sys.argv[12])
primary_range_spacing = float(sys.argv[13])
secondary_variable = sys.argv[14]
secondary_range_start = float(sys.argv[15])
secondary_range_stop = float(sys.argv[16])
secondary_range_spacing = float(sys.argv[17])
legend_location = sys.argv[18]
text_location = sys.argv[19]
suppress_zeros = sys.argv[20]
dashed_line = sys.argv[21]
res_dir = sys.argv[22]
fig_dir = sys.argv[23]
primary_range = frange(primary_range_start, primary_range_stop, primary_range_spacing)
secondary_range = frange(secondary_range_start, secondary_range_stop, secondary_range_spacing)
# Generate data array from the results.
data = [[] for i in range(len(secondary_range))]
l = 0
for j in secondary_range:
exec("%s = %.2f" % (secondary_variable, j))
for i in primary_range:
exec("%s = %.2f" % (primary_variable, i))
file = res_dir + 'Phase_difference_' + "{0:.2f}".format(phase) + '/T_' + "{0:.2f}".format(T) + '/Rashba_' + "{0:.2f}".format(alpha_1) + '_' + "{0:.2f}".format(alpha_2) + '_' + "{0:.2f}".format(alpha_3) + '_' + "{0:.2f}".format(alpha_4) + '/h-' + "{0:.2f}".format(exchange) + '-angle-' + "{0:.2f}".format(exchange_angle) + '-supercurrent.txt'
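# Extract the first column of line 5 of the results file (presumably the supercurrent
# value, given the file name) into a temporary file that numpy can read back in.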
subprocess.call("sed -n 5p " + file + " | awk '{print $1}' > temp.dat", shell=True)
data[l].append(np.loadtxt('temp.dat', delimiter=' '))
subprocess.call("rm temp.dat", shell=True)
l += 1
# Plot the results with proper labeling.
colors = ['b', 'g', 'r', 'c', 'm', '#FF9900', '#66FF33', 'k', '#FFFF00', '#996633', '#FF99FF']
fig = plt.figure()
for i in range(len(secondary_range)):
if secondary_variable == "alpha_1":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label=r'$\alpha_1 = ' + '{0:.2f}'.format(secondary_range[i]) + '/ L$')
elif secondary_variable == "alpha_2":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label=r'$\alpha_2 = ' + '{0:.2f}'.format(secondary_range[i]) + '/ L$')
elif secondary_variable == "alpha_3":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label=r'$\alpha_3 = ' + '{0:.2f}'.format(secondary_range[i]) + '/ L$')
elif secondary_variable == "alpha_4":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label=r'$\alpha_4 = ' + '{0:.2f}'.format(secondary_range[i]) + '/ L$')
elif secondary_variable == "exchange":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label='$h = ' + '{0:.2f}'.format(secondary_range[i]) + ' E_T$')
elif secondary_variable == "phase":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label='$\phi = ' + '{0:.2f}'.format(secondary_range[i]) + ' \pi$')
elif secondary_variable == "exchange_angle":
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label=r'$\theta = ' + '{0:.2f}'.format(secondary_range[i]) + '^\circ$')
else:
plt.plot(primary_range, data[i], '-', linewidth=1.5, color = colors[i], label='$T = ' + '{0:.2f}'.format(secondary_range[i]) + ' E_T$')
if primary_variable == "alpha_1":
plt.xlabel(r'$\alpha_1 / L$', fontsize = 19)
elif primary_variable == "alpha_2":
plt.xlabel(r'$\alpha_2 / L$', fontsize = 19)
elif primary_variable == "alpha_3":
plt.xlabel(r'$\alpha_3 / L$', fontsize = 19)
elif primary_variable == "alpha_4":
plt.xlabel(r'$\alpha_4 / L$', fontsize = 19)
elif primary_variable == "exchange":
plt.xlabel(r'$h E_T$', fontsize = 19)
elif primary_variable == "phase":
plt.xlabel(r'$\phi \pi$', fontsize = 19)
elif primary_variable == "exchange_angle":
plt.xlabel(r'$\theta$', fontsize = 19)
else:
plt.xlabel(r'$T E_T$', fontsize = 19)
plt.ylabel(r'$I_S e R_N / E_T$', fontsize = 19)
# Print a horizontal dashed line at y = 0.
if dashed_line == "True":
plt.axhline(y=0, xmin=primary_range[0], xmax=primary_range[-1], color='k', linestyle='--')
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
# This adds a text stating the rest of the variables used in the calculation.
ax = fig.add_subplot(111)
add_text(text_location)
# Legend box containing labels.
if legend_location != "none":
plt.legend(loc=legend_location, handlelength=1.6, handletextpad=0.4, fontsize = 16)
# Finally save the figure into the subdirectory specified in the plot script.
fig.savefig(fig_dir + 'Figure.png')
|
|
# -*- encoding: utf-8 -*-
'''
Hubble Nova plugin for running arbitrary commands and checking the output of
those commands
This module is deprecated, and must be explicitly enabled in pillar/minion
config via the hubblestack:nova:enable_command_module (should be set to True
to enable this module). This allows nova to run arbitrary commands via yaml
profiles.
:maintainer: HubbleStack / basepi
:maturity: 2016.7.0
:platform: All
:requires: SaltStack
Sample YAML data, with inline comments:
# Top level key lets the module know it should look at this data
command:
# Unique ID for this set of audits
nodev:
data:
# 'osfinger' grain, for multiplatform support
'Red Hat Enterprise Linux Server-6':
# tag is required
tag: CIS-1.1.10
# `commands` is a list of commands with individual flags
commands:
# Command to be run
- 'grep "[[:space:]]/home[[:space:]]" /etc/fstab':
# Check the output for this pattern
# If match_output not provided, any output will be a match
match_output: nodev
# Use regex when matching the output (default False)
match_output_regex: False
# Invert the success criteria. If True, a match will cause failure (default False)
fail_if_matched: False
- 'mount | grep /home':
match_output: nodev
match_output_regex: False
# Match each line of the output against our pattern
# Any that don't match will make the audit fail (default False)
match_output_by_line: True
- ?
|
echo 'this is a multi-line'
echo 'bash script'
echo 'note the special ? syntax'
:
# Shell through which the script will be run, must be abs path
shell: /bin/bash
match_output: this
# Aggregation strategy for multiple commands. Defaults to 'and', other option is 'or'
aggregation: 'and'
# Catch-all, if no other osfinger match was found
'*':
tag: generic_tag
commands:
- 'grep "[[:space:]]/home[[:space:]]" /etc/fstab':
match_output: nodev
match_output_regex: False
fail_if_matched: False
- 'mount | grep /home':
match_output: nodev
match_output_regex: False
match_output_by_line: True
aggregation: 'and'
# Description will be output with the results
description: '/home should be nodev'
'''
from __future__ import absolute_import
import logging
import fnmatch
import yaml
import os
import copy
import re
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
if salt.utils.is_windows():
return False, 'This audit module only runs on linux'
return True
def audit(data_list, tags, debug=False):
'''
Run the command audits contained in the data_list
'''
__data__ = {}
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__tags__ = _get_tags(__data__)
if debug:
log.debug('command audit __data__:')
log.debug(__data__)
log.debug('command audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
if __tags__ and not __salt__['config.get']('hubblestack:nova:enable_command_module',
False):
ret['Error'] = ['command module has not been explicitly enabled in '
'config. Please set hubblestack:nova:enable_command_module '
'to True in pillar or minion config to allow this module.']
return ret
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
if 'commands' not in tag_data:
continue
command_results = []
for command_data in tag_data['commands']:
for command, command_args in command_data.iteritems():
if 'shell' in command_args:
cmd_ret = __salt__['cmd.run'](command,
python_shell=True,
shell=command_args['shell'])
else:
cmd_ret = __salt__['cmd.run'](command,
python_shell=True)
found = False
if cmd_ret:
found = True
if 'match_output' in command_args:
if command_args.get('match_output_by_line'):
cmd_ret_lines = cmd_ret.splitlines()
else:
cmd_ret_lines = [cmd_ret]
for line in cmd_ret_lines:
if command_args.get('match_output_regex'):
if not re.match(command_args['match_output'], line):
found = False
else: # match without regex
if command_args['match_output'] not in line:
found = False
if command_args.get('fail_if_matched'):
found = not found
command_results.append(found)
aggregation = tag_data.get('aggregation', 'and')
if aggregation.lower() == 'or':
if any(command_results):
ret['Success'].append(tag_data)
else:
ret['Failure'].append(tag_data)
else: # assume 'and' if it's not 'or'
if all(command_results):
ret['Success'].append(tag_data)
else:
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the command level
'''
if 'command' not in ret:
ret['command'] = []
if 'command' in data:
for key, val in data['command'].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['command'].append({key: val})
return ret
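# Illustrative example (not in the original module) of what _merge_yaml() produces;
# the profile name 'cis_profile' is a placeholder:
#
#   _merge_yaml({}, {'command': {'nodev': {'description': '/home should be nodev'}}},
#               profile='cis_profile')
#   -> {'command': [{'nodev': {'description': '/home should be nodev',
#                              'nova_profile': 'cis_profile'}}]}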
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfinger')
for audit_dict in data.get('command', []):
# command:0
for audit_id, audit_data in audit_dict.iteritems():
# command:0:nodev
tags_dict = audit_data.get('data', {})
# command:0:nodev:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', {})
# command:0:nodev:data:Debian-8
if 'tag' not in tags:
tags['tag'] = ''
tag = tags['tag']
if tag not in ret:
ret[tag] = []
formatted_data = {'tag': tag,
'module': 'command'}
formatted_data.update(audit_data)
formatted_data.update(tags)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
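# Illustrative sketch (not in the original module): for the sample YAML at the top of
# this file, running on a 'Red Hat Enterprise Linux Server-6' minion, _get_tags() keys
# the result on the tag, roughly:
#
#   {'CIS-1.1.10': [{'tag': 'CIS-1.1.10', 'module': 'command',
#                    'description': '/home should be nodev',
#                    'commands': [...], ...}]}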
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_middleware import sizelimit
from oslo_serialization import jsonutils
import six
from keystone.common import authorization
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _LW
from keystone.models import token_model
from keystone.openstack.common import versionutils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'
# Header used to transmit the subject token
SUBJECT_TOKEN_HEADER = 'X-Subject-Token'
# Environment variable used to pass the request context
CONTEXT_ENV = wsgi.CONTEXT_ENV
# Environment variable used to pass the request params
PARAMS_ENV = wsgi.PARAMS_ENV
class TokenAuthMiddleware(wsgi.Middleware):
def process_request(self, request):
token = request.headers.get(AUTH_TOKEN_HEADER)
context = request.environ.get(CONTEXT_ENV, {})
context['token_id'] = token
if SUBJECT_TOKEN_HEADER in request.headers:
context['subject_token_id'] = (
request.headers.get(SUBJECT_TOKEN_HEADER))
request.environ[CONTEXT_ENV] = context
class AdminTokenAuthMiddleware(wsgi.Middleware):
"""A trivial filter that checks for a pre-defined admin token.
Sets 'is_admin' to true in the context, expected to be checked by
methods that are admin-only.
"""
def process_request(self, request):
token = request.headers.get(AUTH_TOKEN_HEADER)
context = request.environ.get(CONTEXT_ENV, {})
context['is_admin'] = (token == CONF.admin_token)
request.environ[CONTEXT_ENV] = context
class PostParamsMiddleware(wsgi.Middleware):
"""Middleware to allow method arguments to be passed as POST parameters.
Filters out the parameters `self`, `context` and anything beginning with
an underscore.
"""
def process_request(self, request):
params_parsed = request.params
params = {}
for k, v in six.iteritems(params_parsed):
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ[PARAMS_ENV] = params
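# Illustrative example (not part of the original middleware): both PostParamsMiddleware
# above and JsonBodyMiddleware below strip reserved/private argument names before
# exposing the parameters to the rest of the pipeline, e.g.
#
#   {'self': 'x', 'context': 'y', '_hidden': 1, 'name': 'demo'}  ->  {'name': 'demo'}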
class JsonBodyMiddleware(wsgi.Middleware):
"""Middleware to allow method arguments to be passed as serialized JSON.
Accepting arguments as JSON is useful for accepting data that may be more
complex than simple primitives.
Filters out the parameters `self`, `context` and anything beginning with
an underscore.
"""
def process_request(self, request):
# Abort early if we don't have any work to do
params_json = request.body
if not params_json:
return
# Reject unrecognized content types. Empty string indicates
# the client did not explicitly set the header
if request.content_type not in ('application/json', ''):
e = exception.ValidationError(attribute='application/json',
target='Content-Type header')
return wsgi.render_exception(e, request=request)
params_parsed = {}
try:
params_parsed = jsonutils.loads(params_json)
except ValueError:
e = exception.ValidationError(attribute='valid JSON',
target='request body')
return wsgi.render_exception(e, request=request)
finally:
if not params_parsed:
params_parsed = {}
if not isinstance(params_parsed, dict):
e = exception.ValidationError(attribute='valid JSON object',
target='request body')
return wsgi.render_exception(e, request=request)
params = {}
for k, v in six.iteritems(params_parsed):
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ[PARAMS_ENV] = params
class XmlBodyMiddleware(wsgi.Middleware):
"""De/serialize XML to/from JSON."""
def print_warning(self):
LOG.warning(_LW('XML support has been removed as of the Kilo release '
'and should not be referenced or used in deployment. '
'Please remove references to XmlBodyMiddleware from '
'your configuration. This compatibility stub will be '
'removed in the L release'))
def __init__(self, *args, **kwargs):
super(XmlBodyMiddleware, self).__init__(*args, **kwargs)
self.print_warning()
class XmlBodyMiddlewareV2(XmlBodyMiddleware):
"""De/serialize XML to/from JSON for v2.0 API."""
def __init__(self, *args, **kwargs):
pass
class XmlBodyMiddlewareV3(XmlBodyMiddleware):
"""De/serialize XML to/from JSON for v3 API."""
def __init__(self, *args, **kwargs):
pass
class NormalizingFilter(wsgi.Middleware):
"""Middleware filter to handle URL normalization."""
def process_request(self, request):
"""Normalizes URLs."""
# Removes a trailing slash from the given path, if any.
if (len(request.environ['PATH_INFO']) > 1 and
request.environ['PATH_INFO'][-1] == '/'):
request.environ['PATH_INFO'] = request.environ['PATH_INFO'][:-1]
# Rewrites path to root if no path is given.
elif not request.environ['PATH_INFO']:
request.environ['PATH_INFO'] = '/'
class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter):
@versionutils.deprecated(
versionutils.deprecated.KILO,
in_favor_of='oslo_middleware.sizelimit.RequestBodySizeLimiter',
remove_in=+1,
what='keystone.middleware.RequestBodySizeLimiter')
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
class AuthContextMiddleware(wsgi.Middleware):
"""Build the authentication context from the request auth token."""
def _build_auth_context(self, request):
token_id = request.headers.get(AUTH_TOKEN_HEADER).strip()
if token_id == CONF.admin_token:
# NOTE(gyee): no need to proceed any further as the special admin
# token is being handled by AdminTokenAuthMiddleware. This code
# will not be impacted even if AdminTokenAuthMiddleware is removed
# from the pipeline as "is_admin" is default to "False". This code
# is independent of AdminTokenAuthMiddleware.
return {}
context = {'token_id': token_id}
context['environment'] = request.environ
try:
token_ref = token_model.KeystoneToken(
token_id=token_id,
token_data=self.token_provider_api.validate_token(token_id))
# TODO(gyee): validate_token_bind should really be its own
# middleware
wsgi.validate_token_bind(context, token_ref)
return authorization.token_to_auth_context(token_ref)
except exception.TokenNotFound:
LOG.warning(_LW('RBAC: Invalid token'))
raise exception.Unauthorized()
def process_request(self, request):
if AUTH_TOKEN_HEADER not in request.headers:
LOG.debug(('Auth token not in the request header. '
'Will not build auth context.'))
return
if authorization.AUTH_CONTEXT_ENV in request.environ:
msg = _LW('Auth context already exists in the request environment')
LOG.warning(msg)
return
auth_context = self._build_auth_context(request)
LOG.debug('RBAC: auth_context: %s', auth_context)
request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
|
|
from __future__ import division, print_function, absolute_import
import numpy as np
import warnings
from ..utils.six.moves import xrange
from dipy.core.geometry import cart2sphere, sphere2cart, vector_norm
from dipy.core.onetime import auto_attr
from dipy.reconst.recspeed import remove_similar_vertices
__all__ = ['Sphere', 'HemiSphere', 'faces_from_sphere_vertices',
'unique_edges']
def _all_specified(*args):
for a in args:
if a is None:
return False
return True
def _some_specified(*args):
for a in args:
if a is not None:
return True
return False
def faces_from_sphere_vertices(vertices):
"""
Triangulate a set of vertices on the sphere.
Parameters
----------
vertices : (M, 3) ndarray
XYZ coordinates of vertices on the sphere.
Returns
-------
faces : (N, 3) ndarray
Indices into vertices; forms triangular faces.
"""
from scipy.spatial import Delaunay
faces = Delaunay(vertices).convex_hull
if len(vertices) < 2**16:
return np.asarray(faces, np.uint16)
else:
return faces
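# Illustrative example (not in the original module): for the six octahedron vertices
# defined at the bottom of this file, the Delaunay convex hull yields the expected
# eight triangular faces:
#
#   >>> faces_from_sphere_vertices(octahedron_vertices).shape
#   (8, 3)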
def unique_edges(faces, return_mapping=False):
"""Extract all unique edges from given triangular faces.
Parameters
----------
faces : (N, 3) ndarray
Vertex indices forming triangular faces.
return_mapping : bool
If true, a mapping to the edges of each face is returned.
Returns
-------
edges : (N, 2) ndarray
Unique edges.
mapping : (N, 3)
For each face, [x, y, z], a mapping to its edges [a, b, c].
::
y
/\
/ \
a/ \b
/ \
/ \
/__________\
x c z
"""
faces = np.asarray(faces)
edges = np.concatenate([faces[:, 0:2], faces[:, 1:3], faces[:, ::2]])
if return_mapping:
ue, inverse = unique_sets(edges, return_inverse=True)
return ue, inverse.reshape((3, -1)).T
else:
return unique_sets(edges)
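# Illustrative example (not in the original module): the shared edge of two adjacent
# faces is reported only once:
#
#   >>> faces = np.array([[0, 1, 2], [0, 2, 3]])
#   >>> len(unique_edges(faces))
#   5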
def unique_sets(sets, return_inverse=False):
"""Remove duplicate sets.
Parameters
----------
sets : array (N, k)
N sets of size k.
return_inverse : bool
If True, also returns the indices of unique_sets that can be used
to reconstruct `sets` (the original ordering of each set may not be
preserved).
Returns
-------
unique_sets : array
Unique sets.
inverse : array (N,)
The indices to reconstruct `sets` from `unique_sets`.
"""
sets = np.sort(sets, 1)
order = np.lexsort(sets.T)
sets = sets[order]
flag = np.ones(len(sets), 'bool')
flag[1:] = (sets[1:] != sets[:-1]).any(-1)
uniqsets = sets[flag]
if return_inverse:
inverse = np.empty_like(order)
inverse[order] = np.arange(len(order))
index = flag.cumsum() - 1
return uniqsets, index[inverse]
else:
return uniqsets
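# Illustrative example (not in the original module): ordering within a set is ignored,
# so (1, 2) and (2, 1) collapse to a single entry:
#
#   >>> unique_sets(np.array([[1, 2], [2, 1], [3, 4]]))
#   array([[1, 2],
#          [3, 4]])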
class Sphere(object):
"""Points on the unit sphere.
The sphere can be constructed using one of three conventions::
Sphere(x, y, z)
Sphere(xyz=xyz)
Sphere(theta=theta, phi=phi)
Parameters
----------
x, y, z : 1-D array_like
Vertices as x-y-z coordinates.
theta, phi : 1-D array_like
Vertices as spherical coordinates. Theta and phi are the inclination
and azimuth angles respectively.
xyz : (N, 3) ndarray
Vertices as x-y-z coordinates.
faces : (N, 3) ndarray
Indices into vertices that form triangular faces. If unspecified,
the faces are computed using a Delaunay triangulation.
edges : (N, 2) ndarray
Edges between vertices. If unspecified, the edges are
derived from the faces.
"""
def __init__(self, x=None, y=None, z=None,
theta=None, phi=None,
xyz=None,
faces=None, edges=None):
all_specified = _all_specified(x, y, z) + _all_specified(xyz) + \
_all_specified(theta, phi)
one_complete = (_some_specified(x, y, z) + _some_specified(xyz) +
_some_specified(theta, phi))
if not (all_specified == 1 and one_complete == 1):
raise ValueError("Sphere must be constructed using either "
"(x,y,z), (theta, phi) or xyz.")
if edges is not None and faces is None:
raise ValueError("Either specify both faces and "
"edges, only faces, or neither.")
if edges is not None:
self.edges = np.asarray(edges)
if faces is not None:
self.faces = np.asarray(faces)
if theta is not None:
self.theta = np.array(theta, copy=False, ndmin=1)
self.phi = np.array(phi, copy=False, ndmin=1)
return
if xyz is not None:
xyz = np.asarray(xyz)
x, y, z = xyz.T
x, y, z = (np.asarray(t) for t in (x, y, z))
r, self.theta, self.phi = cart2sphere(x, y, z)
if not np.allclose(r, 1):
warnings.warn("Vertices are not on the unit sphere.")
@auto_attr
def vertices(self):
return np.column_stack(sphere2cart(1, self.theta, self.phi))
@property
def x(self):
return self.vertices[:, 0]
@property
def y(self):
return self.vertices[:, 1]
@property
def z(self):
return self.vertices[:, 2]
@auto_attr
def faces(self):
faces = faces_from_sphere_vertices(self.vertices)
return faces
@auto_attr
def edges(self):
return unique_edges(self.faces)
def subdivide(self, n=1):
"""Subdivides each face of the sphere into four new faces.
New vertices are created at a, b, and c. Then each face [x, y, z] is
divided into faces [x, a, c], [y, a, b], [z, b, c], and [a, b, c].
::
y
/\
/ \
a/____\b
/\ /\
/ \ / \
/____\/____\
x c z
Parameters
----------
n : int, optional
The number of subdivisions to perform.
Returns
-------
new_sphere : Sphere
The subdivided sphere.
"""
vertices = self.vertices
faces = self.faces
for i in xrange(n):
edges, mapping = unique_edges(faces, return_mapping=True)
new_vertices = vertices[edges].sum(1)
new_vertices /= vector_norm(new_vertices, keepdims=True)
mapping += len(vertices)
vertices = np.vstack([vertices, new_vertices])
x, y, z = faces.T
a, b, c = mapping.T
face1 = np.column_stack([x, a, c])
face2 = np.column_stack([y, b, a])
face3 = np.column_stack([z, c, b])
face4 = mapping
faces = np.concatenate([face1, face2, face3, face4])
if len(vertices) < 2**16:
faces = np.asarray(faces, dtype='uint16')
return Sphere(xyz=vertices, faces=faces)
def find_closest(self, xyz):
"""
Find the index of the vertex in the Sphere closest to the input vector
Parameters
----------
xyz : array-like, 3 elements
A unit vector
Returns
-------
idx : int
The index into the Sphere.vertices array that gives the closest
vertex (in angle).
"""
cos_sim = np.dot(self.vertices, xyz)
return np.argmax(cos_sim)
class HemiSphere(Sphere):
"""Points on the unit sphere.
A HemiSphere is similar to a Sphere but it takes antipodal symmetry into
account. Antipodal symmetry means that point v on a HemiSphere is the same
as the point -v. Duplicate points are discarded when constructing a
HemiSphere (including antipodal duplicates). `edges` and `faces` are
remapped to the remaining points as closely as possible.
The HemiSphere can be constructed using one of three conventions::
HemiSphere(x, y, z)
HemiSphere(xyz=xyz)
HemiSphere(theta=theta, phi=phi)
Parameters
----------
x, y, z : 1-D array_like
Vertices as x-y-z coordinates.
theta, phi : 1-D array_like
Vertices as spherical coordinates. Theta and phi are the inclination
and azimuth angles respectively.
xyz : (N, 3) ndarray
Vertices as x-y-z coordinates.
faces : (N, 3) ndarray
Indices into vertices that form triangular faces. If unspecified,
the faces are computed using a Delaunay triangulation.
edges : (N, 2) ndarray
Edges between vertices. If unspecified, the edges are
derived from the faces.
tol : float
Angle in degrees. Vertices that are less than tol degrees apart are
treated as duplicates.
See Also
--------
Sphere
"""
def __init__(self, x=None, y=None, z=None,
theta=None, phi=None,
xyz=None,
faces=None, edges=None, tol=1e-5):
"""Create a HemiSphere from points"""
sphere = Sphere(x=x, y=y, z=z, theta=theta, phi=phi, xyz=xyz)
uniq_vertices, mapping = remove_similar_vertices(sphere.vertices, tol,
return_mapping=True)
uniq_vertices *= 1 - 2*(uniq_vertices[:, -1:] < 0)
if faces is not None:
faces = np.asarray(faces)
faces = unique_sets(mapping[faces])
if edges is not None:
edges = np.asarray(edges)
edges = unique_sets(mapping[edges])
Sphere.__init__(self, xyz=uniq_vertices, edges=edges, faces=faces)
@classmethod
def from_sphere(klass, sphere, tol=1e-5):
"""Create instance from a Sphere"""
return klass(theta=sphere.theta, phi=sphere.phi,
edges=sphere.edges, faces=sphere.faces, tol=tol)
def mirror(self):
"""Create a full Sphere from a HemiSphere"""
n = len(self.vertices)
vertices = np.vstack([self.vertices, -self.vertices])
edges = np.vstack([self.edges, n + self.edges])
_switch_vertex(edges[:, 0], edges[:, 1], vertices)
faces = np.vstack([self.faces, n + self.faces])
_switch_vertex(faces[:, 0], faces[:, 1], vertices)
_switch_vertex(faces[:, 0], faces[:, 2], vertices)
return Sphere(xyz=vertices, edges=edges, faces=faces)
@auto_attr
def faces(self):
vertices = np.vstack([self.vertices, -self.vertices])
faces = faces_from_sphere_vertices(vertices)
return unique_sets(faces % len(self.vertices))
def subdivide(self, n=1):
"""Create a more subdivided HemiSphere
See Sphere.subdivide for full documentation.
"""
sphere = self.mirror()
sphere = sphere.subdivide(n)
return HemiSphere.from_sphere(sphere)
def find_closest(self, xyz):
"""
Find the index of the vertex in the Sphere closest to the input vector,
taking into account antipodal symmetry
Parameters
----------
xyz : array-like, 3 elements
A unit vector
Returns
-------
idx : int
The index into the Sphere.vertices array that gives the closest
vertex (in angle).
"""
cos_sim = abs(np.dot(self.vertices, xyz))
return np.argmax(cos_sim)
def _switch_vertex(index1, index2, vertices):
"""When we mirror an edge (a, b). We can either create (a, b) and (a', b')
OR (a, b') and (a', b). The angles of edges (a, b) and (a, b') are
supplementary, so we choose the two new edges such that their angles are
less than 90 degrees.
"""
n = len(vertices)
A = vertices[index1]
B = vertices[index2]
is_far = (A * B).sum(-1) < 0
index2[is_far] = index2[is_far] + (n / 2.0)
index2 %= n
def _get_forces(charges):
r"""Given a set of charges on the surface of the sphere gets total force
those charges exert on each other.
The force exerted by one charge on another is given by Coulomb's law. For
this simulation we use charges of equal magnitude so this force can be
written as $\vec{r}/r^3$, up to a constant factor, where $\vec{r}$ is the
separation of the two charges and $r$ is the magnitude of $\vec{r}$. Forces
are additive so the total force on each of the charges is the sum of the
force exerted by each other charge in the system. Charges do not exert a
force on themselves. The electric potential can similarly be written as
$1/r$ and is also additive.
"""
all_charges = np.concatenate((charges, -charges))
all_charges = all_charges[:, None]
r = charges - all_charges
r_mag = np.sqrt((r*r).sum(-1))[:, :, None]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
force = r / r_mag**3
potential = 1. / r_mag
d = np.arange(len(charges))
force[d, d] = 0
force = force.sum(0)
force_r_comp = (charges*force).sum(-1)[:, None]
f_theta = force - force_r_comp*charges
potential[d, d] = 0
potential = 2*potential.sum()
return f_theta, potential
def disperse_charges(hemi, iters, const=.2):
"""Models electrostatic repulsion on the unit sphere
Places charges on a sphere and simulates the repulsive forces felt by each
one. Allows the charges to move for some number of iterations and returns
their final location as well as the total potential of the system at each
step.
Parameters
----------
hemi : HemiSphere
Points on a unit sphere.
iters : int
Number of iterations to run.
const : float
Using a smaller const could provide a more accurate result, but will
need more iterations to converge.
Returns
-------
hemi : HemiSphere
Distributed points on a unit sphere.
potential : ndarray
The electrostatic potential at each iteration. This can be useful to
check if the repulsion converged to a minimum.
Notes
-----
This function is meant to be used with diffusion imaging so antipodal
symmetry is assumed. Therefore each charge must not only be unique, but if
there is a charge at +x, there cannot be a charge at -x. These are treated
as the same location and because the distance between the two charges will
be zero, the result will be unstable.
"""
if not isinstance(hemi, HemiSphere):
raise ValueError("expecting HemiSphere")
charges = hemi.vertices
forces, v = _get_forces(charges)
force_mag = np.sqrt((forces*forces).sum())
const = const / force_mag.max()
potential = np.empty(iters)
v_min = v
for ii in xrange(iters):
new_charges = charges + forces * const
norms = np.sqrt((new_charges**2).sum(-1))
new_charges /= norms[:, None]
new_forces, v = _get_forces(new_charges)
if v <= v_min:
charges = new_charges
forces = new_forces
potential[ii] = v_min = v
else:
const /= 2.
potential[ii] = v_min
return HemiSphere(xyz=charges), potential
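# Minimal usage sketch (not in the original module):
#
#   hemi = HemiSphere.from_sphere(unit_icosahedron)
#   updated, potential = disperse_charges(hemi, iters=100)
#
# `potential` is non-increasing, because a step is only accepted when it lowers the
# total electrostatic potential; otherwise the step size `const` is halved.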
def interp_rbf(data, sphere_origin, sphere_target,
function='multiquadric', epsilon=None, smooth=0.1,
norm="angle"):
"""Interpolate data on the sphere, using radial basis functions.
Parameters
----------
data : (N,) ndarray
Function values on the unit sphere.
sphere_origin : Sphere
Positions of data values.
sphere_target : Sphere
M target positions for which to interpolate.
function : {'multiquadric', 'inverse', 'gaussian'}
Radial basis function.
epsilon : float
Radial basis function spread parameter. Defaults to approximate average
distance between nodes, which is a good starting value.
smooth : float
Values greater than zero increase the smoothness of the
approximation, with 0 as pure interpolation. Default: 0.1
norm : str
A string indicating the function that returns the
"distance" between two points.
'angle' - The angle between two vectors
'euclidean_norm' - The Euclidean distance
Returns
-------
v : (M,) ndarray
Interpolated values.
See Also
--------
scipy.interpolate.Rbf
"""
from scipy.interpolate import Rbf
def angle(x1, x2):
xx = np.arccos((x1 * x2).sum(axis=0))
xx[np.isnan(xx)] = 0
return xx
def euclidean_norm(x1, x2):
return np.sqrt(((x1 - x2)**2).sum(axis=0))
if norm == "angle":
norm = angle
elif norm == "euclidean_norm":
w_s = "The Eucldian norm used for interpolation is inaccurate "
w_s += "and will be deprecated in future versions. Please consider "
w_s += "using the 'angle' norm instead"
warnings.warn(w_s, DeprecationWarning)
norm = euclidean_norm
# Workaround for bug in older versions of SciPy that don't allow
# specification of epsilon None:
if epsilon is not None:
kwargs = {'function': function,
'epsilon': epsilon,
'smooth' : smooth,
'norm' : norm}
else:
kwargs = {'function': function,
'smooth': smooth,
'norm' : norm}
rbfi = Rbf(sphere_origin.x, sphere_origin.y, sphere_origin.z, data,
**kwargs)
return rbfi(sphere_target.x, sphere_target.y, sphere_target.z)
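# Minimal usage sketch (not in the original module), assuming scipy is available:
#
#   sph = unit_icosahedron
#   vals = np.ones(len(sph.vertices))           # a constant function on the sphere
#   interp_rbf(vals, sph, sph.subdivide(1))     # approximately 1.0 at every target vertex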
def euler_characteristic_check(sphere, chi=2):
r"""Checks the euler characteristic of a sphere
If $f$ = number of faces, $e$ = number_of_edges and $v$ = number of
vertices, the Euler formula says $f-e+v = 2$ for a mesh on a sphere. More
generally, whether $f -e + v == \chi$ where $\chi$ is the Euler
characteristic of the mesh.
- Open chain (track) has $\chi=1$
- Closed chain (loop) has $\chi=0$
- Disk has $\chi=1$
- Sphere has $\chi=2$
- HemiSphere has $\chi=1$
Parameters
----------
sphere : Sphere
A Sphere instance with vertices, edges and faces attributes.
chi : int, optional
The Euler characteristic of the mesh to be checked
Returns
-------
check : bool
True if the mesh has Euler characteristic $\chi$
Examples
--------
>>> euler_characteristic_check(unit_octahedron)
True
>>> hemisphere = HemiSphere.from_sphere(unit_icosahedron)
>>> euler_characteristic_check(hemisphere, chi=1)
True
"""
v = sphere.vertices.shape[0]
e = sphere.edges.shape[0]
f = sphere.faces.shape[0]
return (f - e + v) == chi
octahedron_vertices = np.array(
[[1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, -1.0], ])
octahedron_faces = np.array(
[[0, 4, 2],
[1, 5, 3],
[4, 2, 1],
[5, 3, 0],
[1, 4, 3],
[0, 5, 2],
[0, 4, 3],
[1, 5, 2], ], dtype='uint16')
t = (1 + np.sqrt(5)) / 2
icosahedron_vertices = np.array(
[[t, 1, 0], # 0
[-t, 1, 0], # 1
[t, -1, 0], # 2
[-t, -1, 0], # 3
[1, 0, t], # 4
[1, 0, -t], # 5
[-1, 0, t], # 6
[-1, 0, -t], # 7
[0, t, 1], # 8
[0, -t, 1], # 9
[0, t, -1], # 10
[0, -t, -1], ]) # 11
icosahedron_vertices /= vector_norm(icosahedron_vertices, keepdims=True)
icosahedron_faces = np.array(
[[8, 4, 0],
[2, 5, 0],
[2, 5, 11],
[9, 2, 11],
[2, 4, 0],
[9, 2, 4],
[10, 8, 1],
[10, 8, 0],
[10, 5, 0],
[6, 3, 1],
[9, 6, 3],
[6, 8, 1],
[6, 8, 4],
[9, 6, 4],
[7, 10, 1],
[7, 10, 5],
[7, 3, 1],
[7, 3, 11],
[9, 3, 11],
[7, 5, 11], ], dtype='uint16')
unit_octahedron = Sphere(xyz=octahedron_vertices, faces=octahedron_faces)
unit_icosahedron = Sphere(xyz=icosahedron_vertices, faces=icosahedron_faces)
hemi_icosahedron = HemiSphere.from_sphere(unit_icosahedron)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
import random
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
connect_nodes,
satoshi_round,
sync_blocks,
sync_mempools,
)
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_2), OP_EQUAL])
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)."""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
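# (1.1892 is approximately 2**0.25, so 1.1892**28 is approximately 2**7 = 128,
# which is where the 1-128 range above comes from.)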
rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(hexstring=ToHex(tx))
unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
unconflist.append({"txid": txid, "vout": 1, "amount": amount})
return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split=False):
"""Generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to create small inputs so they wouldn't have
a high coin age when the notion of priority still existed."""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"] / 2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split):
completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
else:
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = ToHex(tx)
txid = from_node.sendrawtransaction(hexstring=completetx)
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
for i, e in enumerate(all_smart_estimates): # estimate is for i+1
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
if feerate - delta > last_feerate:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
% (feerate, last_feerate))
last_feerate = feerate
if i == 0:
assert_equal(e["blocks"], 2)
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
class EstimateFeeTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
"""
We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
["-blockmaxweight=68000", "-maxorphantx=1000"],
["-blockmaxweight=32000", "-maxorphantx=1000"]])
# Use node0 to mine blocks for input splitting
# Node1 mines small blocks but that are bigger than the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
# (68k weight is room enough for 120 or so transactions)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 55 or so transactions)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
self.stop_nodes()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee) / tx_kbytes)
sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
# Double txouts to txouts2
while (len(self.txouts) > 0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Double txouts2 to txouts
while (len(self.txouts2) > 0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
###self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
###self.transact_and_mine(10, self.nodes[2])
###check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
if __name__ == '__main__':
EstimateFeeTest().main()
|
|
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from osf.models import AbstractNode as Node
from website.util import permissions
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
ForkFactory
)
class TestNodeForksList(ApiTestCase):
def setUp(self):
super(TestNodeForksList, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory()
self.private_project.add_contributor(self.user, permissions=[permissions.READ, permissions.WRITE])
self.private_project.save()
self.component = NodeFactory(parent=self.private_project, creator=self.user)
self.pointer = ProjectFactory(creator=self.user)
self.private_project.add_pointer(self.pointer, auth=Auth(self.user), save=True)
self.private_fork = ForkFactory(project=self.private_project, user=self.user)
self.private_project_url = '/{}nodes/{}/forks/'.format(API_BASE, self.private_project._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_project.save()
self.public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True)
self.public_project_url = '/{}nodes/{}/forks/'.format(API_BASE, self.public_project._id)
self.public_fork = ForkFactory(project=self.public_project, user=self.user)
self.user_two = AuthUserFactory()
def test_can_access_public_node_forks_list_when_unauthenticated(self):
res = self.app.get(self.public_project_url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 0)
# Fork defaults to private
assert_equal(self.public_fork.is_public, False)
self.public_fork.is_public = True
self.public_fork.save()
res = self.app.get(self.public_project_url)
assert_equal(len(res.json['data']), 1)
assert_equal(self.public_fork.is_public, True)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title)
assert_equal(data['id'], self.public_fork._id)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_can_access_public_node_forks_list_authenticated_contributor(self):
res = self.app.get(self.public_project_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(self.public_fork.is_public, False)
assert_equal(len(res.json['data']), 1)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title)
assert_equal(data['id'], self.public_fork._id)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_can_access_public_node_forks_list_authenticated_non_contributor(self):
res = self.app.get(self.public_project_url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 0)
# Fork defaults to private
assert_equal(self.public_fork.is_public, False)
self.public_fork.is_public = True
self.public_fork.save()
res = self.app.get(self.public_project_url)
assert_equal(len(res.json['data']), 1)
assert_equal(self.public_fork.is_public, True)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title)
assert_equal(data['id'], self.public_fork._id)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_cannot_access_private_node_forks_list_unauthenticated(self):
res = self.app.get(self.private_project_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_authenticated_contributor_can_access_private_node_forks_list(self):
res = self.app.get(self.private_project_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.private_project.title)
assert_equal(data['id'], self.private_fork._id)
fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data']
assert_equal(fork_contributors['attributes']['family_name'], self.user.family_name)
assert_equal(fork_contributors['id'], self.user._id)
forked_children = data['embeds']['children']['data'][0]
assert_equal(forked_children['id'], self.component.forks.first()._id)
assert_equal(forked_children['attributes']['title'], self.component.title)
forked_node_links = data['embeds']['node_links']['data'][0]['embeds']['target_node']['data']
assert_equal(forked_node_links['id'], self.pointer._id)
assert_equal(forked_node_links['attributes']['title'], self.pointer.title)
auth = Auth(self.user)
expected_logs = list(self.private_project.get_aggregate_logs_queryset(auth).values_list('action', flat=True))
expected_logs.append('node_forked')
forked_logs = data['embeds']['logs']['data']
forked_log_actions = [log['attributes']['action'] for log in forked_logs]
assert_equal(set(expected_logs), set(forked_log_actions))
assert_equal(len(set(forked_log_actions)), len(set(expected_logs)))
forked_from = data['embeds']['forked_from']['data']
assert_equal(forked_from['id'], self.private_project._id)
def test_authenticated_non_contributor_cannot_access_private_node_forks_list(self):
res = self.app.get(self.private_project_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
class TestNodeForkCreate(ApiTestCase):
def setUp(self):
super(TestNodeForkCreate, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.user_three = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user)
self.fork_data = {
'data': {
'type': 'nodes'
}
}
self.fork_data_with_title = {
'data': {
'type': 'nodes',
'attributes':
{'title': 'My Forked Project'}
}
}
self.private_project_url = '/{}nodes/{}/forks/'.format(API_BASE, self.private_project._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_project_url = '/{}nodes/{}/forks/'.format(API_BASE, self.public_project._id)
def test_create_fork_from_public_project_with_new_title(self):
res = self.app.post_json_api(self.public_project_url, self.fork_data_with_title, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.public_project.forks.first()._id)
assert_equal(res.json['data']['attributes']['title'], self.fork_data_with_title['data']['attributes']['title'])
def test_create_fork_from_private_project_with_new_title(self):
res = self.app.post_json_api(self.private_project_url, self.fork_data_with_title, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.private_project.forks.first()._id)
assert_equal(res.json['data']['attributes']['title'], self.fork_data_with_title['data']['attributes']['title'])
def test_can_fork_public_node_logged_in(self):
res = self.app.post_json_api(self.public_project_url, self.fork_data, auth=self.user_two.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.public_project.forks.first()._id)
assert_equal(res.json['data']['attributes']['title'], 'Fork of ' + self.public_project.title)
def test_cannot_fork_public_node_logged_out(self):
res = self.app.post_json_api(self.public_project_url, self.fork_data, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_can_fork_public_node_logged_in_contributor(self):
res = self.app.post_json_api(self.public_project_url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.public_project.forks.first()._id)
assert_equal(res.json['data']['attributes']['title'], 'Fork of ' + self.public_project.title)
def test_cannot_fork_private_node_logged_out(self):
res = self.app.post_json_api(self.private_project_url, self.fork_data, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_cannot_fork_private_node_logged_in_non_contributor(self):
res = self.app.post_json_api(self.private_project_url, self.fork_data, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_can_fork_private_node_logged_in_contributor(self):
res = self.app.post_json_api(self.private_project_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
data = res.json['data']
assert_equal(data['attributes']['title'], 'Fork of ' + self.private_project.title)
fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data']
assert_equal(fork_contributors['attributes']['family_name'], self.user.family_name)
assert_equal(fork_contributors['id'], self.user._id)
forked_from = data['embeds']['forked_from']['data']
assert_equal(forked_from['id'], self.private_project._id)
def test_fork_private_components_no_access(self):
url = self.public_project_url + '?embed=children'
private_component = NodeFactory(parent=self.public_project, creator=self.user_two, is_public=False)
res = self.app.post_json_api(url, self.fork_data, auth=self.user_three.auth)
assert_equal(res.status_code, 201)
# Private components that you do not have access to are not forked
assert_equal(res.json['data']['embeds']['children']['links']['meta']['total'], 0)
def test_fork_components_you_can_access(self):
url = self.private_project_url + '?embed=children'
new_component = NodeFactory(parent=self.private_project, creator=self.user)
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['embeds']['children']['links']['meta']['total'], 1)
assert_equal(res.json['data']['embeds']['children']['data'][0]['id'], new_component.forks.first()._id)
assert_equal(res.json['data']['embeds']['children']['data'][0]['attributes']['title'], new_component.title)
def test_fork_private_node_links(self):
private_pointer = ProjectFactory(creator=self.user_two)
actual_pointer = self.private_project.add_pointer(private_pointer, auth=Auth(self.user_two), save=True)
url = self.private_project_url + '?embed=node_links'
# Node link is forked, but shows up as a private node link
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['embeds']['node_links']['data'][0]['embeds']['target_node']['errors'][0]['detail'],
'You do not have permission to perform this action.')
assert_equal(res.json['data']['embeds']['node_links']['links']['meta']['total'], 1)
self.private_project.rm_pointer(actual_pointer, auth=Auth(self.user_two))
def test_fork_node_links_you_can_access(self):
pointer = ProjectFactory(creator=self.user)
self.private_project.add_pointer(pointer, auth=Auth(self.user_two), save=True)
url = self.private_project_url + '?embed=node_links'
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['embeds']['node_links']['data'][0]['embeds']['target_node']['data']['id'], pointer._id)
assert_equal(res.json['data']['embeds']['node_links']['links']['meta']['total'], 1)
def test_can_fork_registration(self):
registration = RegistrationFactory(project=self.private_project, user=self.user)
url = '/{}registrations/{}/forks/'.format(API_BASE, registration._id)
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], registration.forks.first()._id)
assert_equal(res.json['data']['attributes']['title'], 'Fork of ' + registration.title)
def test_read_only_contributor_can_fork_private_project(self):
read_only_user = AuthUserFactory()
self.private_project.add_contributor(read_only_user, permissions=[permissions.READ], save=True)
res = self.app.post_json_api(self.private_project_url, self.fork_data, auth=read_only_user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], self.private_project.forks.first()._id)
assert_equal(res.json['data']['attributes']['title'], 'Fork of ' + self.private_project.title)
# -*- coding: utf-8 -*-
import os
import datetime
import httplib as http
import time
import functools
import furl
import itsdangerous
import jwe
import jwt
import mock
import pytest
from django.utils import timezone
from django.contrib.auth.models import Permission
from framework.auth import cas, signing
from framework.auth.core import Auth
from framework.exceptions import HTTPError
from nose.tools import * # noqa
from osf_tests import factories
from tests.base import OsfTestCase, get_default_metaschema
from api_tests.utils import create_test_file
from osf_tests.factories import (AuthUserFactory, ProjectFactory,
RegistrationFactory)
from website import settings
from addons.base import views
from addons.github.exceptions import ApiError
from addons.github.models import GithubFolder, GithubFile, GithubFileNode
from addons.github.tests.factories import GitHubAccountFactory
from addons.osfstorage.models import OsfStorageFileNode
from addons.osfstorage.tests.factories import FileVersionFactory
from osf.models import Session, MetaSchema, QuickFilesNode
from osf.models import files as file_models
from osf.models.files import BaseFileNode, TrashedFileNode, FileVersion
from website.project import new_private_link
from website.project.views.node import _view_project as serialize_node
from website.project.views.node import serialize_addons, collect_node_config_js
from website.util import api_url_for, rubeus
from dateutil.parser import parse as parse_date
from framework import sentry
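# WSGI middleware that copies the given kwargs into each request's environ before
# delegating to the wrapped application.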
class SetEnvironMiddleware(object):
def __init__(self, app, **kwargs):
self.app = app
self.kwargs = kwargs
def __call__(self, environ, start_response):
environ.update(self.kwargs)
return self.app(environ, start_response)
class TestAddonAuth(OsfTestCase):
def setUp(self):
super(TestAddonAuth, self).setUp()
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
self.JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
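# Give both the user and the node a configured GitHub addon, linked through an
# external account and an oauth grant, so the waterbutler auth checks below succeed.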
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(display_name='john')
self.oauth_settings.save()
self.user.external_accounts.add(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
self.user_addon.save()
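# Build the URL for the get_auth view: the request data (action/nid/provider plus any
# overrides) is encoded as a JWT, wrapped in JWE, and passed as the 'payload' query
# parameter.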
def build_url(self, **kwargs):
options = {'payload': jwe.encrypt(jwt.encode({'data': dict(dict(
action='download',
nid=self.node._id,
provider=self.node_addon.config.short_name), **kwargs),
'exp': timezone.now() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), self.JWE_KEY)}
return api_url_for('get_auth', **options)
def test_auth_download(self):
url = self.build_url()
res = self.app.get(url, auth=self.user.auth)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_render_action_returns_200(self):
url = self.build_url(action='render')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_auth_render_action_requires_read_permission(self):
node = ProjectFactory(is_public=False)
url = self.build_url(action='render', nid=node._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_auth_export_action_returns_200(self):
url = self.build_url(action='export')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_auth_export_action_requires_read_permission(self):
node = ProjectFactory(is_public=False)
url = self.build_url(action='export', nid=node._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_auth_missing_args(self):
url = self.build_url(cookie=None)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_cookie(self):
url = self.build_url(cookie=self.cookie)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 200)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_bad_cookie(self):
url = self.build_url(cookie=self.cookie[::-1])
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_missing_addon(self):
url = self.build_url(provider='queenhub')
res = self.app.get(url, expect_errors=True, auth=self.user.auth)
assert_equal(res.status_code, 400)
@mock.patch('addons.base.views.cas.get_client')
def test_auth_bad_bearer_token(self, mock_cas_client):
mock_cas_client.return_value = mock.Mock(profile=mock.Mock(return_value=cas.CasResponse(authenticated=False)))
url = self.build_url()
res = self.app.get(url, headers={'Authorization': 'Bearer invalid_access_token'}, expect_errors=True)
assert_equal(res.status_code, 403)
class TestAddonLogs(OsfTestCase):
def setUp(self):
super(TestAddonLogs, self).setUp()
self.user = AuthUserFactory()
self.user_non_contrib = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.file = OsfStorageFileNode.create(
node=self.node,
path='/testfile',
_id='testfile',
name='testfile',
materialized_path='/testfile'
)
self.file.save()
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(display_name='john')
self.oauth_settings.save()
self.user.external_accounts.add(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
self.user_addon.save()
def configure_osf_addon(self):
self.project = ProjectFactory(creator=self.user)
self.node_addon = self.project.get_addon('osfstorage')
self.node_addon.save()
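# Build a signed payload for the create_waterbutler_log view; keys explicitly set to
# None are dropped so callers can simulate missing fields (e.g. auth=None).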
def build_payload(self, metadata, **kwargs):
options = dict(
auth={'id': self.user._id},
action='create',
provider=self.node_addon.config.short_name,
metadata=metadata,
time=time.time() + 1000,
)
options.update(kwargs)
options = {
key: value
for key, value in options.iteritems()
if value is not None
}
message, signature = signing.default_signer.sign_payload(options)
return {
'payload': message,
'signature': signature,
}
@mock.patch('website.notifications.events.files.FileAdded.perform')
def test_add_log(self, mock_perform):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path})
nlogs = self.node.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.node.reload()
assert_equal(self.node.logs.count(), nlogs + 1)
# # Mocking form_message and perform so that the payload need not be exact.
# assert_true(mock_form_message.called, "form_message not called")
assert_true(mock_perform.called, 'perform not called')
@pytest.mark.enable_quickfiles_creation
def test_waterbutler_hook_succeeds_for_quickfiles_nodes(self):
quickfiles = QuickFilesNode.objects.get_for_user(self.user)
materialized_path = 'pizza'
url = quickfiles.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': 'abc123', 'materialized': materialized_path, 'kind': 'file'}, provider='osfstorage')
resp = self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
assert resp.status_code == 200
def test_add_log_missing_args(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, auth=None)
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_add_log_no_user(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, auth={'id': None})
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_add_log_no_addon(self):
path = 'pizza'
node = ProjectFactory(creator=self.user)
url = node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path})
nlogs = node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(node.logs.count(), nlogs)
def test_add_log_bad_action(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, action='dance')
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_action_file_rename(self):
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(
action='rename',
metadata={
'path': 'foo',
},
source={
'materialized': 'foo',
'provider': 'github',
'node': {'_id': self.node._id},
'name': 'new.txt',
'kind': 'file',
},
destination={
'path': 'foo',
'materialized': 'foo',
'provider': 'github',
'node': {'_id': self.node._id},
'name': 'old.txt',
'kind': 'file',
},
)
self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'}
)
self.node.reload()
assert_equal(
self.node.logs.latest().action,
'github_addon_file_renamed',
)
def test_action_downloads_contrib(self):
url = self.node.api_url_for('create_waterbutler_log')
download_actions = ('download_file', 'download_zip')
wb_url = settings.WATERBUTLER_URL + '?version=1'
for action in download_actions:
payload = self.build_payload(metadata={'path': '/testfile',
'nid': self.node._id},
action_meta={'is_mfr_render': False},
request_meta={'url': wb_url},
action=action)
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=False,
)
assert_equal(res.status_code, 200)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_add_file_osfstorage_log(self):
self.configure_osf_addon()
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'file', 'path': path})
nlogs = self.node.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.node.reload()
assert_equal(self.node.logs.count(), nlogs + 1)
assert('urls' in self.node.logs.filter(action='osf_storage_file_added')[0].params)
def test_add_folder_osfstorage_log(self):
self.configure_osf_addon()
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'materialized': path, 'kind': 'folder', 'path': path})
nlogs = self.node.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.node.reload()
assert_equal(self.node.logs.count(), nlogs + 1)
assert('urls' not in self.node.logs.filter(action='osf_storage_file_added')[0].params)
class TestCheckAuth(OsfTestCase):
def setUp(self):
super(TestCheckAuth, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
def test_has_permission(self):
res = views.check_access(self.node, Auth(user=self.user), 'upload', None)
assert_true(res)
def test_not_has_permission_read_public(self):
self.node.is_public = True
self.node.save()
views.check_access(self.node, Auth(), 'download', None)
def test_not_has_permission_read_has_link(self):
link = new_private_link('red-special', self.user, [self.node], anonymous=False)
views.check_access(self.node, Auth(private_key=link.key), 'download', None)
def test_not_has_permission_logged_in(self):
user2 = AuthUserFactory()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node, Auth(user=user2), 'download', None)
assert_equal(exc_info.exception.code, 403)
def test_not_has_permission_not_logged_in(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node, Auth(), 'download', None)
assert_equal(exc_info.exception.code, 401)
def test_has_permission_on_parent_node_upload_pass_if_registration(self):
component_admin = AuthUserFactory()
ProjectFactory(creator=component_admin, parent=self.node)
registration = RegistrationFactory(project=self.node)
component_registration = registration._nodes.first()
assert_false(component_registration.has_permission(self.user, 'write'))
res = views.check_access(component_registration, Auth(user=self.user), 'upload', None)
assert_true(res)
def test_has_permission_on_parent_node_metadata_pass_if_registration(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, parent=self.node, is_public=False)
component_registration = RegistrationFactory(project=component, creator=component_admin)
assert_false(component_registration.has_permission(self.user, 'read'))
res = views.check_access(component_registration, Auth(user=self.user), 'metadata', None)
assert_true(res)
def test_has_permission_on_parent_node_upload_fail_if_not_registration(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, parent=self.node)
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError):
views.check_access(component, Auth(user=self.user), 'upload', None)
def test_has_permission_on_parent_node_copyfrom(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'copyfrom', None)
assert_true(res)
class TestCheckPreregAuth(OsfTestCase):
def setUp(self):
super(TestCheckPreregAuth, self).setUp()
self.prereg_challenge_admin_user = AuthUserFactory()
administer_permission = Permission.objects.get(codename='administer_prereg')
self.prereg_challenge_admin_user.user_permissions.add(administer_permission)
self.prereg_challenge_admin_user.save()
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=2)
self.user = AuthUserFactory()
self.node = factories.ProjectFactory(creator=self.user)
self.parent = factories.ProjectFactory()
self.child = factories.NodeFactory(parent=self.parent)
self.draft_registration = factories.DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema,
branched_from=self.parent
)
def test_has_permission_download_prereg_challenge_admin(self):
res = views.check_access(self.draft_registration.branched_from,
Auth(user=self.prereg_challenge_admin_user), 'download', None)
assert_true(res)
def test_has_permission_download_on_component_prereg_challenge_admin(self):
try:
res = views.check_access(self.draft_registration.branched_from._nodes.first(),
Auth(user=self.prereg_challenge_admin_user), 'download', None)
except Exception:
self.fail()
assert_true(res)
def test_has_permission_download_not_prereg_challenge_admin(self):
new_user = AuthUserFactory()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.draft_registration.branched_from,
Auth(user=new_user), 'download', None)
assert_equal(exc_info.exception.code, http.FORBIDDEN)
def test_has_permission_download_prereg_challenge_admin_not_draft(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node,
Auth(user=self.prereg_challenge_admin_user), 'download', None)
assert_equal(exc_info.exception.code, http.FORBIDDEN)
def test_has_permission_write_prereg_challenge_admin(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.draft_registration.branched_from,
Auth(user=self.prereg_challenge_admin_user), 'write', None)
assert_equal(exc_info.exception.code, http.FORBIDDEN)
class TestCheckOAuth(OsfTestCase):
def setUp(self):
super(TestCheckOAuth, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
def test_has_permission_private_not_authenticated(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=False)
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_private_no_scope_forbidden(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {}})
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_public_irrelevant_scope_allowed(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=True, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.users.all_read'}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_private_irrelevant_scope_forbidden(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.users.all_read'}})
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_decommissioned_scope_no_error(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {
'decommissioned.scope+write',
'osf.nodes.data_read',
}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_write_scope_read_action(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.nodes.data_write'}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_read_scope_write_action_forbidden(self):
component = ProjectFactory(creator=self.user, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.nodes.data_read'}})
assert_true(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'upload', cas_resp)
assert_equal(exc_info.exception.code, 403)
def assert_urls_equal(url1, url2):
furl1 = furl.furl(url1)
furl2 = furl.furl(url2)
for attr in ['scheme', 'host', 'port']:
setattr(furl1, attr, None)
setattr(furl2, attr, None)
# Note: furl args are ordered and can cause spurious mismatches, so compare them as dicts first
assert_equal(dict(furl1.args), dict(furl2.args))
furl1.args = {}
furl2.args = {}
assert_equal(furl1, furl2)
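# Stand-in for GithubFileNode.touch used in the class-level patch below: returns the
# requested stored version if it exists, None for an unknown version, and a fresh
# FileVersion when no version is requested.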
def mock_touch(self, bearer, version=None, revision=None, **kwargs):
if version:
if self.versions:
try:
return self.versions[int(version) - 1]
except (IndexError, ValueError):
return None
else:
return None
return file_models.FileVersion()
@mock.patch('addons.github.models.GithubFileNode.touch', mock_touch)
@mock.patch('addons.github.models.GitHubClient.repo', mock.Mock(side_effect=ApiError))
class TestAddonFileViews(OsfTestCase):
def setUp(self):
super(TestAddonFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.user.add_addon('github')
self.project.add_addon('github', auth=Auth(self.user))
self.user_addon = self.user.get_addon('github')
self.node_addon = self.project.get_addon('github')
self.oauth = GitHubAccountFactory()
self.oauth.save()
self.user.external_accounts.add(self.oauth)
self.user.save()
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth
self.node_addon.repo = 'Truth'
self.node_addon.user = 'E'
self.node_addon.save()
self.user_addon.oauth_grants[self.project._id] = {self.oauth._id: []}
self.user_addon.save()
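# Decorator factory that temporarily forces framework.sentry.enabled to the given value
# for the duration of a test; with_sentry switches it on so captureMessage calls can be
# asserted against.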
def set_sentry(status):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
enabled, sentry.enabled = sentry.enabled, status
func(*args, **kwargs)
sentry.enabled = enabled
return wrapped
return wrapper
with_sentry = set_sentry(True)
def get_test_file(self):
version = file_models.FileVersion(identifier='1')
version.save()
ret = GithubFile(
name='Test',
node=self.project,
path='/test/Test',
materialized_path='/test/Test',
)
ret.save()
ret.versions.add(version)
return ret
def get_second_test_file(self):
version = file_models.FileVersion(identifier='1')
version.save()
ret = GithubFile(
name='Test2',
node=self.project,
path='/test/Test2',
materialized_path='/test/Test2',
)
ret.save()
ret.versions.add(version)
return ret
def get_uppercased_ext_test_file(self):
version = file_models.FileVersion(identifier='1')
version.save()
ret = GithubFile(
name='Test2.PDF',
node=self.project,
path='/test/Test2',
materialized_path='/test/Test2',
)
ret.save()
ret.versions.add(version)
return ret
def get_ext_test_file(self):
version = file_models.FileVersion(identifier='1')
version.save()
ret = GithubFile(
name='Test2.pdf',
node=self.project,
path='/test/Test2',
materialized_path='/test/Test2',
)
ret.save()
ret.versions.add(version)
return ret
def get_mako_return(self):
ret = serialize_node(self.project, Auth(self.user), primary=True)
ret.update({
'error': '',
'provider': '',
'file_path': '',
'sharejs_uuid': '',
'private': '',
'urls': {
'files': '',
'render': '',
'sharejs': '',
'mfr': '',
'profile_image': '',
'external': '',
'archived_from': '',
},
'size': '',
'extra': '',
'file_name': '',
'materialized_path': '',
'file_id': '',
})
ret.update(rubeus.collect_addon_assets(self.project))
return ret
def test_redirects_to_guid(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github'
),
auth=self.user.auth
)
assert_equals(resp.status_code, 302)
assert_equals(resp.location, 'http://localhost:80/{}/'.format(guid._id))
def test_action_download_redirects_to_download_with_param(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/?action=download'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, version=''))
def test_action_download_redirects_to_download_with_path(self):
file_node = self.get_ext_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/download?format=pdf'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
assert_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, version='', format='pdf'))
def test_action_download_redirects_to_download_with_path_uppercase(self):
file_node = self.get_uppercased_ext_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/download?format=pdf'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
assert_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, version='', format='pdf'))
def test_action_download_redirects_to_download_with_version(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/?action=download&revision=1'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
# Note: version is added by us, but all other url params are passed through as well
assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, revision=1, version=''))
@mock.patch('addons.base.views.addon_view_file')
@pytest.mark.enable_bookmark_creation
def test_action_view_calls_view_file(self, mock_view_file):
self.user.reload()
self.project.reload()
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
mock_view_file.return_value = self.get_mako_return()
self.app.get('/{}/?action=view'.format(guid._id), auth=self.user.auth)
args, kwargs = mock_view_file.call_args
assert_equals(kwargs, {})
assert_equals(args[0].user._id, self.user._id)
assert_equals(args[1], self.project)
assert_equals(args[2], file_node)
assert_true(isinstance(args[3], file_node.touch(None).__class__))
@mock.patch('addons.base.views.addon_view_file')
@pytest.mark.enable_bookmark_creation
def test_no_action_calls_view_file(self, mock_view_file):
self.user.reload()
self.project.reload()
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
mock_view_file.return_value = self.get_mako_return()
self.app.get('/{}/'.format(guid._id), auth=self.user.auth)
args, kwargs = mock_view_file.call_args
assert_equals(kwargs, {})
assert_equals(args[0].user._id, self.user._id)
assert_equals(args[1], self.project)
assert_equals(args[2], file_node)
assert_true(isinstance(args[3], file_node.touch(None).__class__))
def test_download_create_guid(self):
file_node = self.get_test_file()
assert_is(file_node.get_guid(), None)
self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github',
),
auth=self.user.auth
)
assert_true(file_node.get_guid())
@pytest.mark.enable_bookmark_creation
def test_view_file_does_not_delete_file_when_requesting_invalid_version(self):
with mock.patch('addons.github.models.NodeSettings.is_private',
new_callable=mock.PropertyMock) as mock_is_private:
mock_is_private.return_value = False
file_node = self.get_test_file()
assert_is(file_node.get_guid(), None)
url = self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github',
)
# The first view generates the GUID
self.app.get(url, auth=self.user.auth)
self.app.get(url + '?version=invalid', auth=self.user.auth, expect_errors=True)
assert_is_not_none(BaseFileNode.load(file_node._id))
assert_is_none(TrashedFileNode.load(file_node._id))
def test_unauthorized_addons_raise(self):
path = 'cloudfiles'
self.node_addon.user_settings = None
self.node_addon.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 401)
def test_nonstorage_addons_raise(self):
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path='sillywiki',
provider='wiki',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 400)
def test_head_returns_url_and_redirect(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.head('/{}/'.format(guid._id), auth=self.user.auth)
location = furl.furl(resp.location)
assert_equals(resp.status_code, 302)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None, version=''))
def test_head_returns_url_with_version_and_redirect(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.head('/{}/?revision=1&foo=bar'.format(guid._id), auth=self.user.auth)
location = furl.furl(resp.location)
# Note: version is added by us, but all other url params are passed through as well
assert_equals(resp.status_code, 302)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None, revision=1, version='', foo='bar'))
def test_nonexistent_addons_raise(self):
path = 'cloudfiles'
self.project.delete_addon('github', Auth(self.user))
self.project.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 400)
def test_unauth_addons_raise(self):
path = 'cloudfiles'
self.node_addon.user_settings = None
self.node_addon.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 401)
def test_delete_action_creates_trashed_file_node(self):
file_node = self.get_test_file()
payload = {
'provider': file_node.provider,
'metadata': {
'path': '/test/Test',
'materialized': '/test/Test'
}
}
views.addon_delete_file_node(self=None, node=self.project, user=self.user, event_type='file_removed', payload=payload)
assert_false(GithubFileNode.load(file_node._id))
assert_true(TrashedFileNode.load(file_node._id))
def test_delete_action_for_folder_deletes_subfolders_and_creates_trashed_file_nodes(self):
file_node = self.get_test_file()
subfolder = GithubFolder(
name='folder',
node=self.project,
path='/test/folder/',
materialized_path='/test/folder/',
)
subfolder.save()
payload = {
'provider': file_node.provider,
'metadata': {
'path': '/test/',
'materialized': '/test/'
}
}
views.addon_delete_file_node(self=None, node=self.project, user=self.user, event_type='file_removed', payload=payload)
assert_false(GithubFileNode.load(subfolder._id))
assert_true(TrashedFileNode.load(file_node._id))
@mock.patch('website.archiver.tasks.archive')
def test_archived_from_url(self, mock_archive):
file_node = self.get_test_file()
second_file_node = self.get_second_test_file()
file_node.copied_from = second_file_node
registered_node = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(self.user),
data=None,
)
archived_from_url = views.get_archived_from_url(registered_node, file_node)
view_url = self.project.web_url_for('addon_view_or_download_file', provider=file_node.provider, path=file_node.copied_from._id)
assert_true(archived_from_url)
assert_urls_equal(archived_from_url, view_url)
@mock.patch('website.archiver.tasks.archive')
def test_archived_from_url_without_copied_from(self, mock_archive):
file_node = self.get_test_file()
registered_node = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(self.user),
data=None,
)
archived_from_url = views.get_archived_from_url(registered_node, file_node)
assert_false(archived_from_url)
@mock.patch('website.archiver.tasks.archive')
def test_copied_from_id_trashed(self, mock_archive):
file_node = self.get_test_file()
second_file_node = self.get_second_test_file()
file_node.copied_from = second_file_node
self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(self.user),
data=None,
)
trashed_node = second_file_node.delete()
assert_false(trashed_node.copied_from)
@mock.patch('website.archiver.tasks.archive')
def test_missing_modified_date_in_file_data(self, mock_archive):
file_node = self.get_test_file()
file_data = {
'name': 'Test File Update',
'materialized': file_node.materialized_path,
'modified': None
}
file_node.update(revision=None, data=file_data)
assert_equal(len(file_node.history), 1)
assert_equal(file_node.history[0], file_data)
@mock.patch('website.archiver.tasks.archive')
def test_missing_modified_date_in_file_history(self, mock_archive):
file_node = self.get_test_file()
file_node.history.append({'modified': None})
file_data = {
'name': 'Test File Update',
'materialized': file_node.materialized_path,
'modified': None
}
file_node.update(revision=None, data=file_data)
assert_equal(len(file_node.history), 2)
assert_equal(file_node.history[1], file_data)
@with_sentry
@mock.patch('framework.sentry.sentry.captureMessage')
def test_update_logs_to_sentry_when_called_with_disordered_metadata(self, mock_capture):
file_node = self.get_test_file()
file_node.history.append({'modified': parse_date(
'2017-08-22T13:54:32.100900',
ignoretz=True,
default=timezone.now()  # Just in case nothing can be parsed
)})
data = {
'name': 'a name',
'materialized': 'materialized',
'modified': '2016-08-22T13:54:32.100900'
}
file_node.update(revision=None, user=None, data=data)
mock_capture.assert_called_with(unicode('update() receives metatdata older than the newest entry in file history.'), extra={'session': {}})
class TestLegacyViews(OsfTestCase):
def setUp(self):
super(TestLegacyViews, self).setUp()
self.path = 'mercury.png'
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.node_addon = self.project.get_addon('osfstorage')
file_record = self.node_addon.get_root().append_file(self.path)
self.expected_path = file_record._id
self.node_addon.save()
file_record.save()
def test_view_file_redirect(self):
url = '/{0}/osffiles/{1}/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.expected_path,
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_download_file_redirect(self):
url = '/{0}/osffiles/{1}/download/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_download_file_version_redirect(self):
url = '/{0}/osffiles/{1}/version/3/download/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
version=3,
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_api_download_file_redirect(self):
url = '/api/v1/project/{0}/osffiles/{1}/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_api_download_file_version_redirect(self):
url = '/api/v1/project/{0}/osffiles/{1}/version/3/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
version=3,
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_no_provider_name(self):
url = '/{0}/files/{1}'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.expected_path,
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_action_as_param(self):
url = '/{}/osfstorage/files/{}/?action=download'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_other_addon_redirect(self):
url = '/project/{0}/mycooladdon/files/{1}/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.path,
provider='mycooladdon',
)
assert_urls_equal(res.location, expected_url)
def test_other_addon_redirect_download(self):
url = '/project/{0}/mycooladdon/files/{1}/download/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.path,
action='download',
provider='mycooladdon',
)
assert_urls_equal(res.location, expected_url)
class TestViewUtils(OsfTestCase):
def setUp(self):
super(TestViewUtils, self).setUp()
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
self.JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
self.mock_api_credentials_are_valid = mock.patch('addons.github.api.GitHubClient.check_authorization', return_value=True)
self.mock_api_credentials_are_valid.start()
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(display_name='john')
self.oauth_settings.save()
self.user.external_accounts.add(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
self.user_addon.save()
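# With the github addon configured above, serialize_addons should report two enabled
# addons: github plus the always-on osfstorage default.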
def test_serialize_addons(self):
addon_dicts = serialize_addons(self.node, self.auth_obj)
enabled_addons = [addon for addon in addon_dicts if addon['enabled']]
assert len(enabled_addons) == 2
assert enabled_addons[0]['short_name'] == 'github'
assert enabled_addons[1]['short_name'] == 'osfstorage'
default_addons = [addon for addon in addon_dicts if addon['default']]
assert len(default_addons) == 1
assert default_addons[0]['short_name'] == 'osfstorage'
def test_include_template_json(self):
""" Some addons (github, gitlab) need more specialized template infomation so we want to
ensure we get those extra variables that when the addon is enabled.
"""
addon_dicts = serialize_addons(self.node, self.auth_obj)
enabled_addons = [addon for addon in addon_dicts if addon['enabled']]
assert len(enabled_addons) == 2
assert enabled_addons[1]['short_name'] == 'osfstorage'
assert enabled_addons[0]['short_name'] == 'github'
assert 'node_has_auth' in enabled_addons[0]
assert 'valid_credentials' in enabled_addons[0]
def test_collect_node_config_js(self):
addon_dicts = serialize_addons(self.node, self.auth_obj)
asset_paths = collect_node_config_js(addon_dicts)
# Default addons should be in the addon dicts, but they have no js assets because you
# can't connect/disconnect from them (osfstorage, for example, has no node-cfg page).
default_addons = [addon['short_name'] for addon in addon_dicts if addon['default']]
assert not any('/{}/'.format(addon) in asset_paths for addon in default_addons)
import functools
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode
from avocado.models import DataField
from avocado.events import usage
from restlib2.http import codes
from restlib2.resources import Resource
from serrano.resources.field.values import FieldValues
from preserialize.serialize import serialize
def item_posthook(instance, data, request, pk):
"Post-serialize hook for item instances."
uri = request.build_absolute_uri
data.update({
'label': unicode(instance),
'value': instance.pk,
})
parent_kwargs = {'pk': pk}
if instance.parent_id:
parent_kwargs['item_pk'] = instance.parent_id
# Add link to self and parent
data['_links'] = {
'self': {
'href': uri(reverse('vocab:item', kwargs={
'pk': pk,
'item_pk': data['id'],
}))
},
'parent': {
'href': uri(reverse('vocab:items', kwargs=parent_kwargs))
}
}
# If this item is not a terminal, add link to child nodes
if not instance.terminal:
data['_links']['children'] = {
'href': uri(reverse('vocab:items', kwargs={
'pk': pk,
'item_pk': data['id'],
}))
}
return data
class ItemBaseResource(Resource):
def is_not_found(self, request, response, pk, item_pk=None):
# Non-integer value
try:
pk = int(pk)
except (ValueError, TypeError):
return True
# Field does not exist
try:
field = DataField.objects.get(pk=pk)
except DataField.DoesNotExist:
return True
# If an item is specified, ensure it exists
if item_pk:
try:
request.item = field.model.objects.get(pk=item_pk)
except field.model.DoesNotExist:
return True
# Attach to request for downstream use
request.instance = field
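# Serialize a single item or a queryset of items, attaching the post-serialize hook
# that adds the label/value pair and the _links structure.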
def prepare(self, request, objects, pk, template=None):
if template is None:
template = {'fields': [':pk']}
posthook = functools.partial(item_posthook, request=request, pk=pk)
return serialize(objects, posthook=posthook, **template)
# TODO if/when serrano becomes more reusable, update and remove
# boilerplate code
class ItemsResource(ItemBaseResource, FieldValues):
"""Resource for vocab items. If no item is specified, items without a
parent are returned, otherwise the children of the specified item are
returned.
This is a modified form of serrano.resources.FieldValues.
"""
template = None
def get_base_values(self, request, instance, params, item_pk=None):
return super(ItemsResource, self).get_base_values(request, instance,
params)
def get_all_values(self, request, instance, params, item_pk=None):
queryset = self.get_base_values(request, instance, params, item_pk)
queryset = queryset.filter(parent__pk=item_pk)
return self.prepare(request, queryset, instance.pk)
def get_search_values(self, request, instance, params, item_pk=None):
queryset = self.get_base_values(request, instance, params, item_pk)
if item_pk:
queryset = queryset.filter(parent__pk=item_pk)
condition = Q()
for field in instance.model.search_fields:
condition = condition | Q(**{
'{0}__icontains'.format(field): params['query']
})
queryset = queryset.filter(condition)
return self.prepare(request, queryset, instance.pk)
def get_random_values(self, request, instance, params, item_pk=None):
queryset = self.get_base_values(request, instance, params, item_pk)
queryset = queryset.order_by('?')[:params['random']]
return self.prepare(request, queryset, instance.pk)
def get(self, request, pk, item_pk=None):
instance = request.instance
params = self.get_params(request)
if params['random']:
return self.get_random_values(request, instance, params, item_pk)
page = params['page']
limit = params['limit']
# If a query term is supplied, perform the icontains search
if params['query']:
usage.log('values', instance=instance, request=request, data={
'query': params['query'],
'item_pk': item_pk,
})
values = self.get_search_values(request, instance, params, item_pk)
else:
values = self.get_all_values(request, instance, params, item_pk)
# No page specified, return everything
if page is None:
return values
paginator = self.get_paginator(values, limit=limit)
page = paginator.page(page)
kwargs = {'pk': pk}
if item_pk:
kwargs['item_pk'] = item_pk
path = reverse('vocab:items', kwargs=kwargs)
links = self.get_page_links(request, path, page, extra=params)
links['parent'] = {
'href': request.build_absolute_uri(reverse('serrano:field',
kwargs={'pk': pk}))
}
return {
'values': page.object_list,
'limit': paginator.per_page,
'num_pages': paginator.num_pages,
'page_num': page.number,
'_links': links,
}
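# Validate a batch of {'value': ...} / {'label': ...} objects against this field's
# values: each item comes back with 'valid' set and the missing half (value or label)
# filled in from the matching row when one exists.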
def post(self, request, pk):
instance = self.get_object(request, pk=pk)
params = self.get_params(request)
if not request.data:
data = {
'message': 'Error parsing data',
}
return self.render(request, data,
status=codes.unprocessable_entity)
if isinstance(request.data, dict):
array = [request.data]
else:
array = request.data
values = []
labels = []
array_map = {}
# Separate out the values and labels for the lookup. Track the index of each datum
# so the order of the input array is maintained.
for i, datum in enumerate(array):
# Value takes precedence over label if supplied
if 'value' in datum:
array_map[i] = 'value'
values.append(datum['value'])
elif 'label' in datum:
array_map[i] = 'label'
labels.append(datum['label'])
else:
data = {
'message': 'Error parsing value or label'
}
return self.render(request, data,
status=codes.unprocessable_entity)
value_field_name = instance.field_name
label_field_names = instance.model.search_fields
# Note: this returns a context-aware or naive queryset depending
# on params. Get the value and label fields so they can be filled
# in below.
queryset = self.get_base_values(request, instance, params)\
.values_list(value_field_name, *label_field_names)
lookup = Q()
# Validate based on the label
if labels:
for label_field in label_field_names:
lookup |= Q(**{'{0}__in'.format(label_field): labels})
if values:
lookup |= Q(**{'{0}__in'.format(value_field_name): values})
results = queryset.filter(lookup)
value_labels = {}
label_values = {}
for row in results:
value = row[0]
row_labels = row[1:]
for label in row_labels:
value_labels[value] = label
label_values[label] = value
for i, datum in enumerate(array):
if array_map[i] == 'label':
valid = datum['label'] in label_values
value = datum.get('value')
if not value:
if valid:
value = label_values[datum['label']]
else:
value = datum['label']
datum['valid'] = valid
datum['value'] = value
else:
valid = datum['value'] in value_labels
label = datum.get('label')
if not label:
if valid:
label = value_labels[datum['value']]
else:
label = smart_unicode(datum['value'])
datum['valid'] = valid
datum['label'] = label
usage.log('validate', instance=instance, request=request, data={
'count': len(array),
})
# Return the augmented data
return request.data
class ItemResource(ItemBaseResource):
def get(self, request, pk, item_pk):
return self.prepare(request, request.item, pk)