import isodate
import json
import re
import roax.schema as s
import unittest
from base64 import b64encode
from io import BytesIO
from datetime import date, datetime
from uuid import UUID
_UTC = isodate.tzinfo.Utc()
class TestSchema(unittest.TestCase):
def _equal(self, fn, val):
self.assertEqual(val, fn(val))
def _error(self, fn, val):
with self.assertRaises(s.SchemaError):
fn(val)
# -- dict -----
def test_dict_validate_success(self):
s.dict({"a": s.str()}, {"a"}).validate({"a": "b"})
def test_dict_validate_error(self):
self._error(s.dict({"c": s.int()}).validate, '{"this": "does not validate"}')
def test_dict_validate_required_success(self):
s.dict({"e": s.float()}, {"e"}).validate({"e": 1.2})
def test_dict_validate_required_error(self):
self._error(s.dict({"f": s.str()}, {"f"}).validate, {})
def test_dict_validate_optional_success(self):
s.dict({"k": s.str(), "l": s.str()}).validate({"k": "m"})
def test_dict_validate_default(self):
s.dict({"n": s.str(default="o")}).validate({})
def test_dict_json_encode_success(self):
self._equal(s.dict({"eja": s.str(), "ejb": s.int()}, {"eja", "ejb"}).json_encode,
{"eja": "foo", "ejb": 123})
def test_dict_json_encode_optional_success(self):
self._equal(s.dict({"ejc": s.float(), "ejd": s.bool()}, {"ejc"}).json_encode,
{"ejc": 123.45})
def test_dict_json_encode_default_success(self):
self.assertEqual(s.dict({"eje": s.bool(default=False)}).json_encode({}), {"eje": False})
def test_dict_json_encode_optional_absent(self):
self._equal(s.dict({"eje": s.bool()}).json_encode, {})
def test_dict_json_encode_error(self):
self._error(s.dict({"ejh": s.int()}, {"ejh"}).json_encode, {"ejh": "not an int"})
def test_dict_json_decode_success(self):
self._equal(s.dict({"dja": s.float(), "djb": s.bool()}, {"dja", "djb"}).json_decode,
{"dja": 802.11, "djb": True})
def test_dict_json_decode_optional_success(self):
self._equal(s.dict({"djc": s.int(), "djd": s.str()}).json_decode, {"djc": 12345})
def test_dict_json_decode_default_success(self):
self.assertEqual(s.dict({"dje": s.str(default="defaulty")}).json_decode({}), {"dje": "defaulty"})
def test_dict_json_decode_additional_property_success(self):
value = {"djf": "baz", "djg": "additional_property"}
self.assertEqual(s.dict({"djf": s.str()}, {"djf"},
additional_properties=True).json_decode(value), value)
def test_dict_json_decode_error(self):
self._error(s.dict({"djx": s.str()}, {"djx"}).json_decode, {"djx": False})
def test_dict_unexpected_property_error(self):
self._error(s.dict({}).validate, {"foo": "bar"})
def test_dict_disallow_none(self):
self._error(s.dict({"foo": s.str()}).json_encode, None)
def test_dict_allow_none(self):
self.assertEqual(s.dict({"foo": s.str()}, nullable=True).json_encode(None), None)
def test_dict_required_str(self):
schema = s.dict(properties={"fjx": s.str(), "fjy": s.str()}, required="fjx,fjy")
self._error(schema.validate, {})
self._error(schema.validate, {"fjx": "foo"})
self._error(schema.validate, {"fjy": "foo"})
schema.validate({"fjx": "foo", "fjy": "foo"})
# -- list -----
def test_list_validate_type_str_success(self):
s.list(items=s.str()).validate(["a", "b", "c"])
def test_list_validate_type_int_success(self):
s.list(items=s.int()).validate([1, 2, 3])
def test_list_validate_type_str_error(self):
self._error(s.list(items=s.str()).validate, [4, 5, 6])
def test_list_validate_type_int_error(self):
self._error(s.list(items=s.int()).validate, ["d", "e", "f"])
def test_list_validate_type_error(self):
self._error(s.list(items=s.bool()).validate, "this_is_not_a_list")
def test_list_validate_min_items_success(self):
s.list(items=s.int(), min_items=2).validate([1, 2, 3])
def test_list_validate_min_items_error(self):
self._error(s.list(items=s.int(), min_items=3).validate, [1, 2])
def test_list_validate_max_items_success(self):
s.list(items=s.int(), max_items=5).validate([1, 2, 3, 4])
def test_list_validate_max_items_error(self):
self._error(s.list(items=s.int(), max_items=6).validate, [1, 2, 3, 4, 5, 6, 7])
def test_list_validate_unique_success(self):
s.list(items=s.int(), unique_items=True).validate([1, 2, 3, 4, 5])
def test_list_validate_unique_error(self):
self._error(s.list(items=s.int(), unique_items=True).validate, [1, 2, 2, 3])
def test_list_json_encode_success(self):
self._equal(s.list(items=s.str()).json_encode, ["a", "b", "c"])
def test_list_json_encode_type_error(self):
self._error(s.list(items=s.str()).json_encode, "i_am_not_a_list")
def test_list_json_encode_item_type_error(self):
self._error(s.list(items=s.str()).json_encode, [1, 2, 3])
def test_list_json_decode_success(self):
self._equal(s.list(items=s.float()).json_decode, [1.2, 3.4, 5.6])
def test_list_json_decode_error(self):
self._error(s.list(items=s.str()).json_decode, "not_a_list_either")
def test_list_str_encode_success(self):
self.assertEqual(s.list(items=s.str()).str_encode(["a", "b", "c"]), "a,b,c")
def test_list_str_decode_success(self):
self.assertEqual(s.list(items=s.str()).str_decode("a,b,c"), ["a", "b", "c"])
def test_list_bin_encode_success(self):
self.assertEqual(json.loads(s.list(items=s.str()).bin_encode(["a", "b", "c"]).decode()), json.loads('["a","b","c"]'))
def test_list_bin_decode_success(self):
self.assertEqual(s.list(items=s.str()).bin_decode(b'["a","b","c"]'), ["a", "b", "c"])
def test_list_str_decode_int_success(self):
self.assertEqual(s.list(items=s.int()).str_decode("12,34,56"), [12, 34, 56])
def test_list_str_decode_float_success(self):
self.assertEqual(s.list(items=s.float()).str_decode("12.34,56.78"), [12.34, 56.78])
def test_list_str_decode_crazy_csv_scenario(self):
self.assertEqual(s.list(items=s.str()).str_decode('a,"b,c",d,"""e"""'), ["a","b,c","d",'"e"'])
def test_list_str_decode_int_error(self):
self._error(s.list(items=s.int()).str_decode, "12,a,34,56")
def test_list_disallow_none(self):
self._error(s.list(items=s.str()).json_encode, None)
def test_list_allow_none(self):
self.assertEqual(s.list(items=s.str(), nullable=True).json_encode(None), None)
# -- set -----
def test_set_validate_type_str_success(self):
s.set(items=s.str()).validate({"a", "b", "c"})
def test_set_validate_type_int_success(self):
s.set(items=s.int()).validate({1, 2, 3})
def test_set_validate_type_str_error(self):
self._error(s.set(items=s.str()).validate, {4, 5, 6})
def test_set_validate_type_int_error(self):
self._error(s.set(items=s.int()).validate, {"d", "e", "f"})
def test_set_validate_type_error(self):
self._error(s.set(items=s.bool()).validate, "this_is_not_a_set")
def test_set_json_encode_success(self):
schema = s.set(s.str())
value = {"a", "b", "c"}
encdec = schema.json_decode(schema.json_encode(value))
self.assertEqual(encdec, value)
def test_set_json_encode_type_error(self):
self._error(s.set(items=s.str()).json_encode, "i_am_not_a_list")
def test_set_json_encode_item_type_error(self):
self._error(s.set(items=s.str()).json_encode, {1, 2, 3})
def test_set_json_decode_success(self):
self._equal(s.set(items=s.float()).json_decode, {1.2, 3.4, 5.6})
def test_set_json_decode_error(self):
self._error(s.set(items=s.str()).json_decode, "not_a_set_either")
def test_set_str_decode_str_success(self):
self.assertEqual(s.set(items=s.str()).str_decode("a,b,c"), {"a", "b", "c"})
def test_set_str_decode_int_success(self):
self.assertEqual(s.set(items=s.int()).str_decode("12,34,56"), {12, 34, 56})
def test_set_str_decode_float_success(self):
self.assertEqual(s.set(items=s.float()).str_decode("12.34,56.78"), {12.34, 56.78})
def test_set_str_decode_crazy_csv_scenario(self):
self.assertEqual(s.set(items=s.str()).str_decode('a,"b,c",d,"""e"""'), {"a","b,c","d",'"e"'})
def test_set_str_decode_int_error(self):
self._error(s.set(items=s.int()).str_decode, "12,a,34,56")
def test_set_bin_encode_success(self):
self.assertEqual(json.loads(s.set(items=s.str()).bin_encode({"a", "b", "c"}).decode()), json.loads('["a","b","c"]'))
def test_set_bin_decode_success(self):
self.assertEqual(s.set(items=s.str()).bin_decode(b'["a","b","c"]'), {"a", "b", "c"})
def test_set_disallow_none(self):
self._error(s.set(items=s.str()).json_encode, None)
def test_set_allow_none(self):
self.assertEqual(s.set(items=s.str(), nullable=True).json_encode(None), None)
# -- str -----
def test_str_validate_type_success(self):
s.str().validate("foo")
def test_str_validate_type_error(self):
self._error(s.str().validate, 123)
def test_str_validate_min_length_success(self):
s.str(min_length=3).validate("12345")
def test_str_validate_min_length_error(self):
self._error(s.str(min_length=4).validate, "123")
def test_str_validate_max_length_success(self):
s.str(max_length=5).validate("12345")
def test_str_validate_max_length_error(self):
self._error(s.str(max_length=6).validate, "1234567")
def test_str_validate_pattern_success(self):
s.str(pattern=re.compile(r"^abc$")).validate("abc")
def test_str_validate_pattern_error(self):
self._error(s.str(pattern=re.compile(r"^def$")).validate, "ghi")
def test_str_json_encode_success(self):
self._equal(s.str().json_encode, "foo")
def test_str_json_encode_error(self):
self._error(s.str().json_encode, 123)
def test_str_json_decode_success(self):
self._equal(s.str().json_decode, "bar")
def test_str_json_decode_error(self):
self._error(s.str().json_decode, [])
def test_str_str_decode_success(self):
self._equal(s.str().str_decode, "qux")
def test_str_validate_enum_success(self):
s.str(enum=["a", "b", "c", "d", "e"]).validate("e")
def test_str_validate_enum_error(self):
self._error(s.str(enum=["f", "g", "h"]).validate, "i")
def test_str_disallow_none(self):
self._error(s.str().json_encode, None)
def test_str_allow_none(self):
self.assertEqual(s.str(nullable=True).json_encode(None), None)
# -- int -----
def test_int_validate_type_success(self):
s.int().validate(123)
def test_int_validate_type_error(self):
self._error(s.int().validate, 123.45)
def test_int_validate_minimum_success(self):
s.int(minimum=1).validate(2)
def test_int_validate_minimum_error(self):
self._error(s.int(minimum=2).validate, 1)
def test_int_validate_maximum_success(self):
s.int(maximum=3).validate(2)
def test_int_validate_maximum_error(self):
self._error(s.int(maximum=4).validate, 5)
def test_int_json_encode_success(self):
self._equal(s.int().json_encode, 6)
def test_int_json_encode_error(self):
self._error(s.int().json_encode, 7.0)
def test_int_json_decode_success_int(self):
self._equal(s.int().json_decode, 8)
def test_int_json_decode_success_round_float(self):
self._equal(s.int().json_decode, 8.0)
def test_int_json_decode_error_float(self):
self._error(s.int().json_decode, 9.1)
def test_int_str_decode_success(self):
self.assertEqual(s.int().str_decode("10"), 10)
def test_int_str_decode_error(self):
self._error(s.int().str_decode, "11.2")
def test_int_validate_enum_success(self):
s.int(enum=[1, 2, 3, 4, 5]).validate(4)
def test_int_validate_enum_error(self):
self._error(s.int(enum=[6, 7, 8, 9]).validate, 3)
def test_int_disallow_none(self):
self._error(s.int().json_encode, None)
def test_int_allow_none(self):
self.assertEqual(s.int(nullable=True).json_encode(None), None)
# -- float -----
def test_float_validate_type_success(self):
s.float().validate(123.45)
def test_float_validate_type_error(self):
self._error(s.float().validate, "123.45")
def test_float_validate_minimum_success(self):
s.float(minimum=1.0).validate(1.1)
def test_float_validate_minimum_error(self):
self._error(s.float(minimum=2.0).validate, 1.9)
def test_float_validate_maximum_success(self):
s.float(maximum=3.0).validate(2.9)
def test_float_validate_maximum_error(self):
self._error(s.float(maximum=4.0).validate, 4.1)
def test_float_json_encode_success(self):
self._equal(s.float().json_encode, 6.1)
def test_float_json_encode_error(self):
self._error(s.float().json_encode, 7)
def test_float_json_decode_int(self):
self.assertEqual(s.float().json_decode(8), 8.0)
def test_float_json_decode_float(self):
self._equal(s.float().json_decode, 9.1)
def test_float_json_decode_error(self):
self._error(s.float().json_decode, "10.2")
def test_float_str_decode_float(self):
self.assertEqual(s.float().str_decode("11.3"), 11.3)
def test_float_str_decode_int(self):
self.assertEqual(s.float().str_decode("12"), 12.0)
def test_float_str_decode_error(self):
self._error(s.float().str_decode, "1,2")
def test_float_validate_enum_success(self):
s.float(enum=[1.2, 3.4, 5.6]).validate(3.4)
def test_float_validate_enum_error(self):
self._error(s.float(enum=[6.7, 8.9, 10.11]).validate, 12.13)
def test_float_disallow_none(self):
self._error(s.float().json_encode, None)
def test_float_allow_none(self):
self.assertEqual(s.float(nullable=True).json_encode(None), None)
# -- bool -----
def test_bool_validate_type_true(self):
s.bool().validate(True)
def test_bool_validate_type_false(self):
s.bool().validate(False)
def test_bool_validate_type_error(self):
self._error(s.bool().validate, "foo")
def test_bool_json_encode_true(self):
self._equal(s.bool().json_encode, True)
def test_bool_json_encode_false(self):
self._equal(s.bool().json_encode, False)
def test_bool_json_encode_error(self):
self._error(s.bool().json_encode, "bar")
def test_bool_json_decode_true(self):
self._equal(s.bool().json_decode, True)
def test_bool_json_decode_false(self):
self._equal(s.bool().json_decode, False)
def test_bool_json_decode_error(self):
self._error(s.bool().json_decode, "baz")
def test_bool_str_decode_true(self):
self.assertEqual(s.bool().str_decode("true"), True)
def test_bool_str_decode_false(self):
self.assertEqual(s.bool().str_decode("false"), False)
def test_bool_str_decode_error(self):
self._error(s.bool().str_decode, "123")
def test_bool_disallow_none(self):
self._error(s.bool().json_encode, None)
def test_bool_allow_none(self):
self.assertEqual(s.bool(nullable=True).json_encode(None), None)
# -- date -----
def test_date_validate_type_success(self):
s.date().validate(date(2015, 6, 7))
def test_date_validate_type_error(self):
self._error(s.date().validate, "this_is_not_a_date")
def test_date_json_encode_success_naive(self):
self.assertEqual(s.date().json_encode(date(2016, 7, 8)), "2016-07-08")
def test_date_json_encode_success_aware(self):
self.assertEqual(s.date().json_encode(date(2017, 6, 7)), "2017-06-07")
def test_date_json_encode_error(self):
self._error(s.date().json_encode, "definitely_not_a_date")
def test_date_json_decode_z(self):
self.assertEqual(s.date().json_decode("2018-08-09"), date(2018, 8, 9))
def test_date_json_decode_offset(self):
self.assertEqual(s.date().json_decode("2019-09-10"), date(2019, 9, 10))
def test_date_json_decode_missing_tz(self):
self.assertEqual(s.date().json_decode("2020-10-11"), date(2020, 10, 11))
def test_date_json_decode_error(self):
self._error(s.date().json_decode, "14256910")
def test_date_str_decode_error(self):
self._error(s.date().str_decode, "14256910")
def test_date_disallow_none(self):
self._error(s.date().json_encode, None)
def test_date_allow_none(self):
self.assertEqual(s.date(nullable=True).json_encode(None), None)
# -- datetime -----
def test_datetime_validate_type_success(self):
s.datetime().validate(datetime(2015, 6, 7, 8, 9, 10, 0, _UTC))
def test_datetime_validate_type_error(self):
self._error(s.datetime().validate, "this_is_not_a_datetime")
def test_datetime_json_encode_success_naive(self):
self.assertEqual(s.datetime().json_encode(datetime(2016, 7, 8, 9, 10, 11)), "2016-07-08T09:10:11Z")
def test_datetime_json_encode_success_aware(self):
self.assertEqual(s.datetime().json_encode(datetime(2017, 6, 7, 8, 9, 10, 0, _UTC)), "2017-06-07T08:09:10Z")
def test_datetime_json_encode_error(self):
self._error(s.datetime().json_encode, "definitely_not_a_datetime")
def test_datetime_json_decode_z(self):
self.assertEqual(s.datetime().json_decode("2018-08-09T10:11:12Z"), datetime(2018, 8, 9, 10, 11, 12, 0, _UTC))
def test_datetime_json_decode_offset(self):
self.assertEqual(s.datetime().json_decode("2019-09-10T11:12:13+01:00"), datetime(2019, 9, 10, 10, 12, 13, 0, _UTC))
def test_datetime_json_decode_missing_tz(self):
self.assertEqual(s.datetime().json_decode("2020-10-11T12:13:14"), datetime(2020, 10, 11, 12, 13, 14, 0, _UTC))
def test_datetime_json_decode_error(self):
self._error(s.datetime().json_decode, "1425691090159")
def test_datetime_str_decode_z(self):
self.assertEqual(s.datetime().str_decode("2021-11-12T13:14:15Z"), datetime(2021, 11, 12, 13, 14, 15, 0, _UTC))
def test_datetime_str_decode_offset(self):
self.assertEqual(s.datetime().str_decode("2022-12-13T14:15:16+01:00"), datetime(2022, 12, 13, 13, 15, 16, 0, _UTC))
def test_datetime_str_decode_missing_tz(self):
self.assertEqual(s.datetime().str_decode("2020-10-11T12:13:14"), datetime(2020, 10, 11, 12, 13, 14, 0, _UTC))
def test_datetime_str_decode_error(self):
self._error(s.datetime().str_decode, "1425691090160")
def test_datetime_disallow_none(self):
self._error(s.datetime().json_encode, None)
def test_datetime_allow_none(self):
self.assertEqual(s.datetime(nullable=True).json_encode(None), None)
def test_datetime_str_decode_retain_microsecond(self):
self.assertEqual(s.datetime(fractional=True).str_decode("2018-01-02T03:04:05.123Z"), datetime(2018, 1, 2, 3, 4, 5, 123000, _UTC))
def test_datetime_str_encode_retain_microsecond(self):
self.assertEqual(s.datetime(fractional=True).str_encode(datetime(2018, 1, 2, 3, 4, 5, 123456, _UTC)), "2018-01-02T03:04:05.123456Z")
def test_datetime_str_decode_truncate_microsecond(self):
self.assertEqual(s.datetime().str_decode("2018-01-02T03:04:05.123456Z"), datetime(2018, 1, 2, 3, 4, 5, 0, _UTC))
def test_datetime_str_encode_truncate_microsecond(self):
self.assertEqual(s.datetime().str_encode(datetime(2018, 1, 2, 3, 4, 5, 123456, _UTC)), "2018-01-02T03:04:05Z")
# -- uuid -----
def test_uuid_validate_type_success(self):
s.uuid().validate(UUID("af327a12-c469-11e4-8e4f-af4f7c44473b"))
def test_uuid_validate_type_error(self):
self._error(s.uuid().validate, "this_is_not_a_uuid")
def test_uuid_json_encode_success(self):
val = "e9979b9c-c469-11e4-a0ad-37ff5ce3a7bf"
self.assertEqual(s.uuid().json_encode(UUID(val)), val)
def test_uuid_json_encode_error(self):
self._error(s.uuid().json_encode, "definitely_not_a_uuid")
def test_uuid_json_decode_success(self):
val = "15a64a3a-c46a-11e4-b790-cb538a10de85"
self.assertEqual(s.uuid().json_decode(val), UUID(val))
def test_uuid_json_decode_error(self):
self._error(s.uuid().json_decode, "this_is_not_a_uuid_either")
def test_uuid_str_decode_success(self):
val = "3629cf84-c46a-11e4-9b09-43a2f172bb56"
self.assertEqual(s.uuid().str_decode(val), UUID(val))
def test_uuid_str_decode_error(self):
self._error(s.uuid().str_decode, "and_neither_is_this")
def test_uuid_disallow_none(self):
self._error(s.uuid().json_encode, None)
def test_uuid_allow_none(self):
self.assertEqual(s.uuid(nullable=True).json_encode(None), None)
# -- bytes -----
def test_bytes_validate_type_success(self):
s.bytes().validate(bytes([1,2,3]))
def test_bytes_validate_type_error(self):
self._error(s.bytes().validate, "this_is_not_a_bytes_object")
def test_bytes_json_encode_success(self):
val = bytes([4,5,6])
self.assertEqual(s.bytes().json_encode(val), b64encode(val).decode())
def test_bytes_json_encode_error(self):
self._error(s.bytes().json_encode, "definitely_not_a_bytes_object")
def test_bytes_json_decode_success(self):
val = bytes([7,8,9])
self.assertEqual(s.bytes().json_decode(b64encode(val).decode()), val)
def test_bytes_json_decode_error(self):
self._error(s.bytes().json_decode, "this_is_not_a_bytes_object_either")
def test_bytes_str_encode_success(self):
val = bytes([0,2,4,6,8])
self.assertEqual(s.bytes().str_encode(val), b64encode(val).decode())
def test_bytes_str_decode_success(self):
val = bytes([1,3,5,7,9])
self.assertEqual(s.bytes().str_decode(b64encode(val).decode()), val)
def test_bytes_str_decode_error(self):
self._error(s.bytes().str_decode, "and_neither_is_this_a_bytes")
def test_bytes_disallow_none(self):
self._error(s.bytes().json_encode, None)
def test_bytes_allow_none(self):
self.assertEqual(s.bytes(nullable=True).json_encode(None), None)
# -- decorators -----
def test_params_decorator_mismatch_a(self):
with self.assertRaises(TypeError):
@s.validate(params={"a": s.str()})
def fn(b):
pass
def test_params_decorator_mismatch_b(self):
with self.assertRaises(TypeError):
@s.validate(params={})
def fn(b):
pass
def test_returns_error(self):
@s.validate(returns=s.str())
def fn():
return 1
with self.assertRaises(ValueError):
fn()
def test_returns_success(self):
@s.validate(returns=s.str())
def fn():
return "str_ftw"
fn()
# -- all_of -----
_all_of_schemas = s.all_of([
s.dict({"a": s.str()}, {"a"}, additional_properties=True),
s.dict({"b": s.int()}, {"b"}, additional_properties=True),
])
def test_all_of_none_match(self):
self._error(self._all_of_schemas.validate, {"c": "nope"})
def test_all_of_one_match(self):
self._error(self._all_of_schemas.validate, {"a": "foo"})
def test_all_of_validation_all_match(self):
self._all_of_schemas.validate({"a": "foo", "b": 1})
def test_all_of_json_codec(self):
value = {"a": "foo", "b": 1, "c": [1,2,3]}
schema = self._all_of_schemas
self.assertEqual(schema.json_decode(schema.json_encode(value)), value)
# -- any_of -----
def test_any_of_none_match(self):
self._error(s.any_of([s.str(), s.int()]).validate, 123.45)
def test_any_of_either_match(self):
s.any_of([s.str(), s.int()]).validate("one")
s.any_of([s.str(), s.int()]).validate(1)
def test_any_of_json_codec(self):
for value in [ 123.45, False ]:
schema = s.any_of([s.float(), s.bool()])
self.assertEqual(schema.json_decode(schema.json_encode(value)), value)
# -- one_of -----
def test_one_of_none_match(self):
self._error(s.one_of([s.str(), s.int()]).validate, 123.45)
def test_one_of_either_match(self):
s.one_of([s.str(), s.int()]).validate("one")
s.one_of([s.str(), s.int()]).validate(1)
def test_one_of_validation_all_match(self):
self._error(s.one_of([s.str(), s.str()]).validate, "string")
def test_one_of_json_codec(self):
for value in [ 123, UUID("06b959d0-65e0-11e7-866d-6be08781d5cb"), False ]:
schema = s.one_of([s.int(), s.uuid(), s.bool()])
self.assertEqual(schema.json_decode(schema.json_encode(value)), value)
# -- reader -----
def test_reader_validate_type_success(self):
s.reader().validate(BytesIO())
def test_reader_validate_type_error(self):
self._error(s.reader().validate, "this_is_not_a_reader_object")
if __name__ == "__main__":
unittest.main()
# ===========================================================================
# http://inamidst.com/saxo/
# Created by Sean B. Palmer
import codecs
import multiprocessing
import os
import re
import socket
import socketserver
import sys
import time
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
scripts = os.path.dirname(sys.modules["__main__"].__file__)
scripts = os.path.abspath(scripts)
sys.path[:0] = [scripts, os.getcwd()]
connections = 0
test_counter = 0
tests = {}
def test(test_function):
global test_counter
def decorated(conn):
test_function(conn)
test_counter += 1
decorated.number = test_counter
tests[decorated.number] = decorated
return decorated
# @@ quit from a test, then start a new instance
@test
def test_initial_ping(conn):
conn.handshake()
conn.send("PING", "VALUE")
msg = conn.recv()
with open(os.path.join(scripts, "tests.txt"), encoding="utf-8") as f:
text = f.read()
for lines in text.split("\n\n"):
def build(lines):
lines = lines.rstrip("\n")
if not lines:
return
# if not lines.startswith(".tw"):
# return
# @@ expected
@test
def test_function(conn):
conn.handshake()
for line in lines.split("\n"):
line = line.replace("$(BOT)", "saxo")
line = line.replace("$(USER)", "user")
if line.startswith("."):
conn.send(":user!~user@localhost", "PRIVMSG", "#saxo", line)
elif line == "TIMEOUT":
conn.nowt()
elif line.startswith("WAIT "):
time.sleep(int(line.split(" ").pop().strip()))
elif line.startswith("SAY"):
line = line.split(" ", 1).pop()
conn.send(":user!~user@localhost", "PRIVMSG", "#saxo", line)
else:
if line.startswith(": "):
line = "user" + line
got = conn.recv()
conn.equal(got.get("command"), "PRIVMSG",
"Expected PRIVMSG, got %s" % got)
# @@ check it's to #saxo
got = got["parameters"][1]
if "<" in line:
patterns = []
for part in re.findall("<[^>]+>|[^<]+", line):
if part.startswith("<"):
patterns.append(part[1:-1])
else:
patterns.append(re.escape(part))
pattern = "^" + "".join(patterns) + "$"
msg = "Expected %r, got %r" % (pattern, got)
conn.match(pattern, got, msg)
else:
msg = "Expected %r, got %r" % (line, got)
conn.equal(line, got, msg)
# @@ then a nowt?
build(lines[:])
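# tests.txt mini-format, as interpreted by build() above (descriptive summary,
# inferred from the code): blocks are separated by blank lines, and within a
# block each line is one of
#   .command ...   -> sent to the bot as a PRIVMSG to #saxo from $(USER)
#   SAY <text>     -> the remainder is sent as a PRIVMSG to #saxo
#   WAIT <n>       -> sleep n seconds before continuing
#   TIMEOUT        -> expect the bot to send nothing until the socket times out
#   anything else  -> an expected bot reply; "$(BOT)"/"$(USER)" are substituted,
#                     and <...> spans are matched as regular expressions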
@test
def test_hang(conn):
conn.handshake()
conn.send(":owner!~owner@localhost", "PRIVMSG", "saxo", ".test-hang")
time.sleep(1)
@test
def quit(conn):
conn.send(":localhost", "NOTICE", "*", "Welcome!")
conn.send(":owner!~owner@localhost", "PRIVMSG", "saxo", ".quit")
time.sleep(2)
irc_regex_message = re.compile(br'(?:(:.*?) )?(.*?) (.*)')
irc_regex_address = re.compile(br':?([^!@]*)!?([^@]*)@?(.*)')
irc_regex_parameter = re.compile(br'(?:^|(?<= ))(:.*|[^ ]+)')
def parse_message(octets):
message = {}
octets = octets.rstrip(b'\r\n')
message_match = irc_regex_message.match(octets)
if not message_match:
raise ValueError("Malformed: %r" % octets)
prefix, command, parameters = message_match.groups()
if prefix:
address_match = irc_regex_address.match(prefix)
if address_match:
prefix = address_match.groups()
parameters = irc_regex_parameter.findall(parameters)
if parameters and parameters[-1].startswith(b":"):
parameters[-1] = parameters[-1][1:]
message["command"] = command.decode("ascii", "replace")
message["prefix"] = {"nick": "", "user": "", "host": ""}
if prefix:
message["prefix"]["nick"] = prefix[0].decode("ascii", "replace")
message["prefix"]["user"] = prefix[1].decode("ascii", "replace")
message["prefix"]["host"] = prefix[2].decode("ascii", "replace")
def heuristic_decode(param):
# @@ could get these from config
encodings = ("utf-8", "iso-8859-1", "cp1252")
for encoding in encodings:
try: return param.decode(encoding)
except UnicodeDecodeError as err:
continue
return param.decode("utf-8", "replace")
message["parameters_octets"] = parameters
message["parameters"] = [heuristic_decode(p) for p in parameters]
message["octets"] = octets
return message
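# A small, hedged sanity check (not part of the original harness) showing what
# parse_message() returns for a typical PRIVMSG line; the sample address and
# message text are made up.
def _demo_parse_message():
    msg = parse_message(b":nick!~user@host PRIVMSG #saxo :hello there\r\n")
    assert msg["command"] == "PRIVMSG"
    assert msg["prefix"] == {"nick": "nick", "user": "~user", "host": "host"}
    assert msg["parameters"] == ["#saxo", "hello there"]
    return msg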
class Test(socketserver.StreamRequestHandler):
timeout = 6
def handle(self, *args, **kargs):
global connections, test_counter
connections += 1
self.connection = connections
self.messages = 0
# print(dir(self.server))
self.send(":localhost", "NOTICE", "*", "Test #%s" % self.connection)
if self.connection in tests:
print("Test #%s" % self.connection)
sys.stdout.flush()
tests[self.connection](self)
# print(self.connection, test_counter)
if self.connection == test_counter:
print("Tests complete")
sys.stdout.flush()
self.finish()
os._exit(0)
def match(self, a, b, message):
if not re.match(a, b):
print("ERROR: Test #%s: %s" % (self.connection, message))
sys.stdout.flush()
self.stop()
def equal(self, a, b, message):
if a != b:
print("ERROR: Test #%s: %s" % (self.connection, message))
sys.stdout.flush()
self.stop()
def not_equal(self, a, b, message):
if a == b:
print("ERROR: Test #%s: %s" % (self.connection, message))
sys.stdout.flush()
self.stop()
def stop(self):
sys.exit(0)
def handshake(self):
nick = self.recv()
self.equal(nick["command"], "NICK", "Expected NICK")
user = self.recv()
self.equal(user["command"], "USER", "Expected USER")
# @@ to nick
self.send(":localhost", "001", "saxo", "Welcome")
join = self.recv()
self.equal(join["command"], "JOIN", "Expected JOIN")
who = self.recv()
self.equal(who["command"], "WHO", "Expected WHO")
def recv(self):
while True:
try: octets = self.rfile.readline()
except socket.timeout:
print("ERROR: Test #%s: timeout" % self.connection)
sys.stdout.flush()
self.stop()
break
# Skip blank lines
if octets:
break
message = parse_message(octets)
self.messages += 1
message["count"] = self.messages
return message
def nowt(self):
try: octets = self.rfile.readline()
except socket.timeout:
return True
else:
text = octets.decode("utf-8", "replace")
args = (self.connection, text)
print("ERROR: Test #%s: Expected timeout, got %r" % args)
sys.stdout.flush()
def send(self, *args):
args = list(args)
if len(args) > 1:
args[-1] = ":" + args[-1]
octets = " ".join(args).encode("utf-8", "replace")
octets = octets.replace(b"\r", b"")
octets = octets.replace(b"\n", b"")
if len(octets) > 510:
octets = octets[:510]
self.wfile.write(octets + b"\r\n")
self.wfile.flush()
# def user
# def channel
def finish(self, *args, **kargs):
socketserver.StreamRequestHandler.finish(self)
try:
self.request.shutdown(socket.SHUT_RDWR)
self.request.close()
except socket.error:
...
class Server(socketserver.TCPServer):
# @@ if SystemExit, fine, otherwise raise it and os._exit(1)
def handle_error(self, request, client_address):
etype, evalue, etrace = sys.exc_info()
if etype is SystemExit:
return
import traceback
print("Framework Error:", etype, evalue)
sys.stdout.flush()
traceback.print_exc()
os._exit(1)
def main():
server = Server((socket.gethostname(), 61070), Test)
server.serve_forever()
if __name__ == "__main__":
main()
# ===========================================================================
import os
import asyncio
import hashlib
from urllib import parse
import xmltodict
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import SubdomainCallingFormat
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.s3 import settings
from waterbutler.providers.s3.metadata import S3Revision
from waterbutler.providers.s3.metadata import S3FileMetadata
from waterbutler.providers.s3.metadata import S3FolderMetadata
from waterbutler.providers.s3.metadata import S3FolderKeyMetadata
from waterbutler.providers.s3.metadata import S3FileMetadataHeaders
class S3Provider(provider.BaseProvider):
"""Provider for the Amazon's S3
"""
NAME = 's3'
def __init__(self, auth, credentials, settings):
"""
.. note::
Neither `S3Connection#__init__` nor `S3Connection#get_bucket`
sends a request.
:param dict auth: Not used
:param dict credentials: Dict containing `access_key` and `secret_key`
:param dict settings: Dict containing `bucket`
"""
super().__init__(auth, credentials, settings)
# If a bucket has capital letters in the name
# ordinary calling format MUST be used
if settings['bucket'] != settings['bucket'].lower():
calling_format = OrdinaryCallingFormat()
else:
# if a bucket is outside the US, subdomain calling format MUST be used
calling_format = SubdomainCallingFormat()
self.connection = S3Connection(credentials['access_key'],
credentials['secret_key'], calling_format=calling_format)
self.bucket = self.connection.get_bucket(settings['bucket'], validate=False)
self.encrypt_uploads = self.settings.get('encrypt_uploads', False)
@asyncio.coroutine
def validate_path(self, path, **kwargs):
return WaterButlerPath(path)
def can_intra_copy(self, dest_provider, path=None):
return type(self) == type(dest_provider) and not getattr(path, 'is_dir', False)
def can_intra_move(self, dest_provider, path=None):
return type(self) == type(dest_provider) and not getattr(path, 'is_dir', False)
@asyncio.coroutine
def intra_copy(self, dest_provider, source_path, dest_path):
"""Copy key from one S3 bucket to another. The credentials specified in
`dest_provider` must have read access to `source.bucket`.
"""
exists = yield from dest_provider.exists(dest_path)
dest_key = dest_provider.bucket.new_key(dest_path.path)
# x-amz-copy-source must reference the key as /<bucket>/<key>, so prepend a slash
source_path = '/' + os.path.join(self.settings['bucket'], source_path.path)
headers = {'x-amz-copy-source': parse.quote(source_path)}
url = dest_key.generate_url(
settings.TEMP_URL_SECS,
'PUT',
headers=headers,
)
yield from self.make_request(
'PUT', url,
headers=headers,
expects=(200, ),
throws=exceptions.IntraCopyError,
)
return (yield from dest_provider.metadata(dest_path)), not exists
@asyncio.coroutine
def download(self, path, accept_url=False, version=None, range=None, **kwargs):
"""Returns a ResponseWrapper (Stream) for the specified path
raises FileNotFoundError if the status from S3 is not 200
:param str path: Path to the key you want to download
:param dict \*\*kwargs: Additional arguments that are ignored
:rtype: :class:`waterbutler.core.streams.ResponseStreamReader`
:raises: :class:`waterbutler.core.exceptions.DownloadError`
"""
if not path.is_file:
raise exceptions.DownloadError('No file specified for download', code=400)
if not version or version.lower() == 'latest':
query_parameters = None
else:
query_parameters = {'versionId': version}
if kwargs.get('displayName'):
response_headers = {'response-content-disposition': 'attachment; filename*=UTF-8\'\'{}'.format(parse.quote(kwargs['displayName']))}
else:
response_headers = {'response-content-disposition': 'attachment'}
url = self.bucket.new_key(
path.path
).generate_url(
settings.TEMP_URL_SECS,
query_parameters=query_parameters,
response_headers=response_headers
)
if accept_url:
return url
resp = yield from self.make_request(
'GET',
url,
range=range,
expects=(200, 206),
throws=exceptions.DownloadError,
)
return streams.ResponseStreamReader(resp)
@asyncio.coroutine
def upload(self, stream, path, conflict='replace', **kwargs):
"""Uploads the given stream to S3
:param waterbutler.core.streams.RequestWrapper stream: The stream to put to S3
:param str path: The full path of the key to upload to/into
:rtype: dict, bool
"""
path, exists = yield from self.handle_name_conflict(path, conflict=conflict)
stream.add_writer('md5', streams.HashStreamWriter(hashlib.md5))
resp = yield from self.make_request(
'PUT',
self.bucket.new_key(path.path).generate_url(
settings.TEMP_URL_SECS,
'PUT',
encrypt_key=self.encrypt_uploads
),
data=stream,
headers={'Content-Length': str(stream.size)},
expects=(200, 201, ),
throws=exceptions.UploadError,
)
# md5 is returned as ETag header as long as server side encryption is not used.
# TODO: nice assertion error goes here
assert resp.headers['ETag'].replace('"', '') == stream.writers['md5'].hexdigest
return (yield from self.metadata(path, **kwargs)), not exists
@asyncio.coroutine
def delete(self, path, **kwargs):
"""Deletes the key at the specified path
:param str path: The path of the key to delete
"""
yield from self.make_request(
'DELETE',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'DELETE'),
expects=(200, 204, ),
throws=exceptions.DeleteError,
)
@asyncio.coroutine
def revisions(self, path, **kwargs):
"""Get past versions of the requested key
:param str path: The path to a key
:rtype list:
"""
url = self.bucket.generate_url(settings.TEMP_URL_SECS, 'GET', query_parameters={'versions': ''})
resp = yield from self.make_request(
'GET',
url,
params={'prefix': path.path, 'delimiter': '/'},
expects=(200, ),
throws=exceptions.MetadataError,
)
content = yield from resp.read_and_close()
versions = xmltodict.parse(content)['ListVersionsResult'].get('Version') or []
if isinstance(versions, dict):
versions = [versions]
return [
S3Revision(item)
for item in versions
if item['Key'] == path.path
]
@asyncio.coroutine
def metadata(self, path, revision=None, **kwargs):
"""Get Metadata about the requested file or folder
:param WaterButlerPath path: The path to a key or folder
:rtype: dict or list
"""
if path.is_dir:
return (yield from self._metadata_folder(path))
return (yield from self._metadata_file(path, revision=revision))
@asyncio.coroutine
def create_folder(self, path, **kwargs):
"""
:param str path: The path to create a folder at
"""
WaterButlerPath.validate_folder(path)
if (yield from self.exists(path)):
raise exceptions.FolderNamingConflict(str(path))
yield from self.make_request(
'PUT',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'PUT'),
expects=(200, 201),
throws=exceptions.CreateFolderError
)
return S3FolderMetadata({'Prefix': path.path})
@asyncio.coroutine
def _metadata_file(self, path, revision=None):
if revision == 'Latest':
revision = None
resp = yield from self.make_request(
'HEAD',
self.bucket.new_key(
path.path
).generate_url(
settings.TEMP_URL_SECS,
'HEAD',
query_parameters={'versionId': revision} if revision else None
),
expects=(200, ),
throws=exceptions.MetadataError,
)
return S3FileMetadataHeaders(path.path, resp.headers)
@asyncio.coroutine
def _metadata_folder(self, path):
resp = yield from self.make_request(
'GET',
self.bucket.generate_url(settings.TEMP_URL_SECS, 'GET'),
params={'prefix': path.path, 'delimiter': '/'},
expects=(200, ),
throws=exceptions.MetadataError,
)
contents = yield from resp.read_and_close()
parsed = xmltodict.parse(contents, strip_whitespace=False)['ListBucketResult']
contents = parsed.get('Contents', [])
prefixes = parsed.get('CommonPrefixes', [])
if not contents and not prefixes and not path.is_root:
# If contents and prefixes are empty then this "folder"
# must exist as a key with a / at the end of the name
# if the path is root there is no need to test if it exists
yield from self.make_request(
'HEAD',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'HEAD'),
expects=(200, ),
throws=exceptions.MetadataError,
)
if isinstance(contents, dict):
contents = [contents]
if isinstance(prefixes, dict):
prefixes = [prefixes]
items = [
S3FolderMetadata(item)
for item in prefixes
]
for content in contents:
if content['Key'] == path.path:
continue
if content['Key'].endswith('/'):
items.append(S3FolderKeyMetadata(content))
else:
items.append(S3FileMetadata(content))
return items
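# A hedged construction sketch (not part of the original module); the credential
# and bucket values below are placeholders. Per the __init__ docstring, building
# the provider sends no request until a coroutine method is actually awaited.
def _example_s3_provider():
    return S3Provider(
        auth={},  # not used by this provider
        credentials={'access_key': 'PLACEHOLDER', 'secret_key': 'PLACEHOLDER'},
        settings={'bucket': 'example-bucket'},
    )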
# ===========================================================================
import inspect
import sys
from dateutil.parser import parse
import formencode
import formencode.validators as fev
import formencode.national
import sqlalchemy as sa
from savalidation._internal import is_iterable
_ELV = '_sav_entity_linkers'
# map a SA field type to a formencode validator for use in _ValidatesConstraints
SA_FORMENCODE_MAPPING = {
sa.types.Integer: formencode.validators.Int,
sa.types.Numeric: formencode.validators.Number,
}
class EntityLinker(object):
"""
Wraps a Validator, storing the validator class and subsequent arguments
on the entity class for later use by the entity instances.
validates_something = EntityLinker(SomethingValidator)
class Car(Base, ValidationMixin):
make = sa.Column(String(50))
validates_something(make)
"""
def __init__(self, validator_cls):
self.validator_cls = validator_cls
def __call__(self, *args, **kwargs):
class_locals = sys._getframe(1).f_locals
elvs = class_locals.setdefault(_ELV, [])
elvs.append((self.validator_cls, args, kwargs))
entity_linker = EntityLinker
class FEVMeta(object):
"""
Wraps a formencode validator along with other meta information that
indicates how & when that validator is to be used.
"""
ALL_EVENTS = 'before_flush', 'before_exec'
def __init__(self, fev, field_name=None, event='before_flush', is_converter=False):
if event not in self.ALL_EVENTS:
raise ValueError('got "{0}" for event, should be one of: {1}'.format(event, self.ALL_EVENTS))
self.fev = fev
self.field_name = field_name
self.event = event
self.is_converter = is_converter
def __repr__(self):
return '<FEVMeta: field_name={0}; event={1}; is_conv={2}; fev={3}>'.format(
self.field_name, self.event, self.is_converter, self.fev
)
class ValidatorBase(object):
fe_validator = None
default_kwargs = dict()
def __init__(self, entity_cls, *args, **kwargs):
self.entitycls = entity_cls
self.args = args
self.kwargs = kwargs
self.field_names = []
self.fe_args = []
self.fev_metas = []
self.split_field_names_from_fe_args()
self.create_fe_validators()
def split_field_names_from_fe_args(self):
"""
Some validators may want to take position arguments and field
names. This method handles putting the args in the correct
internal variable.
"""
index = 0
for index, unknown_arg in enumerate(self.args):
if self.arg_for_fe_validator(index, unknown_arg):
self.fe_args.append(unknown_arg)
break
self.field_names.append(unknown_arg)
self.fe_args.extend(self.args[index+1:])
def create_fe_validators(self):
kwargs = self.default_kwargs.copy()
kwargs.update(self.kwargs)
convert_flag = kwargs.pop('sav_convert', kwargs.pop('sv_convert', False))
sav_event = kwargs.pop('sav_event', 'before_flush')
for field_to_validate in self.field_names:
self.create_fev_meta(self.fe_validator, field_to_validate, kwargs, sav_event, convert_flag)
def create_fev_meta(self, fev_cls, colname, fe_kwargs={}, sav_event='before_flush', convert_flag=False, auto_not_empty=True):
fe_kwargs = fe_kwargs.copy()
if auto_not_empty and self.sa_column_needs_not_empty(colname):
fe_kwargs['not_empty'] = True
fev = fev_cls(*self.fe_args, **fe_kwargs)
fev_meta = FEVMeta(fev, colname, sav_event, convert_flag)
self.fev_metas.append(fev_meta)
def sa_column_needs_not_empty(self, colname):
col = self.fetch_sa_column(colname)
if not col.nullable and not col.default and not col.server_default:
return True
return False
def fetch_sa_column(self, colname):
return self.entitycls.__mapper__.get_property(colname).columns[0]
def arg_for_fe_validator(self, index, unknown_arg):
return False
class DateTimeConverter(fev.FancyValidator):
def _to_python(self, value, state):
try:
return parse(value)
except ValueError as e:
if 'unknown string format' not in str(e):
raise
raise formencode.Invalid('Unknown date/time string "%s"' % value, value, state)
except TypeError as e:
# can probably be removed if this ever gets fixed:
# https://bugs.launchpad.net/dateutil/+bug/1257985
if "'NoneType' object is not iterable" not in str(e):
raise
raise formencode.Invalid('Unknown date/time string "%s"' % value, value, state)
@entity_linker
class _ValidatesPresenceOf(ValidatorBase):
fe_validator = formencode.FancyValidator
default_kwargs = dict(not_empty=True)
class _ValidatesOneOf(ValidatorBase):
fe_validator = fev.OneOf
def arg_for_fe_validator(self, index, unknown_arg):
return is_iterable(unknown_arg)
class _MinLength(fev.MinLength):
""" need a special class that will allow None through but not '' """
def is_empty(self, value):
# only consider None empty, not an empty string
return value is None
class _ValidatesMinLength(ValidatorBase):
fe_validator = _MinLength
def arg_for_fe_validator(self, index, unknown_arg):
if isinstance(unknown_arg, int):
return True
return False
class _IPAddress(fev.IPAddress):
""" need a special class that will allow None through but not '' """
def is_empty(self, value):
# only consider None empty, not an empty string
return value is None
class _ValidatesIPAddress(ValidatorBase):
fe_validator = _IPAddress
class _URL(fev.URL):
""" need a special class that will allow None through but not '' """
def is_empty(self, value):
# only consider None empty, not an empty string
return value is None
class _ValidatesURL(ValidatorBase):
fe_validator = _URL
class _ValidatesChoices(_ValidatesOneOf):
def create_fe_validators(self):
# the first formencode parameter should be a sequence of pairs. However,
# the FE validator needs just the list of keys that are valid, so we
# strip those off here.
self.fe_args[0] = [k for k,v in self.fe_args[0]]
ValidatorBase.create_fe_validators(self)
@entity_linker
class _ValidatesConstraints(ValidatorBase):
def create_fe_validators(self):
# grab some values from the kwargs that apply to this validator
validate_length = bool(self.kwargs.get('length', True))
validate_nullable = bool(self.kwargs.get('nullable', True))
validate_type = bool(self.kwargs.get('type', True))
excludes = self.kwargs.get('exclude', [])
fe_validators = []
for colname in self.entitycls._sav_column_names():
# get the SA column instance
col = self.entitycls.__mapper__.get_property(colname).columns[0]
# ignore primary keys
if colname in excludes or col.primary_key:
continue
# validate lengths on String and Unicode types, but not Text b/c it shouldn't have a
# length
if validate_length and isinstance(col.type, sa.types.String) \
and not isinstance(col.type, sa.types.Text):
fmeta = FEVMeta(fev.MaxLength(col.type.length), colname)
self.fev_metas.append(fmeta)
# handle fields that are not nullable
if validate_nullable and not col.nullable:
if not col.default and not col.server_default:
validator = formencode.FancyValidator(not_empty=True)
event = 'before_flush'
if col.foreign_keys:
event = 'before_exec'
fmeta = FEVMeta(validator, colname, event)
self.fev_metas.append(fmeta)
# data-type validation
if validate_type:
for sa_type, fe_validator in list(SA_FORMENCODE_MAPPING.items()):
if isinstance(col.type, sa_type):
self.create_fev_meta(fe_validator, colname, auto_not_empty=False)
break
def formencode_factory(fevalidator, **kwargs):
"""
Converts a formencode validator into an object that can be used in
an entity object for validation:
validates_int = formencode_factory(formencode.validators.Int)
class MyCar(Base):
year = Column(Int)
validates_int('year')
"""
class _ValidatesFeValidator(ValidatorBase):
fe_validator = fevalidator
type = 'field'
default_kwargs = kwargs
return EntityLinker(_ValidatesFeValidator)
validates_choices = EntityLinker(_ValidatesChoices)
validates_constraints = _ValidatesConstraints
validates_ipaddr = EntityLinker(_ValidatesIPAddress)
validates_minlen = EntityLinker(_ValidatesMinLength)
validates_one_of = EntityLinker(_ValidatesOneOf)
validates_presence_of = _ValidatesPresenceOf
validates_required = _ValidatesPresenceOf
validates_url = EntityLinker(_ValidatesURL)
validates_email = formencode_factory(fev.Email)
validates_usphone = formencode_factory(formencode.national.USPhoneNumber)
converts_date = formencode_factory(fev.DateConverter, sv_convert=True)
converts_time = formencode_factory(fev.TimeConverter, use_datetime=True, sv_convert=True)
converts_datetime = formencode_factory(DateTimeConverter, sv_convert=True)
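# A hedged usage sketch (an assumption, not part of this module): wiring the
# linkers above into a declarative entity, mirroring the EntityLinker docstring.
# The ValidationMixin import location and the Car model are illustrative.
def _example_validated_entity():
    from sqlalchemy.ext.declarative import declarative_base
    from savalidation import ValidationMixin  # assumed import location

    Base = declarative_base()

    class Car(Base, ValidationMixin):
        __tablename__ = 'cars'
        id = sa.Column(sa.Integer, primary_key=True)
        make = sa.Column(sa.String(50), nullable=False)
        year = sa.Column(sa.Integer)

        validates_presence_of('make')
        validates_minlen('make', 2)
        validates_constraints()

    return Car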
# ===========================================================================
# This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <njs@pobox.com>
# See file LICENSE.txt for license information.
import json
import hashlib
import os
import os.path
import multiprocessing
import struct
import sys
import getpass
import socket
import traceback
from contextlib import contextmanager
from datetime import datetime
import time
import six
import zs
from zs.common import (ZSError,
MAGIC,
INCOMPLETE_MAGIC,
FIRST_EXTENSION_LEVEL,
CRC_LENGTH,
encoded_crc64xz,
header_data_format,
header_data_length_format,
codec_shorthands,
codecs,
read_format,
read_length_prefixed)
from zs._zs import (pack_data_records, pack_index_records,
unpack_data_records,
write_uleb128)
# how often to poll for pipeline errors while blocking in the main thread, in
# seconds
ERROR_CHECK_FREQ = 0.1
# seconds between spinner updates
SPIN_UPDATE_TIME = 0.3
def _flush_file(f):
f.flush()
os.fsync(f.fileno())
def _encode_header(header):
enc_fields = []
for (field, format) in header_data_format:
if format == "length-prefixed-utf8-json":
# In py2, json.dumps always returns str if ensure_ascii=True (the
# default); if ensure_ascii=False it may or may not return a str
# at its whim. In py3, json.dumps always returns unicode.
str_encoded = json.dumps(header[field], ensure_ascii=True)
# On py3, this is necessary. On py2, this implicitly coerces to
# unicode and then encodes -- but because we know the string only
# contains ascii, the implicit conversion is safe.
encoded = str_encoded.encode("utf-8")
enc_fields.append(struct.pack("<Q", len(encoded)))
enc_fields.append(encoded)
elif format == "NUL-padded-ascii-16":
enc_fields.append(struct.pack("16s",
header[field].encode("ascii")))
else:
enc_fields.append(struct.pack(format, header[field]))
return b"".join(enc_fields)
def test__encode_header():
got = _encode_header({
"root_index_offset": 0x1234567890123456,
"root_index_length": 0x2468864213577531,
"total_file_length": 0x0011223344556677,
"sha256": b"abcdefghijklmnopqrstuvwxyz012345",
"codec": "superzip",
# Carefully chosen to avoid containing any dicts with multiple items,
# so as to ensure a consistent serialization in the face of dict
# randomization.
"metadata": {"this": ["is", "awesome", 10]},
})
expected_metadata = b"{\"this\": [\"is\", \"awesome\", 10]}"
expected = (b"\x56\x34\x12\x90\x78\x56\x34\x12"
b"\x31\x75\x57\x13\x42\x86\x68\x24"
b"\x77\x66\x55\x44\x33\x22\x11\x00"
b"abcdefghijklmnopqrstuvwxyz012345"
b"superzip\x00\x00\x00\x00\x00\x00\x00\x00"
# hex(len(expected_metadata)) == 0x1f
b"\x1f\x00\x00\x00\x00\x00\x00\x00"
+ expected_metadata)
assert got == expected
# A sentinel used to signal that a worker should quit.
class _QUIT(object):
pass
def box_exception():
e_type, e_obj, tb = sys.exc_info()
return (e_type, e_obj, traceback.extract_tb(tb))
def reraise_boxed(box):
e_type, e_obj, extracted_tb = box
orig_tb_str = "".join(traceback.format_list(extracted_tb))
raise ZSError("Error in worker: %s\n\n"
"(Original traceback:\n"
" %s"
" %s: %s\n"
")"
% (e_obj,
orig_tb_str.replace("\n", "\n "),
e_type.__name__,
e_obj,
)
)
# We have a very strict policy on exceptions: any exception anywhere in
# ZSWriter is non-recoverable.
# This context manager is wrapped around all out-of-process code, to ship
# errors back to the main process.
@contextmanager
def errors_to(q):
try:
yield
except:
# we really and truly do want a bare except: here, because even
# KeyboardException should get forwarded to the main process so it has
# a chance to know that the child is dead.
q.put(box_exception())
# This context manager is wrapped around in-process code, to catch errors and
# enforce non-recoverability.
@contextmanager
def errors_close(obj):
try:
yield
except:
obj.close()
raise
class ZSWriter(object):
def __init__(self, path, metadata, branching_factor,
parallelism="guess", codec="lzma", codec_kwargs={},
show_spinner=True, include_default_metadata=True):
"""Create a ZSWriter object.
.. note:: In many cases it'll be easier to just use the command line
'zs make' tool, which is a wrapper around this class.
:arg path: File to write to. Must not already exist.
:arg metadata: Dict or dict-like containing arbitrary metadata for the
.zs file. See :ref:`metadata-conventions`.
:arg branching_factor: The number of entries to put into each *index*
block. We use a simple greedy packing strategy, where we fill up
index blocks until they reach this limit.
:arg parallelism: The number of CPUs to use for compression, or "guess"
to auto-detect. Must be >= 1.
:arg codec: The compression method to use. Valid values are "none",
"deflate", "lzma".
:arg codec_kwargs: kwargs to pass to the codec compress function. All
codecs except 'none' support a compress_level argument. The 'lzma'
codec also supports an extreme=True/False argument.
:arg show_spinner: Whether to show the progress meter.
:arg include_default_metadata: Whether to auto-add some default
metadata (time, host, user).
Once you have a ZSWriter object, you can use the
:meth:`add_data_block` and :meth:`add_file_contents` methods to write
data to it. It is your job to ensure that all records are added in
(ASCIIbetical/memcmp) sorted order.
Once you are done adding records, you must call :meth:`close`. This
will not be done automatically. (This is a feature, to make sure that
errors that cause early termination leave obviously-invalid ZS files
behind.)
The most optimized way to build a ZS file is to use
:meth:`add_file_contents` with terminated (not length-prefixed)
records. However, this is only possible if your records have some
fixed terminator that you can be sure never occurs within a record
itself.
"""
self._path = path
# The testsuite writes lots of ZS files to temporary storage, so
# better take the trouble to use O_EXCL to prevent exposing everyone
# who runs the test suite to security holes...
open_flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
# O_CLOEXEC is better to use than not, but platform specific
# O_BINARY is necessary on windows, unavailable elsewhere
for want_if_available in ["O_CLOEXEC", "O_BINARY"]:
open_flags |= getattr(os, want_if_available, 0)
try:
fd = os.open(path, open_flags, 0o666)
except OSError as e:
raise ZSError("%s: %s" % (path, e))
self._file = os.fdopen(fd, "w+b")
self.metadata = dict(metadata)
if include_default_metadata:
build_info = {"user": getpass.getuser(),
"host": socket.getfqdn(),
"time": datetime.utcnow().isoformat() + "Z",
"version": "zs %s" % (zs.__version__,),
}
self.metadata.setdefault("build-info", build_info)
self.branching_factor = branching_factor
self._show_spinner = show_spinner
if parallelism == "guess":
# XX put an upper bound on this
parallelism = multiprocessing.cpu_count()
self._parallelism = parallelism
self.codec = codec_shorthands.get(codec)
if self.codec is None:
raise ZSError("unknown codec %r (should be one of: %s)"
% (codec, ", ".join(codec_shorthands)))
self._compress_fn = codecs[self.codec][0]
self._codec_kwargs = codec_kwargs
self._header = {
"root_index_offset": 2 ** 63 - 1,
"root_index_length": 0,
"total_file_length": 0,
"sha256": b"\x00" * 32,
"codec": self.codec,
"metadata": self.metadata,
}
self._file.write(INCOMPLETE_MAGIC)
encoded_header = _encode_header(self._header)
self._file.write(struct.pack(header_data_length_format,
len(encoded_header)))
self._file.write(encoded_header)
# Put an invalid CRC on the initial header as well, for good measure
self._file.write(b"\x00" * CRC_LENGTH)
# It is critical that we flush the file before we re-open it in append
# mode in the writer process!
self._file.flush()
self._next_job = 0
assert parallelism > 0
self._compress_queue = multiprocessing.Queue(2 * parallelism)
self._write_queue = multiprocessing.Queue(2 * parallelism)
self._finish_queue = multiprocessing.Queue(1)
self._error_queue = multiprocessing.Queue()
self._compressors = []
for i in range(parallelism):
compress_args = (self._compress_fn, self._codec_kwargs,
self._compress_queue, self._write_queue,
self._error_queue)
p = multiprocessing.Process(target=_compress_worker,
args=compress_args)
p.start()
self._compressors.append(p)
writer_args = (self._path,
self.branching_factor,
self._compress_fn, self._codec_kwargs,
self._write_queue, self._finish_queue,
self._show_spinner, self._error_queue)
self._writer = multiprocessing.Process(target=_write_worker,
args=writer_args)
self._writer.start()
self.closed = False
def _check_open(self):
if self.closed:
raise ZSError("attempted operation on closed ZSWriter")
def _check_error(self):
try:
box = self._error_queue.get_nowait()
except six.moves.queue.Empty:
return
else:
self.close()
reraise_boxed(box)
def _safe_put(self, q, obj):
# put can block, but it might never unblock if the pipeline has
# clogged due to an error. so we have to check for errors occasionally
# while waiting.
while True:
try:
q.put(obj, timeout=ERROR_CHECK_FREQ)
except six.moves.queue.Full:
self._check_error()
else:
break
def _safe_join(self, process):
while process.is_alive():
self._check_error()
process.join(ERROR_CHECK_FREQ)
def add_data_block(self, records):
"""Append the given set of records to the ZS file as a single data
block.
(See :ref:`format` for details on what a data block is.)
:arg records: A list of byte strings giving the contents of each
record.
"""
self._check_open()
with errors_close(self):
if not records:
return
self._safe_put(self._compress_queue,
(self._next_job, "list", records))
self._next_job += 1
def add_file_contents(self, file_handle, approx_block_size,
terminator=b"\n", length_prefixed=None):
"""Split the contents of file_handle into records, and write them to
the ZS file.
The arguments determine how the contents of the file are divided into
records and blocks.
:arg file_handle: A file-like object whose contents are read. This
file is always closed.
:arg approx_block_size: The approximate size of each data block, in
bytes, *before* compression is applied.
:arg terminator: A byte string containing a terminator appended to the
end of each record. Default is a newline.
:arg length_prefixed: If given, records are output in a
length-prefixed format, and ``terminator`` is ignored. Valid values
are the strings ``"uleb128"`` or ``"u64le"``, or ``None``.
"""
self._check_open()
with errors_close(self):
try:
if length_prefixed is None:
return self._afc_terminator(file_handle,
approx_block_size,
terminator)
else:
return self._afc_length_prefixed(file_handle,
approx_block_size,
length_prefixed)
finally:
file_handle.close()
def _afc_terminator(self, file_handle, approx_block_size,
terminator):
# optimized version that doesn't process records one at a time, but
# instead slurps up whole chunks, resynchronizes, and leaves the
# compression worker to do the splitting/rejoining.
partial_record = b""
next_job = self._next_job
read = file_handle.read
while True:
buf = read(approx_block_size)
if not buf:
# File should have ended with a newline (and we don't write
# out the trailing empty record that this might imply).
if partial_record:
raise ZSError("file did not end with terminator")
break
buf = partial_record + buf
try:
buf, partial_record = buf.rsplit(terminator, 1)
except ValueError:
assert terminator not in buf
partial_record = buf
continue
#print "PUTTING %s" % (next_job,)
self._safe_put(self._compress_queue,
(next_job, "chunk-sep", buf, terminator))
next_job += 1
self._next_job = next_job
def _afc_length_prefixed(self, file_handle, approx_block_size,
length_prefixed):
records = []
this_block_size = 0
for record in read_length_prefixed(file_handle, length_prefixed):
records.append(record)
this_block_size += len(record)
if this_block_size >= approx_block_size:
self.add_data_block(records)
records = []
this_block_size = 0
if records:
self.add_data_block(records)
def finish(self):
"""Declare this file finished.
This method writes out the root block, updates the header, etc.
Importantly, we do not write out the correct magic number until this
method completes, so no ZS reader will be willing to read your file
until this is called (see :ref:`magic-numbers`).
Do not call this method unless you are sure you have added the right
records. (In particular, you definitely don't want to call this from a
``finally`` block, or automatically from a ``with`` block context
manager.)
Calls :meth:`close`.
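        Example of the intended call pattern (a sketch; ``writer`` is a
        ZSWriter that has had records added as in the examples above)::
            writer.add_data_block([b"bar", b"foo"])
            writer.finish()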
"""
self._check_open()
with errors_close(self):
# Stop all the processing queues and wait for them to finish.
for i in range(self._parallelism):
#sys.stderr.write("putting QUIT\n"); sys.stderr.flush()
self._safe_put(self._compress_queue, _QUIT)
for compressor in self._compressors:
self._safe_join(compressor)
#sys.stdout.write("All compressors finished; waiting for writer\n")
# All compressors have now finished their work, and submitted
# everything to the write queue.
self._safe_put(self._write_queue, _QUIT)
self._safe_join(self._writer)
# The writer and compressors have all exited, so any errors they've
# encountered have definitely been enqueued.
self._check_error()
sys.stdout.write("zs: Updating header...\n")
root_index_offset, root_index_length, sha256 = self._finish_queue.get()
#sys.stdout.write("zs: Root index offset: %s\n" % (root_index_offset,))
# Now we have the root offset
self._header["root_index_offset"] = root_index_offset
self._header["root_index_length"] = root_index_length
self._header["sha256"] = sha256
# And can get the total file length
self._file.seek(0, 2)
self._header["total_file_length"] = self._file.tell()
new_encoded_header = _encode_header(self._header)
self._file.seek(len(MAGIC))
# Read the header length and make sure it hasn't changed
old_length, = read_format(self._file, header_data_length_format)
if old_length != len(new_encoded_header):
raise ZSError("header data length changed")
self._file.write(new_encoded_header)
self._file.write(encoded_crc64xz(new_encoded_header))
# Flush the file to disk to make sure that all data is consistent
# before we mark the file as complete.
_flush_file(self._file)
# And now we can write the MAGIC value to mark the file as complete.
self._file.seek(0)
self._file.write(MAGIC)
_flush_file(self._file)
# Done!
self.close()
def close(self):
"""Close the file and terminate all background processing.
Further operations on this ZSWriter object will raise an error.
If you call this method before calling :meth:`finish`, then you will
not have a working ZS file.
This object can be used as a context manager in a ``with`` block, in
which case :meth:`close` will be called automatically, but
:meth:`finish` will not be.
"""
if self.closed:
return
self.closed = True
self._file.close()
for worker in self._compressors + [self._writer]:
worker.terminate()
worker.join()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __del__(self):
# __del__ gets called even if we error out during __init__
if hasattr(self, "closed"):
self.close()
# This worker loop compresses data blocks and passes them to the write
# worker.
def _compress_worker(compress_fn, codec_kwargs,
compress_queue, write_queue, error_queue):
# me = os.getpid()
# def fyi(msg):
# sys.stderr.write("compress_worker:%s: %s\n" % (me, msg))
# sys.stderr.flush()
with errors_to(error_queue):
# Local variables for speed
get = compress_queue.get
pdr = pack_data_records
put = write_queue.put
while True:
job = get()
#fyi("got %r" % (job,))
if job is _QUIT:
#fyi("QUIT")
return
if job[1] == "chunk-sep":
idx, job_type, buf, sep = job
records = buf.split(sep)
payload = pdr(records, 2 * len(buf))
elif job[1] == "list":
idx, job_type, records = job
payload = pdr(records)
else: # pragma: no cover
assert False
zpayload = compress_fn(payload, **codec_kwargs)
#fyi("putting")
put((idx, records[0], records[-1], payload, zpayload))
def _write_worker(path, branching_factor,
compress_fn, codec_kwargs,
write_queue, finish_queue,
show_spinner, error_queue):
with errors_to(error_queue):
data_appender = _ZSDataAppender(path, branching_factor,
compress_fn, codec_kwargs,
show_spinner)
pending_jobs = {}
wanted_job = 0
get = write_queue.get
write_block = data_appender.write_block
while True:
job = get()
#sys.stderr.write("write_worker: got\n")
if job is _QUIT:
assert not pending_jobs
header_info = data_appender.close_and_get_header_info()
finish_queue.put(header_info)
return
pending_jobs[job[0]] = job[1:]
while wanted_job in pending_jobs:
#sys.stderr.write("write_worker: writing %s\n" % (wanted_job,))
write_block(0, *pending_jobs[wanted_job])
del pending_jobs[wanted_job]
wanted_job += 1
# This class coordinates writing actual data blocks to the file, and also
# handles generating the index. The hope is that indexing has low enough
# overhead that handling it in serial with the actual writes won't create a
# bottleneck...
class _ZSDataAppender(object):
def __init__(self, path, branching_factor, compress_fn, codec_kwargs,
show_spinner):
self._file = open(path, "ab")
# Opening in append mode should put us at the end of the file, but
# just in case...
self._file.seek(0, 2)
assert self._file.tell() > 0
self._branching_factor = branching_factor
self._compress_fn = compress_fn
self._codec_kwargs = codec_kwargs
# For each level, a list of entries
# each entry is a tuple (first_record, last_record, offset)
# last_record is kept around to ensure that records at each level are
# sorted and non-overlapping, and because in principle we could use
# them to find shorter keys (XX).
self._level_entries = []
self._level_lengths = []
self._hasher = hashlib.sha256()
# spinner-related stuff
self._last_update = 0
self._written_blocks = 0
self._shown_blocks = None
self._show_spinner = show_spinner
def _spin(self, written_bytes, written_blocks, done):
if not self._show_spinner:
return
self._written_blocks += written_blocks
now = time.time()
if done or now - self._last_update > SPIN_UPDATE_TIME:
if self._shown_blocks is not None:
sys.stdout.write("\r")
if self._written_blocks != self._shown_blocks:
sys.stdout.write("zs: Blocks written: %s" # no \n
% (self._written_blocks,))
self._shown_blocks = self._written_blocks
if done:
sys.stdout.write("\n")
sys.stdout.flush()
def write_block(self, level, first_record, last_record, payload, zpayload):
if not (0 <= level < FIRST_EXTENSION_LEVEL):
raise ZSError("invalid level %s" % (level,))
if level == 0:
self._hasher.update(payload)
block_offset = self._file.tell()
block_contents = six.int2byte(level) + zpayload
write_uleb128(len(block_contents), self._file)
self._file.write(block_contents)
self._file.write(encoded_crc64xz(block_contents))
total_block_length = self._file.tell() - block_offset
self._spin(total_block_length, 1, False)
if level >= len(self._level_entries):
# First block we've seen at this level
assert level == len(self._level_entries)
self._level_entries.append([])
# This can only happen if all the previous levels just flushed.
for i in range(level):
assert not self._level_entries[i]
entries = self._level_entries[level]
entries.append((first_record, last_record,
block_offset, total_block_length))
if len(entries) >= self._branching_factor:
self._flush_index(level)
def _flush_index(self, level):
entries = self._level_entries[level]
assert entries
self._level_entries[level] = []
keys = [entry[0] for entry in entries]
offsets = [entry[2] for entry in entries]
block_lengths = [entry[3] for entry in entries]
payload = pack_index_records(keys, offsets, block_lengths)
zpayload = self._compress_fn(payload, **self._codec_kwargs)
first_record = entries[0][0]
last_record = entries[-1][1]
self.write_block(level + 1, first_record, last_record,
payload, zpayload)
def close_and_get_header_info(self):
# We need to create index blocks referring to all dangling
# unreferenced blocks. If at any point we have only a single
# unreferenced index block, then this is our root index.
def have_root():
# Useful invariant: we know that there is always at least one
# unreferenced block at the highest level.
assert len(self._level_entries[-1]) > 0
# If all we have are data blocks, then we aren't done; root must
# be an index block.
if len(self._level_entries) == 1:
return False
            # If there's an unreferenced block at any level below the highest,
            # we aren't done.
for entries in self._level_entries[:-1]:
if entries:
return False
# If the highest level has multiple blocks, we aren't done.
if len(self._level_entries[-1]) > 1:
return False
# Otherwise, we are done!
return True
if not self._level_entries:
raise ZSError("cannot create empty ZS file")
while not have_root():
for level in range(FIRST_EXTENSION_LEVEL):
if self._level_entries[level]:
self._flush_index(level)
break
# wait until the root has been flushed
self._spin(0, 0, True)
_flush_file(self._file)
self._file.close()
root_entry = self._level_entries[-1][0]
return root_entry[-2:] + (self._hasher.digest(),)
assert False # pragma: no cover
|
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on July 10, 2013
@author: alfoa
"""
#External Modules------------------------------------------------------------------------------------
import numpy as np
import time
import sys
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .PostProcessorInterface import PostProcessorInterface
from utils import InputData, InputTypes
import Files
#Internal Modules End-----------------------------------------------------------
class TopologicalDecomposition(PostProcessorInterface):
"""
TopologicalDecomposition class - Computes an approximated hierarchical
Morse-Smale decomposition from an input point cloud consisting of an
arbitrary number of input parameters and a response value per input point
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
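      Example (a sketch of how the returned specification class is used;
      ``xmlNode`` is a hypothetical XML element, mirroring
      _localReadMoreXML in the Qt subclass below):
        paramInput = TopologicalDecomposition.getInputSpecification()()
        paramInput.parseNode(xmlNode)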
"""
## This will replace the lines above
inputSpecification = super().getInputSpecification()
TDGraphInput = InputData.parameterInputFactory("graph", contentType=InputTypes.StringType)
inputSpecification.addSub(TDGraphInput)
TDGradientInput = InputData.parameterInputFactory("gradient", contentType=InputTypes.StringType)
inputSpecification.addSub(TDGradientInput)
TDBetaInput = InputData.parameterInputFactory("beta", contentType=InputTypes.FloatType)
inputSpecification.addSub(TDBetaInput)
TDKNNInput = InputData.parameterInputFactory("knn", contentType=InputTypes.IntegerType)
inputSpecification.addSub(TDKNNInput)
TDWeightedInput = InputData.parameterInputFactory("weighted", contentType=InputTypes.StringType) #bool
inputSpecification.addSub(TDWeightedInput)
TDInteractiveInput = InputData.parameterInputFactory("interactive", contentType=InputTypes.StringType) #bool
inputSpecification.addSub(TDInteractiveInput)
TDPersistenceInput = InputData.parameterInputFactory("persistence", contentType=InputTypes.StringType)
inputSpecification.addSub(TDPersistenceInput)
TDSimplificationInput = InputData.parameterInputFactory("simplification", contentType=InputTypes.FloatType)
inputSpecification.addSub(TDSimplificationInput)
TDParametersInput = InputData.parameterInputFactory("parameters", contentType=InputTypes.StringType)
inputSpecification.addSub(TDParametersInput)
TDResponseInput = InputData.parameterInputFactory("response", contentType=InputTypes.StringType)
inputSpecification.addSub(TDResponseInput)
TDNormalizationInput = InputData.parameterInputFactory("normalization", contentType=InputTypes.StringType)
inputSpecification.addSub(TDNormalizationInput)
return inputSpecification
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.acceptedGraphParam = ['approximate knn', 'delaunay', 'beta skeleton', \
'relaxed beta skeleton']
self.acceptedPersistenceParam = ['difference','probability','count']#,'area']
self.acceptedGradientParam = ['steepest', 'maxflow']
self.acceptedNormalizationParam = ['feature', 'zscore', 'none']
# Some default arguments
self.gradient = 'steepest'
self.graph = 'beta skeleton'
self.beta = 1
self.knn = -1
self.simplification = 0
self.persistence = 'difference'
self.normalization = None
self.weighted = False
self.parameters = {}
def inputToInternal(self, currentInp):
"""
Function to convert the incoming input into a usable format
@ In, currentInp, list or DataObjects, The input object to process
@ Out, inputDict, dict, the converted input
"""
# TODO typechecking against what currentInp can be; so far it's a length=1 list with a dataobject inside
currentInp = currentInp[0]
currentInp.asDataset()
# nowadays, our only input should be DataObject
## if no "type", then you're not a PointSet or HistorySet
if not hasattr(currentInp,'type') or currentInp.type != 'PointSet':
self.raiseAnError(IOError, self.__class__.__name__,
' postprocessor only accepts PointSet DataObjects for input. ',
' Requested: ', type(currentInp))
# now we know we have a PointSet
## TODO FIXME maintaining old structure for now, in the future convert to use DataObject directly
## and not bother with inputToInternal
## This works particularly well since we only accept point sets.
data = currentInp.asDataset(outType='dict')['data']
inputDict = {'features':dict((var,data[var]) for var in self.parameters['features']),
'targets' :dict((var,data[var]) for var in self.parameters['targets' ]),
'metadata':currentInp.getMeta(general=True)}
#if 'PointProbability' in currentInp.getVars():
inputDict['metadata']['PointProbability'] = currentInp.getVarValues('PointProbability').values
#else:
# raise NotImplementedError # TODO
return inputDict
def _handleInput(self, paramInput):
"""
Function to handle the parsed paramInput for this class.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
for child in paramInput.subparts:
if child.getName() == "graph":
self.graph = child.value.lower()
if self.graph not in self.acceptedGraphParam:
self.raiseAnError(IOError, 'Requested unknown graph type: ',
self.graph, '. Available options: ',
self.acceptedGraphParam)
elif child.getName() == "gradient":
self.gradient = child.value.lower()
if self.gradient not in self.acceptedGradientParam:
self.raiseAnError(IOError, 'Requested unknown gradient method: ',
self.gradient, '. Available options: ',
self.acceptedGradientParam)
elif child.getName() == "beta":
self.beta = child.value
if self.beta <= 0 or self.beta > 2:
self.raiseAnError(IOError, 'Requested invalid beta value: ',
self.beta, '. Allowable range: (0,2]')
elif child.getName() == 'knn':
self.knn = child.value
elif child.getName() == 'simplification':
self.simplification = child.value
elif child.getName() == 'persistence':
self.persistence = child.value.lower()
if self.persistence not in self.acceptedPersistenceParam:
self.raiseAnError(IOError, 'Requested unknown persistence method: ',
self.persistence, '. Available options: ',
self.acceptedPersistenceParam)
elif child.getName() == 'parameters':
        # strip whitespace around each comma-separated feature name
        self.parameters['features'] = [feature.strip() for feature in child.value.split(',')]
elif child.getName() == 'weighted':
self.weighted = child.value in ['True', 'true']
elif child.getName() == 'response':
self.parameters['targets'] = child.value
elif child.getName() == 'normalization':
self.normalization = child.value.lower()
if self.normalization not in self.acceptedNormalizationParam:
self.raiseAnError(IOError, 'Requested unknown normalization type: ',
self.normalization, '. Available options: ',
self.acceptedNormalizationParam)
# register metadata
self.addMetaKeys(['maxLabel','minLabel'])
def collectOutput(self, finishedJob, output):
"""
Function to place all of the computed data into the output object
@ In, finishedJob, JobHandler External or Internal instance, A JobHandler object that is in charge of running this post-processor
@ In, output, dataObjects, The object where we want to place our computed results
@ Out, None
"""
evaluation = finishedJob.getEvaluation()
inputList,outputDict = evaluation
if output.type == 'PointSet':
# TODO this is a slow dict-based implementation. It should be improved on need.
# TODO can inputList ever be multiple dataobjects?
if len(inputList) > 1:
self.raiseAnError(NotImplementedError, 'Need to implement looping over all inputs.')
fromInput = inputList[0].asDataset('dict')['data']
results = dict((var,fromInput[var]) for var in output.getVars() if var in fromInput.keys())
for label in ['minLabel','maxLabel']:
results[label] = outputDict[label]
output.load(results,style='dict')
output.addMeta(self.type,{'general':{'hierarchy':outputDict['hierarchy']}})
return
#### OLD ####
requestedInput = output.getParaKeys('input')
requestedOutput = output.getParaKeys('output')
dataLength = None
for inputData in inputList:
# Pass inputs from input data to output data
for key, value in inputData.getParametersValues('input').items():
if key in requestedInput:
# We need the size to ensure the data size is consistent, but there
# is no guarantee the data is not scalar, so this check is necessary
myLength = 1
if hasattr(value, "__len__"):
myLength = len(value)
if dataLength is None:
dataLength = myLength
elif dataLength != myLength:
dataLength = max(dataLength, myLength)
self.raiseAWarning('Data size is inconsistent. Currently set to '
+ str(dataLength) + '.')
for val in value:
output.updateInputValue(key, val)
# Pass outputs from input data to output data
for key, value in inputData.getParametersValues('output').items():
if key in requestedOutput:
# We need the size to ensure the data size is consistent, but there
# is no guarantee the data is not scalar, so this check is necessary
myLength = 1
if hasattr(value, "__len__"):
myLength = len(value)
if dataLength is None:
dataLength = myLength
elif dataLength != myLength:
dataLength = max(dataLength, myLength)
self.raiseAWarning('Data size is inconsistent. Currently set to '
+ str(dataLength) + '.')
for val in value:
output.updateOutputValue(key, val)
# Append the min/max labels to the data whether the user wants them or
# not, and place the hierarchy information into the metadata
for key, values in outputDict.items():
if key in ['minLabel', 'maxLabel']:
for value in values:
output.updateOutputValue(key, [value])
elif key in ['hierarchy']:
output.updateMetadata(key, [values])
else:
self.raiseAWarning('Output type ' + type(output).__name__ + ' not'
+ ' yet implemented. I am going to skip it.')
def userInteraction(self):
"""
      A placeholder for allowing users to interact with and tweak the model in situ
before saving the analysis results
@ In, None
@ Out, None
"""
pass
def run(self, inputIn):
"""
Function to finalize the filter => execute the filtering
@ In, inputIn, dict, dictionary of data to process
@ Out, outputDict, dict, Dictionary containing the post-processed results
"""
internalInput = self.inputToInternal(inputIn)
outputDict = {}
myDataIn = internalInput['features']
myDataOut = internalInput['targets']
self.outputData = myDataOut[self.parameters['targets']]
self.pointCount = len(self.outputData)
self.dimensionCount = len(self.parameters['features'])
self.inputData = np.zeros((self.pointCount, self.dimensionCount))
for i, lbl in enumerate(self.parameters['features']):
self.inputData[:, i] = myDataIn[lbl]
if self.weighted:
self.weights = internalInput['metadata']['PointProbability']
else:
self.weights = None
self.names = self.parameters['features'] + [self.parameters['targets']]
self.__amsc = None
self.userInteraction()
## Possibly load this here in case people have trouble building it, so it
## only errors if they try to use it?
from AMSC_Object import AMSC_Object
if self.__amsc is None:
self.__amsc = AMSC_Object(X=self.inputData, Y=self.outputData,
w=self.weights, names=self.names,
graph=self.graph, gradient=self.gradient,
knn=self.knn, beta=self.beta,
normalization=self.normalization,
persistence=self.persistence, debug=False)
self.__amsc.Persistence(self.simplification)
partitions = self.__amsc.Partitions()
outputDict['minLabel'] = np.zeros(self.pointCount)
outputDict['maxLabel'] = np.zeros(self.pointCount)
for extPair, indices in partitions.items():
for idx in indices:
outputDict['minLabel'][idx] = extPair[0]
outputDict['maxLabel'][idx] = extPair[1]
outputDict['hierarchy'] = self.__amsc.PrintHierarchy()
self.__amsc.BuildModels()
linearFits = self.__amsc.SegmentFitCoefficients()
linearFitnesses = self.__amsc.SegmentFitnesses()
for key in linearFits.keys():
coefficients = linearFits[key]
rSquared = linearFitnesses[key]
outputDict['coefficients_%d_%d' % (key[0], key[1])] = coefficients
outputDict['R2_%d_%d' % (key[0], key[1])] = rSquared
return outputDict
try:
import PySide.QtCore as qtc
__QtAvailable = True
except ImportError as e:
try:
import PySide2.QtCore as qtc
__QtAvailable = True
except ImportError as e:
__QtAvailable = False
if __QtAvailable:
class mQTopologicalDecomposition(type(TopologicalDecomposition), type(qtc.QObject)):
"""
Class used to solve the metaclass conflict
"""
pass
class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject, metaclass=mQTopologicalDecomposition):
"""
TopologicalDecomposition class - Computes an approximated hierarchical
Morse-Smale decomposition from an input point cloud consisting of an
arbitrary number of input parameters and a response value per input point
"""
requestUI = qtc.Signal(str,str,dict)
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
inputSpecification = super(QTopologicalDecomposition, cls).getInputSpecification()
inputSpecification.addSub(InputData.parameterInputFactory("interactive"))
return inputSpecification
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
# TopologicalDecomposition.__init__(self)
# qtc.QObject.__init__(self)
self.interactive = False
self.uiDone = True ## If it has not been requested, then we are not waiting for a UI
def _localWhatDoINeed(self):
"""
This method is a local mirror of the general whatDoINeed method.
It is implemented by the samplers that need to request special objects
@ In , None, None
@ Out, needDict, list of objects needed
"""
return {'internal':[(None,'app')]}
def _localGenerateAssembler(self,initDict):
"""
Generates the assembler.
@ In, initDict, dict of init objects
@ Out, None
"""
self.app = initDict['internal']['app']
if self.app is None:
self.interactive = False
def _localReadMoreXML(self, xmlNode):
"""
Function to grab the names of the methods this post-processor will be
using
@ In, xmlNode : Xml element node
@ Out, None
"""
paramInput = QTopologicalDecomposition.getInputSpecification()()
paramInput.parseNode(xmlNode)
self._handleInput(paramInput)
def _handleInput(self, paramInput):
"""
Function to handle the parsed paramInput for this class.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
TopologicalDecomposition._handleInput(self, paramInput)
for child in paramInput.subparts:
if child.getName() == 'interactive':
self.interactive = True
def userInteraction(self):
"""
Launches an interface allowing the user to tweak specific model
parameters before saving the results to the output object(s).
@ In, None
@ Out, None
"""
self.uiDone = not self.interactive
if self.interactive:
## Connect our own signal to the slot on the main thread
self.requestUI.connect(self.app.createUI)
        ## Connect our own slot to listen for whenever the main thread signals a
## window has been closed
self.app.windowClosed.connect(self.signalDone)
## Give this UI a unique id in case other threads are requesting UI
## elements
uiID = str(id(self))
## Send the request for a UI thread to the main application
self.requestUI.emit('TopologyWindow', uiID,
{'X':self.inputData, 'Y':self.outputData,
'w':self.weights, 'names':self.names,
'graph':self.graph, 'gradient': self.gradient,
'knn':self.knn, 'beta':self.beta,
'normalization':self.normalization,
'views': ['TopologyMapView', 'SensitivityView',
'FitnessView', 'ScatterView2D',
'ScatterView3D']})
## Spinlock will wait until this instance's window has been closed
while(not self.uiDone):
time.sleep(1)
## First check that the requested UI exists, and then if that UI has the
## requested information, if not proceed as if it were not an
## interactive session.
if uiID in self.app.UIs and hasattr(self.app.UIs[uiID],'amsc'):
self.__amsc = self.app.UIs[uiID].amsc
self.simplification = self.app.UIs[uiID].amsc.Persistence()
else:
self.__amsc = None
def signalDone(self,uiID):
"""
In Qt language, this is a slot that will accept a signal from the UI
saying that it has completed, thus allowing the computation to begin
again with information updated by the user in the UI.
@In, uiID, string, the ID of the user interface that signaled its
        completion. Thus, if several UI windows are open, we don't proceed
        until the correct one has signaled it is done.
@Out, None
"""
if uiID == str(id(self)):
self.uiDone = True
|
|
import base64
import json
import msgpack
import operator
import zlib
from collections import defaultdict
from copy import copy
from datetime import datetime
from dateutil import tz
from enum import IntEnum
from functools import reduce
from itertools import chain
from . import model
from .model import EmbedLike, Property, NotFound, Skip, classname
#: The UNIX epoch.
_epoch = datetime(1970, 1, 1, tzinfo=tz.tzutc())
def _seconds_since_epoch(dt):
return (dt - _epoch).total_seconds()
#: The maximum length of indexed properties.
_max_indexed_length = 1500
class Blob:
"""Mixin for Properties whose values cannot be indexed.
"""
def __init__(self, **options):
if options.get("indexed") or options.get("indexed_if"):
raise TypeError(f"{classname(self)} properties cannot be indexed.")
super().__init__(**options)
class Compressable(Blob):
"""Mixin for Properties whose values can be gzipped before being
persisted.
Parameters:
compressed(bool): Whether or not values belonging to this
          Property should be compressed before being stored in Datastore.
compression_level(int): The amount of compression to apply.
See :func:`zlib.compress` for details.
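    Example (a sketch; ``Bytes`` below mixes this in, and the field name
    is hypothetical)::
        payload = Bytes(compressed=True, compression_level=9)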
"""
def __init__(self, *, compressed=False, compression_level=-1, **options):
if not (-1 <= compression_level <= 9):
raise ValueError("compression_level must be an integer between -1 and 9.")
super().__init__(**options)
self.compressed = compressed
self.compression_level = compression_level
def prepare_to_load(self, entity, value):
if value is not None and self.compressed:
value = zlib.decompress(value)
return super().prepare_to_load(entity, value)
def prepare_to_store(self, entity, value):
if value is not None and self.compressed:
value = zlib.compress(value, level=self.compression_level)
return super().prepare_to_store(entity, value)
class Encodable:
"""Mixin for string properties that have an encoding.
Parameters:
encoding(str): The encoding to use when persisting this Property
to Datastore. Defaults to ``utf-8``.
"""
def __init__(self, *, encoding="utf-8", **options):
super().__init__(**options)
self.encoding = encoding
def prepare_to_load(self, entity, value):
value = super().prepare_to_load(entity, value)
# BUG(gcloud): Projections seem to cause bytes to be
# loaded as strings so this instance check is required.
if value is not None and isinstance(value, (list, bytes)):
if self.repeated:
value = [v.decode(self.encoding) for v in value]
else:
value = value.decode(self.encoding)
return value
def prepare_to_store(self, entity, value):
if value is not None:
if self.repeated:
value = [v.encode(self.encoding) for v in value]
else:
value = value.encode(self.encoding)
return super().prepare_to_store(entity, value)
class Serializer(Compressable, Property):
"""Base class for properties that serialize data.
"""
_types = (bool, bytes, dict, float, int, str, datetime, model.Key, model.Model)
@classmethod
def _serialize(cls, value): # pragma: no cover
raise NotImplementedError
@classmethod
def _deserialize(cls, data): # pragma: no cover
raise NotImplementedError
@classmethod
def _dumps(cls, value): # pragma: no cover
raise NotImplementedError
@classmethod
def _loads(cls, data): # pragma: no cover
raise NotImplementedError
@staticmethod
def _entity_to_dict(entity):
return {
"key": entity.key,
"kind": entity._kind,
"data": dict(entity),
}
@staticmethod
def _entity_from_dict(data):
key = model.Key(*data["key"])
clazz = model.lookup_model_by_kind(data["kind"])
instance = clazz._load(key, data["data"])
return instance
def prepare_to_load(self, entity, value):
if value is not None:
value = self._loads(value)
return super().prepare_to_load(entity, value)
def prepare_to_store(self, entity, value):
if value is not None:
value = self._dumps(value)
return super().prepare_to_store(entity, value)
class Bool(Property):
"""A Property for boolean values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
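    Example (a sketch; the model and field name are hypothetical;
    ``is_true``/``is_false`` below yield PropertyFilters for queries)::
        class User(Model):
            is_admin = Bool(indexed=True, default=False)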
"""
_types = (bool,)
@property
def is_true(self):
"PropertyFilter: A filter that checks if this value is True."
return self == True # noqa
@property
def is_false(self):
"PropertyFilter: A filter that checks if this value is False."
return self == False # noqa
class Bytes(Compressable, Property):
"""A Property for bytestring values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
compressed(bool, optional): Whether or not this property should
be compressed before being persisted.
compression_level(int, optional): The amount of compression to
apply when compressing values.
"""
_types = (bytes,)
class Computed(Property):
"""A Property for values that should be computed dinamically based
on the state of the entity. Values on an entity are only computed
the first time computed properties are accessed on that entity and
they are re-computed every time the entity is loaded from
Datastore.
Computed properties cannot be assigned to and their "cache" can be
busted by deleting them::
del an_entity.a_property
Note:
Computed properties are **indexed** and **optional** by default
for convenience. This is different from all other built-in
properties.
Parameters:
fn(callable): The function to use when computing the data.
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``True``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``True``.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``.
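    Example (a sketch; the model and field names are hypothetical)::
        class User(Model):
            first_name = String()
            last_name = String()
            full_name = Computed(lambda u: f"{u.first_name} {u.last_name}")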
"""
_types = (object,)
def __init__(self, fn, **options):
# Computed properties are/should mainly be used for filtering
# purposes, so it makes sense for them to default to being
# both optional and indexed for convenience.
options.setdefault("indexed", True)
options.setdefault("optional", True)
super().__init__(**options)
self.fn = fn
def __get__(self, ob, obtype):
if ob is None:
return self
value = ob._data.get(self.name_on_model, NotFound)
if value is NotFound:
value = ob._data[self.name_on_model] = self.fn(ob)
return value
def __set__(self, ob, value):
raise AttributeError("Can't set attribute.")
def prepare_to_load(self, entity, value):
return Skip
class DateTime(Property):
"""A Property for :class:`datetime.datetime` values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
auto_now_add(bool, optional): Whether or not to set this
property's value to the current time the first time it's
stored.
auto_now(bool, optional): Whether or not this property's value
should be set to the current time every time it is stored.
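    Example (a sketch; the model and field names are hypothetical)::
        class Post(Model):
            created_at = DateTime(indexed=True, auto_now_add=True)
            updated_at = DateTime(indexed=True, auto_now=True)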
"""
_types = (datetime,)
def __init__(self, *, auto_now_add=False, auto_now=False, **options):
super().__init__(**options)
self.auto_now_add = auto_now_add
self.auto_now = auto_now
if self.repeated and (auto_now_add or auto_now):
raise TypeError("Cannot use auto_now{,_add} with repeated properties.")
def _current_value(self):
return datetime.now(tz.tzlocal())
def prepare_to_load(self, entity, value):
return super().prepare_to_load(entity, value)
def prepare_to_store(self, entity, value):
if value is None and self.auto_now_add:
value = entity._data[self.name_on_model] = self._current_value()
elif self.auto_now:
value = entity._data[self.name_on_model] = self._current_value()
if value is not None:
value = entity._data[self.name_on_model] = value.astimezone(tz.tzutc())
return super().prepare_to_store(entity, value)
def validate(self, value):
value = super().validate(value)
if value is not None and not value.tzinfo:
return value.replace(tzinfo=tz.tzlocal())
return value
class Float(Property):
"""A Property for floating point values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
"""
_types = (float,)
class Integer(Property):
"""A Property for integer values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
"""
_types = (int,)
class Json(Serializer):
"""A Property for values that should be stored as JSON.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
compressed(bool, optional): Whether or not this property should
be compressed before being persisted.
compression_level(int, optional): The amount of compression to
apply when compressing values.
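    Example (a sketch; the field name is hypothetical)::
        settings = Json(optional=True, compressed=True)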
"""
#: The name of the field that is used to store type information
#: about non-standard JSON values.
_type_field = "__anom_type"
@classmethod
def _serialize(cls, value):
if isinstance(value, bytes):
kind, value = "blob", base64.b64encode(value).decode("utf-8")
elif isinstance(value, datetime):
kind, value = "datetime", _seconds_since_epoch(value)
elif isinstance(value, model.Model):
kind, value = "model", cls._entity_to_dict(value)
else:
raise TypeError(f"Value of type {type(value)} cannot be serialized.")
return {cls._type_field: kind, "value": value}
@classmethod
def _deserialize(cls, data):
kind = data.get(cls._type_field, None)
if kind is None:
return data
value = data.get("value")
if kind == "blob":
return base64.b64decode(value.encode("utf-8"))
elif kind == "datetime":
return datetime.fromtimestamp(value, tz.tzutc())
elif kind == "model":
return cls._entity_from_dict(value)
else:
raise ValueError(f"Invalid kind {kind!r}.")
@classmethod
def _dumps(cls, value):
return json.dumps(value, separators=(",", ":"), default=Json._serialize)
@classmethod
def _loads(cls, data):
return json.loads(data, object_hook=Json._deserialize)
class Key(Property):
"""A Property for :class:`anom.Key` values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
kind(str or model, optional): The kinds of keys that may be
assigned to this property.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
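    Example (a sketch; the model and field names are hypothetical)::
        class Invoice(Model):
            customer = Key(kind="Customer", indexed=True)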
"""
_types = (model.Model, model.Key, tuple)
def __init__(self, *, kind=None, **options):
super().__init__(**options)
if isinstance(kind, model.model):
self.kind = kind._kind
else:
self.kind = kind
def validate(self, value):
value = super().validate(value)
if value is not None:
if self.repeated and isinstance(value, list):
return [self.validate(v) for v in value]
if isinstance(value, model.Model):
value = value.key
# Serialized keys may end up being turned into tuples
# (JSON, msgpack) so we attempt to coerce them here.
if isinstance(value, tuple):
value = model.Key(*value)
if value.is_partial:
raise ValueError("Cannot assign partial Keys to Key properties.")
elif self.kind and self.kind != value.kind:
raise ValueError(f"Property {self.name_on_model} cannot be assigned keys of kind {value.kind}.")
return value
class Msgpack(Serializer):
"""A Property for values that should be stored as msgpack data.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
compressed(bool, optional): Whether or not this property should
be compressed before being persisted.
compression_level(int, optional): The amount of compression to
apply when compressing values.
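    Example (a sketch; the field name is hypothetical)::
        payload = Msgpack(optional=True, compressed=True)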
"""
class Extensions(IntEnum):
Model = 0
DateTime = 1
@classmethod
def _serialize(cls, value):
if isinstance(value, model.Model):
kind, value = cls.Extensions.Model, cls._entity_to_dict(value)
elif isinstance(value, datetime):
kind, value = cls.Extensions.DateTime, _seconds_since_epoch(value)
else:
raise TypeError(f"Value of type {type(value)} cannot be serialized.")
return msgpack.ExtType(kind, cls._dumps(value))
@classmethod
def _deserialize(cls, code, data):
try:
kind = cls.Extensions(code)
except ValueError:
raise ValueError(f"Invalid extension code {code}.")
value = cls._loads(data)
if kind is cls.Extensions.Model:
return cls._entity_from_dict(value)
elif kind is cls.Extensions.DateTime:
return datetime.fromtimestamp(value, tz.tzutc())
@classmethod
def _dumps(cls, value):
return msgpack.packb(value, default=cls._serialize, use_bin_type=True)
@classmethod
def _loads(cls, data):
return msgpack.unpackb(data, ext_hook=cls._deserialize, encoding="utf-8")
class String(Encodable, Property):
"""A Property for indexable string values.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
encoding(str): The encoding to use when persisting this Property
to Datastore. Defaults to ``utf-8``.
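    Example (a sketch; the field name is hypothetical; indexed values
    longer than the maximum indexed length (1500) are rejected by
    ``validate``)::
        title = String(indexed=True)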
"""
_types = (str,)
def _validate_length(self, value):
if len(value) > _max_indexed_length and \
len(value.encode(self.encoding)) > _max_indexed_length:
raise ValueError(
f"String value is longer than the maximum allowed length "
f"({_max_indexed_length}) for indexed properties. Set "
f"indexed to False if the value should not be indexed."
)
def validate(self, value):
value = super().validate(value)
if not self.indexed or value is None:
return value
if not self.repeated:
self._validate_length(value)
return value
class Text(Encodable, Compressable, Property):
"""A Property for long string values that are never indexed.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
compressed(bool, optional): Whether or not this property should
be compressed before being persisted.
compression_level(int, optional): The amount of compression to
apply when compressing values.
encoding(str): The encoding to use when persisting this Property
to Datastore. Defaults to ``utf-8``.
"""
_types = (str,)
class Unicode(Property):
"""A Property for string values that should be stored as unicode strings.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
"""
_types = (str,)
def _validate_length(self, value):
if len(value.encode('utf-8')) > _max_indexed_length:
raise ValueError(
f"Unicode value is longer than the maximum allowed length "
f"({_max_indexed_length} bytes) for indexed properties. Set "
f"indexed to False if the value should not be indexed."
)
def validate(self, value):
value = super().validate(value)
if not self.indexed or value is None:
return value
if not self.repeated:
self._validate_length(value)
return value
class Embed(EmbedLike):
"""A property for embedding entities inside other entities.
Parameters:
kind(str or model): The kinds of keys that may be assigned to
this property.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required but empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
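    Example (a sketch; the model and field names are hypothetical)::
        class Address(Model):
            street = String()
            city = String()
        class Customer(Model):
            home_address = Embed(kind="Address")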
"""
def __init__(self, *, kind, **options):
if "default" in options or "indexed" in options or "indexed_if" in options:
raise TypeError(f"{classname(self)} does not support name, default, indexed or indexed_if.")
super().__init__(**options)
if isinstance(kind, model.model):
self.kind = kind._kind
else:
self.kind = kind
def __copy__(self):
prop = type(self)(kind=self.kind)
for name in vars(self):
setattr(prop, name, getattr(self, name))
return prop
def get_unindexed_properties(self, entity):
if isinstance(entity, list):
return tuple(set(chain.from_iterable(self.get_unindexed_properties(e) for e in entity)))
return tuple(f"{self.name_on_entity}.{name}" for name in entity.unindexed_properties)
def validate(self, value):
if self.optional and value is None:
return [] if self.repeated else None
model_class = model.lookup_model_by_kind(self.kind)
if self.repeated and not all(isinstance(v, model_class) for v in value):
raise TypeError(f"{self.name_on_model} properties must be instances of {self.kind}.")
elif not self.repeated and not isinstance(value, model_class):
raise TypeError(f"{self.name_on_model} properties must be instances of {self.kind}.")
return value
def prepare_to_load(self, entity, data):
embed_prefix = f"{self.name_on_entity}."
embed_prefix_len = len(embed_prefix)
data = {k[embed_prefix_len:]: v for k, v in data.items() if k.startswith(embed_prefix)}
if self.repeated:
return self._prepare_to_load_repeated_properties(data)
return self._prepare_to_load_properties(data)
def _prepare_to_load_repeated_properties(self, data):
if self.optional and not data:
return []
datas = [{} for _ in range(len(next(iter(data.values()))))]
for key, values in data.items():
for i, value in enumerate(values):
datas[i][key] = value
return [self._prepare_to_load_properties(data) for data in datas]
def _prepare_to_load_properties(self, data):
if self.optional and not data:
return None
model_class = model.lookup_model_by_kind(self.kind)
model_key = model.Key(self.kind)
return model_class._load(model_key, data)
def prepare_to_store(self, entity, value):
if value is not None:
if isinstance(value, list):
yield from self._prepare_to_store_repeated_properties(value)
else:
yield from self._prepare_to_store_properties(value)
elif not self.optional:
raise RuntimeError(f"Property {self.name_on_model} requires a value.")
def _prepare_to_store_repeated_properties(self, entities):
if self.optional and not entities:
return []
properties = defaultdict(list)
for entity in entities:
for name, value in self._prepare_to_store_properties(entity):
properties[name].append(value)
# Ensure all sublists are equal, otherwise rebuilding the
# entity is not going to be possible.
props_are_valid = reduce(operator.eq, (len(val) for val in properties.values()))
if not props_are_valid: # pragma: no cover
raise ValueError(
"Repeated properties for {self.name_on_model} have different lengths. "
"You probably just found a bug in anom, please report it!"
)
for name, values in properties.items():
yield name, values
def _prepare_to_store_properties(self, entity):
for name, prop in entity._properties.items():
value = getattr(entity, name)
if isinstance(prop, EmbedLike):
for name, value in prop.prepare_to_store(entity, value):
yield f"{self.name_on_entity}.{name}", value
else:
yield f"{self.name_on_entity}.{prop.name_on_entity}", prop.prepare_to_store(entity, value)
def __getattr__(self, name):
model_class = model.lookup_model_by_kind(self.kind)
value = getattr(model_class, name)
if isinstance(value, model.Property):
value = copy(value)
value._name_on_entity = f"{self.name_on_entity}.{value.name_on_entity}"
value._name_on_model = f"{self.name_on_model}.{value.name_on_model}"
return value
return value
|
|
#!/usr/bin/env python
import math
import ctypes
import sys  # used by initGL via sys.argv
from contextlib import contextmanager
import OpenGL.error  # so the OpenGL.error.GLError reference in Program.in_use resolves
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.arrays import vbo
from PIL import Image
import pygame
class ImageTexture(object):
def __init__(self, image):
self.width = image.size[0]
self.height = image.size[1]
self.buffer = image.convert("RGBA").tobytes("raw", "RGBA", 0, -1)
self._tex_id = glGenTextures(1)
self.bind()
glTexImage2D(GL_TEXTURE_2D, 0, 4, self.width, self.height,
                     0, GL_RGBA, GL_UNSIGNED_BYTE, self.buffer)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
def bind(self):
glBindTexture(GL_TEXTURE_2D, self._tex_id)
class Shader(object):
type = None
def __init__(self, source):
self._shader = glCreateShader(self.type)
# glShaderSource actually expects a list of strings...
if isinstance(source, basestring):
source = [source]
glShaderSource(self._shader, source)
glCompileShader(self._shader)
result = glGetShaderiv(self._shader, GL_COMPILE_STATUS)
if result != 1:
log = glGetShaderInfoLog(self._shader)
print "Shader compile failed\nShader compilation Log:\n"+log
raise Exception()
class VertexShader(Shader):
type = GL_VERTEX_SHADER
class FragmentShader(Shader):
type = GL_FRAGMENT_SHADER
class Program(object):
def __init__(self, vs, fs):
self._program = glCreateProgram()
glAttachShader(self._program, vs._shader)
glAttachShader(self._program, fs._shader)
glLinkProgram(self._program)
@contextmanager
def in_use(self):
try:
try:
glUseProgram(self._program)
except OpenGL.error.GLError:
print glGetProgramInfoLog(self._program)
raise
yield
finally:
glUseProgram(0)
def setUniform1i(self, name, value):
u = glGetUniformLocation(self._program, name)
glUniform1i(u, value)
def setUniform1f(self, name, value):
u = glGetUniformLocation(self._program, name)
glUniform1f(u, value)
it1 = it2 = None
program = None
point_lattice = None
points = None
def initGL():
im = Image.open(sys.argv[1])
im = im.convert('L')
global it1
it1 = ImageTexture(im)
im = Image.open(sys.argv[2])
im = im.convert('L')
global it2
it2 = ImageTexture(im)
vs = VertexShader(open("peaks.vs").read())
fs = FragmentShader(open("peaks.fs").read())
global program
program = Program(vs, fs)
# Create array of points
global point_lattice, points
points = (ctypes.c_float * 2 * 100 * 100)()
for x in xrange(100):
for y in xrange(100):
points[x][y][0] = (x - 50)
points[x][y][1] = (y - 50)
point_lattice = vbo.VBO(points)
glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(90,1,0.01,1000)
gluLookAt(0,0,100,0,0,0,0,1,0)
glClearColor(0.3, 0.3, 0.3, 1.0)
glEnableClientState(GL_VERTEX_ARRAY)
angle = 0.0
anglex = 0.0
angley = 0.0
def paintGL(elapsed):
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
global point_lattice, points, program, it1, it2
global angle, anglex, angley
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glRotated(anglex, 1, 0, 0)
glRotated(angley, 0, 1, 0)
# 90 deg/s
angle += (elapsed * 360.0) / 4000.0
blend = (math.sin(math.radians(angle)) + 1.0) / 2.0
with program.in_use():
program.setUniform1f("blend", blend)
program.setUniform1f("npw", float(len(points)))
program.setUniform1f("nph", float(len(points[0])))
glActiveTexture(GL_TEXTURE0 + 0)
it1.bind()
program.setUniform1i("texture1", 0)
glActiveTexture(GL_TEXTURE0 + 1)
it2.bind()
program.setUniform1i("texture2", 1)
with point_lattice:
glVertexPointerf(point_lattice)
glDrawArrays(GL_POINTS, 0, 100*100)
if __name__ == "__main__":
import sys
    if len(sys.argv) < 3:
print "Usage: imgpeaks.py <image1> <image2>"
sys.exit(1)
pygame.init()
pygame.display.set_mode((800, 600), pygame.OPENGL|pygame.DOUBLEBUF)
initGL()
then = pygame.time.get_ticks()
running = True
mdown = False
mpos = (0, 0)
while running:
now = pygame.time.get_ticks()
elapsed = now - then
then = now
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
elif event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed()[0]:
mdown = True
mpos = event.pos
elif event.type == pygame.MOUSEBUTTONUP:
mdown = False
elif mdown and event.type == pygame.MOUSEMOTION:
dx = event.pos[0] - mpos[0]
dy = event.pos[1] - mpos[1]
anglex += dy
angley -= dx
mpos = event.pos
paintGL(elapsed)
pygame.display.flip()
pygame.time.wait(0)
|
|
"""
Example generation for scikit-learn.
Generate the rst files for the examples by iterating over the Python
example files.
Files that generate images should start with 'plot'.
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
fhindex.write("""\
.. raw:: html
<style type="text/css">
.figure {
float: left;
margin: 10px;
width: auto;
height: 200px;
width: 180px;
}
.figure img {
display: inline;
}
.figure .caption {
width: 170px;
text-align: center !important;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file'
% src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
def sort_key(a):
        # put elements without a plot last
if not a.startswith('plot') and a.endswith('.py'):
return 'zz' + a
return a
for fname in sorted(os.listdir(src_dir), key=sort_key):
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, plot_gallery)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`example_%s`
.. toctree::
:hidden:
%s/%s
""" % (link_name, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if (not os.path.exists(first_image_file) or
os.stat(first_image_file).st_mtime <=
os.stat(src_file).st_mtime):
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_stdout = StringIO()
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
sys.stdout = orig_stdout
my_stdout = my_stdout.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
os.chdir(cwd)
                # In order to save every figure we have two solutions:
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
except:
print 80 * '_'
                print '%s failed to execute:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % (time() - t0)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
from matplotlib import image
if os.path.exists(first_image_file):
image.thumbnail(first_image_file, thumb_file, 0.2)
if not os.path.exists(thumb_file):
            # create something to replace the missing thumbnail
shutil.copy('images/blank_image.png', thumb_file)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
|
|
# coding: utf-8
"""
flask_oauthlib.provider.oauth2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Implements OAuth2 provider support for Flask.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
"""
import os
import logging
import datetime
from functools import wraps
from flask import request, url_for
from flask import redirect, abort
from werkzeug import cached_property
from werkzeug.utils import import_string
from oauthlib import oauth2
from oauthlib.oauth2 import RequestValidator, Server
from oauthlib.common import to_unicode, add_params_to_uri
from ..utils import extract_params, decode_base64, create_response
__all__ = ('OAuth2Provider', 'OAuth2RequestValidator')
log = logging.getLogger('flask_oauthlib')
class OAuth2Provider(object):
"""Provide secure services using OAuth2.
    The server should provide an authorize handler and a token handler.
    Before the handlers are implemented, the server should provide
some getters for the validation.
Like many other Flask extensions, there are two usage modes. One is
binding the Flask app instance::
app = Flask(__name__)
oauth = OAuth2Provider(app)
The second possibility is to bind the Flask app later::
oauth = OAuth2Provider()
def create_app():
app = Flask(__name__)
oauth.init_app(app)
return app
Configure :meth:`tokengetter` and :meth:`tokensetter` to get and
set tokens. Configure :meth:`grantgetter` and :meth:`grantsetter`
to get and set grant tokens. Configure :meth:`clientgetter` to
get the client.
Configure :meth:`usergetter` if you need password credential
authorization.
With everything ready, implement the authorization workflow:
* :meth:`authorize_handler` for consumer to confirm the grant
* :meth:`token_handler` for client to exchange access token
And now you can protect the resource with scopes::
@app.route('/api/user')
@oauth.require_oauth('email', 'username')
def user():
return jsonify(request.oauth.user)
"""
def __init__(self, app=None):
self._before_request_funcs = []
self._after_request_funcs = []
self._invalid_response = None
if app:
self.init_app(app)
def init_app(self, app):
"""
This callback can be used to initialize an application for the
oauth provider instance.
"""
self.app = app
app.extensions = getattr(app, 'extensions', {})
app.extensions['oauthlib.provider.oauth2'] = self
@cached_property
def error_uri(self):
"""The error page URI.
        When something goes wrong, it will redirect to this error page.
You can configure the error page URI with Flask config::
OAUTH2_PROVIDER_ERROR_URI = '/error'
You can also define the error page by a named endpoint::
OAUTH2_PROVIDER_ERROR_ENDPOINT = 'oauth.error'
"""
error_uri = self.app.config.get('OAUTH2_PROVIDER_ERROR_URI')
if error_uri:
return error_uri
error_endpoint = self.app.config.get('OAUTH2_PROVIDER_ERROR_ENDPOINT')
if error_endpoint:
return url_for(error_endpoint)
return '/oauth/errors'
@cached_property
def server(self):
"""
        All-in-one endpoints. This property is created automatically
if you have implemented all the getters and setters.
However, if you are not satisfied with the getter and setter,
you can create a validator with :class:`OAuth2RequestValidator`::
class MyValidator(OAuth2RequestValidator):
def validate_client_id(self, client_id):
# do something
return True
And assign the validator for the provider::
oauth._validator = MyValidator()
"""
expires_in = self.app.config.get('OAUTH2_PROVIDER_TOKEN_EXPIRES_IN')
token_generator = self.app.config.get(
'OAUTH2_PROVIDER_TOKEN_GENERATOR', None
)
if token_generator and not callable(token_generator):
token_generator = import_string(token_generator)
refresh_token_generator = self.app.config.get(
'OAUTH2_PROVIDER_REFRESH_TOKEN_GENERATOR', None
)
if refresh_token_generator and not callable(refresh_token_generator):
refresh_token_generator = import_string(refresh_token_generator)
if hasattr(self, '_validator'):
return Server(
self._validator,
token_expires_in=expires_in,
token_generator=token_generator,
refresh_token_generator=refresh_token_generator,
)
if hasattr(self, '_clientgetter') and \
hasattr(self, '_tokengetter') and \
hasattr(self, '_tokensetter') and \
hasattr(self, '_grantgetter') and \
hasattr(self, '_grantsetter'):
usergetter = None
if hasattr(self, '_usergetter'):
usergetter = self._usergetter
validator = OAuth2RequestValidator(
clientgetter=self._clientgetter,
tokengetter=self._tokengetter,
grantgetter=self._grantgetter,
usergetter=usergetter,
tokensetter=self._tokensetter,
grantsetter=self._grantsetter,
)
self._validator = validator
return Server(
validator,
token_expires_in=expires_in,
token_generator=token_generator,
refresh_token_generator=refresh_token_generator,
)
raise RuntimeError('application not bound to required getters')
def before_request(self, f):
"""Register functions to be invoked before accessing the resource.
        The function accepts no parameters, but you can get
        information from the `flask.request` object. It is usually useful
        for setting limitations on the client request::
@oauth.before_request
def limit_client_request():
client_id = request.values.get('client_id')
if not client_id:
return
client = Client.get(client_id)
if over_limit(client):
return abort(403)
track_request(client)
"""
self._before_request_funcs.append(f)
return f
def after_request(self, f):
"""Register functions to be invoked after accessing the resource.
The function accepts ``valid`` and ``request`` as parameters,
and it should return a tuple of them::
@oauth.after_request
def valid_after_request(valid, oauth):
if oauth.user in black_list:
return False, oauth
return valid, oauth
"""
self._after_request_funcs.append(f)
return f
def invalid_response(self, f):
"""Register a function for responsing with invalid request.
When an invalid request proceeds to :meth:`require_oauth`, we can
handle the request with the registered function. The function
accepts one parameter, which is an oauthlib Request object::
@oauth.invalid_response
def invalid_require_oauth(req):
return jsonify(message=req.error_message), 401
If no function is registered, it will return with ``abort(401)``.
"""
self._invalid_response = f
return f
def clientgetter(self, f):
"""Register a function as the client getter.
The function accepts one parameter `client_id`, and it returns
        a client object with at least this information:
- client_id: A random string
- client_secret: A random string
        - client_type: A string indicating whether it is `confidential`
- redirect_uris: A list of redirect uris
- default_redirect_uri: One of the redirect uris
- default_scopes: Default scopes of the client
        The client may contain more information; the following is suggested:
- allowed_grant_types: A list of grant types
- allowed_response_types: A list of response types
- validate_scopes: A function to validate scopes
Implement the client getter::
@oauth.clientgetter
def get_client(client_id):
client = get_client_model(client_id)
# Client is an object
return client
"""
self._clientgetter = f
return f
def usergetter(self, f):
"""Register a function as the user getter.
This decorator is only required for **password credential**
authorization::
@oauth.usergetter
def get_user(username, password, client, request,
*args, **kwargs):
# client: current request client
if not client.has_password_credential_permission:
return None
user = User.get_user_by_username(username)
if not user.validate_password(password):
return None
# parameter `request` is an OAuthlib Request object.
# maybe you will need it somewhere
return user
"""
self._usergetter = f
return f
def tokengetter(self, f):
"""Register a function as the token getter.
        The function accepts `access_token` or `refresh_token` parameters,
        and it returns a token object with at least this information:
- access_token: A string token
- refresh_token: A string token
- client_id: ID of the client
- scopes: A list of scopes
- expires: A `datetime.datetime` object
- user: The user object
        The implementation of tokengetter should accept two parameters,
        one is the access_token, the other is the refresh_token::
@oauth.tokengetter
def bearer_token(access_token=None, refresh_token=None):
if access_token:
return get_token(access_token=access_token)
if refresh_token:
return get_token(refresh_token=refresh_token)
return None
"""
self._tokengetter = f
return f
def tokensetter(self, f):
"""Register a function to save the bearer token.
        The setter accepts at least two parameters: one is the token,
        the other is the request::
@oauth.tokensetter
def set_token(token, request, *args, **kwargs):
save_token(token, request.client, request.user)
        The token parameter is a dict that looks like::
{
u'access_token': u'6JwgO77PApxsFCU8Quz0pnL9s23016',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'scope': u'email address'
}
        The request is an object that contains a user object and a
client object.
"""
self._tokensetter = f
return f
def grantgetter(self, f):
"""Register a function as the grant getter.
The function accepts `client_id`, `code` and more::
@oauth.grantgetter
def grant(client_id, code):
return get_grant(client_id, code)
        It returns a grant object with at least this information:
- delete: A function to delete itself
"""
self._grantgetter = f
return f
def grantsetter(self, f):
"""Register a function to save the grant code.
The function accepts `client_id`, `code`, `request` and more::
@oauth.grantsetter
def set_grant(client_id, code, request, *args, **kwargs):
save_grant(client_id, code, request.user, request.scopes)
"""
self._grantsetter = f
return f
def authorize_handler(self, f):
"""Authorization handler decorator.
This decorator will sort the parameters and headers out, and
        pre-validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, **kwargs):
if request.method == 'GET':
# render a page for user to confirm the authorization
return render_template('oauthorize.html')
confirm = request.form.get('confirm', 'no')
return confirm == 'yes'
"""
@wraps(f)
def decorated(*args, **kwargs):
# raise if server not implemented
server = self.server
uri, http_method, body, headers = extract_params()
if request.method == 'GET':
redirect_uri = request.args.get('redirect_uri', None)
log.debug('Found redirect_uri %s.', redirect_uri)
try:
ret = server.validate_authorization_request(
uri, http_method, body, headers
)
scopes, credentials = ret
kwargs['scopes'] = scopes
kwargs.update(credentials)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e)
return redirect(e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e)
return redirect(e.in_uri(redirect_uri or self.error_uri))
except Exception as e:
log.warn('Exception: %r', e)
return redirect(add_params_to_uri(
self.error_uri, {'error': 'unknown'}
))
else:
redirect_uri = request.values.get('redirect_uri', None)
try:
rv = f(*args, **kwargs)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e)
return redirect(e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e)
return redirect(e.in_uri(redirect_uri or self.error_uri))
except Exception as e:
log.warn('Exception: %r', e)
return redirect(add_params_to_uri(
self.error_uri, {'error': 'unknown'}
))
if not isinstance(rv, bool):
                # it is a response or a redirect
return rv
if not rv:
# denied by user
e = oauth2.AccessDeniedError()
return redirect(e.in_uri(redirect_uri))
return self.confirm_authorization_request()
return decorated
def confirm_authorization_request(self):
"""When consumer confirm the authorization."""
server = self.server
scope = request.values.get('scope') or ''
scopes = scope.split()
credentials = dict(
client_id=request.values.get('client_id'),
redirect_uri=request.values.get('redirect_uri', None),
response_type=request.values.get('response_type', None),
state=request.values.get('state', None)
)
log.debug('Fetched credentials from request %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect_uri %s.', redirect_uri)
uri, http_method, body, headers = extract_params()
try:
ret = server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful.')
return create_response(*ret)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e)
return redirect(e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e)
return redirect(e.in_uri(redirect_uri or self.error_uri))
except Exception as e:
log.warn('Exception: %r', e)
return redirect(add_params_to_uri(
self.error_uri, {'error': 'unknown'}
))
def verify_request(self, scopes):
"""Verify current request, get the oauth data.
If you can't use the ``require_oauth`` decorator, you can fetch
the data in your request body::
def your_handler():
valid, req = oauth.verify_request(['email'])
if valid:
return jsonify(user=req.user)
return jsonify(status='error')
"""
uri, http_method, body, headers = extract_params()
return self.server.verify_request(
uri, http_method, body, headers, scopes
)
def token_handler(self, f):
"""Access/refresh token handler decorator.
        The decorated function should return a dictionary or None as
the extra credentials for creating the token response.
You can control the access method with standard flask route mechanism.
If you only allow the `POST` method::
@app.route('/oauth/token', methods=['POST'])
@oauth.token_handler
def access_token():
return None
"""
@wraps(f)
def decorated(*args, **kwargs):
server = self.server
uri, http_method, body, headers = extract_params()
credentials = f(*args, **kwargs) or {}
log.debug('Fetched extra credentials, %r.', credentials)
ret = server.create_token_response(
uri, http_method, body, headers, credentials
)
return create_response(*ret)
return decorated
def revoke_handler(self, f):
"""Access/refresh token revoke decorator.
Any return value by the decorated function will get discarded as
defined in [`RFC7009`_].
You can control the access method with the standard flask routing
        mechanism; as per [`RFC7009`_], it is recommended to only allow
the `POST` method::
@app.route('/oauth/revoke', methods=['POST'])
@oauth.revoke_handler
def revoke_token():
pass
.. _`RFC7009`: http://tools.ietf.org/html/rfc7009
"""
@wraps(f)
def decorated(*args, **kwargs):
server = self.server
token = request.values.get('token')
request.token_type_hint = request.values.get('token_type_hint')
if token:
request.token = token
uri, http_method, body, headers = extract_params()
ret = server.create_revocation_response(
uri, headers=headers, body=body, http_method=http_method)
return create_response(*ret)
return decorated
def require_oauth(self, *scopes):
"""Protect resource with specified scopes."""
def wrapper(f):
@wraps(f)
def decorated(*args, **kwargs):
for func in self._before_request_funcs:
func()
if hasattr(request, 'oauth') and request.oauth:
return f(*args, **kwargs)
valid, req = self.verify_request(scopes)
for func in self._after_request_funcs:
valid, req = func(valid, req)
if not valid:
if self._invalid_response:
return self._invalid_response(req)
return abort(401)
request.oauth = req
return f(*args, **kwargs)
return decorated
return wrapper
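# Illustrative sketch (not part of flask_oauthlib; `app`, `oauth` and the view
# below are hypothetical): a resource protected with require_oauth, which runs
# the registered before/after request hooks around verify_request.
#
#     @app.route('/api/me')
#     @oauth.require_oauth('email')
#     def me():
#         return jsonify(username=request.oauth.user.username)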
class OAuth2RequestValidator(RequestValidator):
"""Subclass of Request Validator.
:param clientgetter: a function to get client object
:param tokengetter: a function to get bearer token
:param tokensetter: a function to save bearer token
:param grantgetter: a function to get grant token
:param grantsetter: a function to save grant token
"""
def __init__(self, clientgetter, tokengetter, grantgetter,
usergetter=None, tokensetter=None, grantsetter=None):
self._clientgetter = clientgetter
self._tokengetter = tokengetter
self._usergetter = usergetter
self._tokensetter = tokensetter
self._grantgetter = grantgetter
self._grantsetter = grantsetter
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
        According to RFC 6749, client authentication is required in the
following cases:
Resource Owner Password Credentials Grant: see `Section 4.3.2`_.
Authorization Code Grant: see `Section 4.1.3`_.
Refresh Token Grant: see `Section 6`_.
.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
"""
if request.grant_type == 'password':
client = self._clientgetter(request.client_id)
return (not client) or client.client_type == 'confidential' or\
request.client_secret
auth_required = ('authorization_code', 'refresh_token')
return 'Authorization' in request.headers and\
request.grant_type in auth_required
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate itself in other means.
Other means means is described in `Section 3.2.1`_.
.. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
"""
auth = request.headers.get('Authorization', None)
log.debug('Authenticate client %r', auth)
if auth:
try:
_, s = auth.split(' ')
client_id, client_secret = decode_base64(s).split(':')
client_id = to_unicode(client_id, 'utf-8')
client_secret = to_unicode(client_secret, 'utf-8')
except Exception as e:
log.debug('Authenticate client failed with exception: %r', e)
return False
else:
client_id = request.client_id
client_secret = request.client_secret
client = self._clientgetter(client_id)
if not client:
log.debug('Authenticate client failed, client not found.')
return False
request.client = client
if client.client_secret != client_secret:
log.debug('Authenticate client failed, secret not match.')
return False
if client.client_type != 'confidential':
log.debug('Authenticate client failed, not confidential.')
return False
log.debug('Authenticate client success.')
return True
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Authenticate a non-confidential client.
:param client_id: Client ID of the non-confidential client
:param request: The Request object passed by oauthlib
"""
log.debug('Authenticate client %r.', client_id)
client = request.client or self._clientgetter(client_id)
if not client:
log.debug('Authenticate failed, client not found.')
return False
# attach client on request for convenience
request.client = client
return True
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
*args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri.
This method is used in the authorization code grant flow. It will
        compare redirect_uri strictly with the one stored in the grant token;
        you can add a `validate_redirect_uri` function on the grant for
        customized validation.
"""
client = client or self._clientgetter(client_id)
log.debug('Confirm redirect uri for client %r and code %r.',
client.client_id, code)
grant = self._grantgetter(client_id=client.client_id, code=code)
if not grant:
log.debug('Grant not found.')
return False
if hasattr(grant, 'validate_redirect_uri'):
return grant.validate_redirect_uri(redirect_uri)
log.debug('Compare redirect uri for grant %r and %r.',
grant.redirect_uri, redirect_uri)
testing = 'OAUTHLIB_INSECURE_TRANSPORT' in os.environ
if testing and redirect_uri is None:
# For testing
return True
return grant.redirect_uri == redirect_uri
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
This method is used in the refresh token grant flow. We return
the scope of the token to be refreshed so it can be applied to the
new access token.
"""
log.debug('Obtaining scope of refreshed token.')
tok = self._tokengetter(refresh_token=refresh_token)
return tok.scopes
def confirm_scopes(self, refresh_token, scopes, request, *args, **kwargs):
"""Ensures the requested scope matches the scope originally granted
by the resource owner. If the scope is omitted it is treated as equal
to the scope originally granted by the resource owner.
DEPRECATION NOTE: This method will cease to be used in oauthlib>0.4.2,
future versions of ``oauthlib`` use the validator method
``get_original_scopes`` to determine the scope of the refreshed token.
"""
if not scopes:
log.debug('Scope omitted for refresh token %r', refresh_token)
return True
log.debug('Confirm scopes %r for refresh token %r',
scopes, refresh_token)
tok = self._tokengetter(refresh_token=refresh_token)
return set(tok.scopes) == set(scopes)
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Default redirect_uri for the given client."""
request.client = request.client or self._clientgetter(client_id)
redirect_uri = request.client.default_redirect_uri
log.debug('Found default redirect uri %r', redirect_uri)
return redirect_uri
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Default scopes for the given client."""
request.client = request.client or self._clientgetter(client_id)
scopes = request.client.default_scopes
log.debug('Found default scopes %r', scopes)
return scopes
def invalidate_authorization_code(self, client_id, code, request,
*args, **kwargs):
"""Invalidate an authorization code after use.
We keep the temporary code in a grant, which has a `delete`
function to destroy itself.
"""
log.debug('Destroy grant token for client %r, %r', client_id, code)
grant = self._grantgetter(client_id=client_id, code=code)
if grant:
grant.delete()
def save_authorization_code(self, client_id, code, request,
*args, **kwargs):
"""Persist the authorization code."""
log.debug(
'Persist authorization code %r for client %r',
code, client_id
)
request.client = request.client or self._clientgetter(client_id)
self._grantsetter(client_id, code, request, *args, **kwargs)
return request.client.default_redirect_uri
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token."""
log.debug('Save bearer token %r', token)
self._tokensetter(token, request, *args, **kwargs)
return request.client.default_redirect_uri
def validate_bearer_token(self, token, scopes, request):
"""Validate access token.
:param token: A string of random characters
:param scopes: A list of scopes
:param request: The Request object passed by oauthlib
        The validation checks:
1) if the token is available
2) if the token has expired
3) if the scopes are available
"""
log.debug('Validate bearer token %r', token)
tok = self._tokengetter(access_token=token)
if not tok:
msg = 'Bearer token not found.'
request.error_message = msg
log.debug(msg)
return False
# validate expires
if datetime.datetime.utcnow() > tok.expires:
msg = 'Bearer token is expired.'
request.error_message = msg
log.debug(msg)
return False
# validate scopes
if not set(tok.scopes).issuperset(set(scopes)):
msg = 'Bearer token scope not valid.'
request.error_message = msg
log.debug(msg)
return False
request.access_token = tok
request.user = tok.user
request.scopes = scopes
if hasattr(tok, 'client'):
request.client = tok.client
elif hasattr(tok, 'client_id'):
request.client = self._clientgetter(tok.client_id)
return True
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client."""
log.debug('Validate client %r', client_id)
client = request.client or self._clientgetter(client_id)
if client:
# attach client to request object
request.client = client
return True
return False
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Ensure the grant code is valid."""
client = client or self._clientgetter(client_id)
log.debug(
'Validate code for client %r and code %r', client.client_id, code
)
grant = self._grantgetter(client_id=client.client_id, code=code)
if not grant:
log.debug('Grant not found.')
return False
if hasattr(grant, 'expires') and \
datetime.datetime.utcnow() > grant.expires:
log.debug('Grant is expired.')
return False
request.state = kwargs.get('state')
request.user = grant.user
request.scopes = grant.scopes
return True
def validate_grant_type(self, client_id, grant_type, client, request,
*args, **kwargs):
"""Ensure the client is authorized to use the grant type requested.
It will allow any of the four grant types (`authorization_code`,
`password`, `client_credentials`, `refresh_token`) by default.
        Implement `allowed_grant_types` on the client object to restrict
        the grant types it may use.
It is suggested that `allowed_grant_types` should contain at least
`authorization_code` and `refresh_token`.
"""
if self._usergetter is None and grant_type == 'password':
log.debug('Password credential authorization is disabled.')
return False
default_grant_types = (
'authorization_code', 'password',
'client_credentials', 'refresh_token',
)
if grant_type not in default_grant_types:
return False
if hasattr(client, 'allowed_grant_types') and \
grant_type not in client.allowed_grant_types:
return False
if grant_type == 'client_credentials':
if not hasattr(client, 'user'):
log.debug('Client should have a user property')
return False
request.user = client.user
return True
def validate_redirect_uri(self, client_id, redirect_uri, request,
*args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri.
This method is used in the authorization code grant flow and also
        in the implicit grant flow. It strictly checks whether redirect_uri is
        in the client's redirect_uris; you can add a `validate_redirect_uri`
        function on the client for customized validation.
"""
request.client = request.client or self._clientgetter(client_id)
client = request.client
if hasattr(client, 'validate_redirect_uri'):
return client.validate_redirect_uri(redirect_uri)
return redirect_uri in client.redirect_uris
def validate_refresh_token(self, refresh_token, client, request,
*args, **kwargs):
"""Ensure the token is valid and belongs to the client
This method is used by the authorization code grant indirectly by
issuing refresh tokens, resource owner password credentials grant
(also indirectly) and the refresh token grant.
"""
token = self._tokengetter(refresh_token=refresh_token)
if token and token.client_id == client.client_id:
# Make sure the request object contains user and client_id
request.client_id = token.client_id
request.user = token.user
return True
return False
def validate_response_type(self, client_id, response_type, client, request,
*args, **kwargs):
"""Ensure client is authorized to use the response type requested.
        It will allow either of the two response types (`code`, `token`) by
        default. Implement `allowed_response_types` on the client object
        to restrict the response types it may use.
"""
if response_type not in ('code', 'token'):
return False
if hasattr(client, 'allowed_response_types'):
return response_type in client.allowed_response_types
return True
def validate_scopes(self, client_id, scopes, client, request,
*args, **kwargs):
"""Ensure the client is authorized access to requested scopes."""
if hasattr(client, 'validate_scopes'):
return client.validate_scopes(scopes)
return set(client.default_scopes).issuperset(set(scopes))
def validate_user(self, username, password, client, request,
*args, **kwargs):
"""Ensure the username and password is valid.
Attach user object on request for later using.
"""
log.debug('Validating username %r and password %r',
username, password)
if self._usergetter is not None:
user = self._usergetter(
username, password, client, request, *args, **kwargs
)
if user:
request.user = user
return True
return False
log.debug('Password credential authorization is disabled.')
return False
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
"""
if token_type_hint:
tok = self._tokengetter(**{token_type_hint: token})
else:
tok = self._tokengetter(access_token=token)
if not tok:
tok = self._tokengetter(refresh_token=token)
if tok and tok.client_id == request.client.client_id:
request.client_id = tok.client_id
request.user = tok.user
tok.delete()
return True
msg = 'Invalid token supplied.'
log.debug(msg)
request.error_message = msg
return False
|
|
import datetime as dt
from functools import wraps
import requests
import time
import pandas as pd
import numpy as np
import numpy.random as nr
from sklearn.preprocessing import StandardScaler
from torch.autograd import Variable
import torch as T
import toolz.curried as z
toyear, tomonth = dt.date.today().year, dt.date.today().month
yrmths = [(yr, m) for yr in range(2000, 2018)
for m in range(1, 13)
if (yr, m) <= (toyear, tomonth)]
flatten_multindex = lambda xs: [
'_'.join([lvl1, lvl2.lower()]) if lvl2 else lvl1 for lvl1, lvl2 in xs]
# mse = lambda x, y: np.sqrt(((x - y) ** 2).sum())
mse = lambda x, y: (((x - y) ** 2).sum()) / len(x)
def check_one2one(df, k1, krest):
krest = [krest] if isinstance(krest, str) else krest
assert df.groupby(
[k1] + krest).size().reset_index(drop=0).groupby(k1).size().eq(1).all()
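# Illustrative sketch (hypothetical frame; not part of the original module):
# check_one2one asserts that every value of `k1` maps to exactly one
# combination of the `krest` columns.
def _example_check_one2one():
    df = pd.DataFrame({'Id': [1, 1, 2], 'Name': ['a', 'a', 'b']})
    check_one2one(df, 'Id', 'Name')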
def random_requests(urls, mean=2, sd=2, min=.1):
"Random intervals between sequential requests."
for u in urls:
r = requests.get(u)
if r.from_cache:
continue
sleep_time = max((np.random.randn() + mean) * sd, min)
time.sleep(sleep_time)
def impute(df):
df = df.copy()
for c in df:
s = df[c]
        bm = ~(s == s)  # NaN mask: NaN != NaN
df.loc[bm, c] = s.median()
return df
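# Illustrative sketch (hypothetical frame; not part of the original module):
# impute fills the NaNs in each column with that column's median.
def _example_impute():
    df = pd.DataFrame({'A': [1.0, np.nan, 3.0], 'B': [np.nan, 2.0, 4.0]})
    return impute(df)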
def ends_with(suff):
return (lambda xs: [x for x in xs if x.endswith(suff)])
######################################################################
# Batches 1
######################################################################
def batchify(x, batch_size=5, train_len=100):
"""Cut a sequence x into smaller `train_len`-long sequences.
Then return a list where each element contains
`batch_size` of these sequences.
"""
# Create extra dimension; as if making a list
# of the `train_len`-long mini-sequences
seq_ixs = np.arange(0, len(x), train_len)[:-1]
batchified = np.stack([x[six:six + train_len] for six in seq_ixs])
batch_ixs = np.arange(0, len(batchified), batch_size)[:-1]
    return [batchified[bix:bix + batch_size] for bix in batch_ixs]
def test_batchify():
x = np.array(
[[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11],
[12, 13],
[14, 15],
[16, 17],
[18, 19]])
[only_batch] = batchify(x, batch_size=2, train_len=3)
subseq1, subseq2 = only_batch
shouldbe1 = np.array(
[[0, 1],
[2, 3],
[4, 5]])
assert (subseq1 == shouldbe1).all()
shouldbe2 = np.array(
[[6, 7],
[8, 9],
[10, 11]])
assert (subseq2 == shouldbe2).all()
######################################################################
# Batches 2
######################################################################
def to_sub_seqs(x: np.array, seq_len=5, warn=True):
ixs = np.arange(0, len(x) + 1, seq_len)[:-1]
subseqs = [x[i:i + seq_len] for i in ixs]
res = np.array(subseqs)
to_sub_seqs.diff = diff = x.size - res.size
if warn and diff:
print('{} elems dropped from end'.format(diff))
return res
def test_to_sub_seqs():
for i in range(8, 12):
res = to_sub_seqs(np.arange(i), 5, warn=False)
assert to_sub_seqs.diff == (i % 5)
assert len(res) == i // 5
def batch_getterer(x, *, y=None, batch_size=5, var=True):
batch_ixs = np.arange(0, len(x) + 1, batch_size)[:-1]
nslices = len(batch_ixs) * batch_size
print('nslices', nslices, 'len(x)', len(x))
if nslices < len(x):
print('Warning: {} sequences clipped'.format(len(x) - nslices))
def get_batch(x, i):
ix = batch_ixs[i]
res = x[ix:ix + batch_size]
if var:
return Variable(res)
return res
def batch_getter(i):
xres = get_batch(x, i)
if y is None:
return xres
return xres, get_batch(y, i)
batch_getter.batch_size = batch_size
return batch_getter, range(len(batch_ixs))
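# Illustrative sketch (hypothetical data; not part of the original module):
# batch_getterer returns a closure over the data plus the range of valid
# batch indices; var=False keeps plain tensors instead of Variables.
def _example_batch_getterer():
    x = T.randn(10, 2)
    getter, batch_range = batch_getterer(x, batch_size=3, var=False)
    return [getter(i).size() for i in batch_range]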
##########################################################################
# Batches 3
##########################################################################
isint = lambda x: np.issubdtype(type(x), int)
class BatchArraySingle(object):
def __init__(self, x=None, seq_len=5, truncate=True, tovar=True):
self.truncate = truncate
self.x = x
self.N = len(x)
self.seq_len = seq_len
if truncate:
self.rem = BatchArraySingle(x=x, seq_len=seq_len, truncate=False)
n_segs = self.N // seq_len
# print(n_segs)
self.ix = np.arange(0, n_segs * seq_len, seq_len, dtype=int)
retfuncs = [Variable, T.stack] if tovar else [T.stack]
else:
# remainder
self.ix = np.arange(0, self.N, seq_len, dtype=int)
retfuncs = [list, z.map(Variable)] if tovar else []
self.retfunc = z.compose(*retfuncs)
def __getitem__(self, ix):
bixs = self.ix[ix]
if isint(bixs):
batches = self.x[bixs:bixs + self.seq_len]
else:
batches = [self.x[bix:bix + self.seq_len] for bix in bixs]
return self.retfunc(batches)
idxmax = property(lambda x: len(x.ix) - 1)
class BatchArray(BatchArraySingle):
def __init__(self, x=None, y=None, seq_len=5, truncate=True, tovar=True, batch_size=20):
if seq_len is None:
seq_len = len(y)
super().__init__(x=x, seq_len=seq_len, truncate=truncate, tovar=tovar)
self.xb = BatchArraySingle(
x=x, seq_len=seq_len, truncate=truncate, tovar=tovar)
if truncate:
self.rem = BatchArray(x=x, y=y, seq_len=seq_len,
truncate=False, tovar=tovar)
self.y = y
self.yb = BatchArraySingle(
x=y, seq_len=seq_len, truncate=truncate, tovar=tovar)
self.batch_size = batch_size
self.num_batches = len(self.ix) // batch_size
self.num_truncated_rows = self.num_batches * batch_size * seq_len
self.num_leftover_rows = len(self.y) - self.num_truncated_rows
def __getitem__(self, ix):
return self.xb.__getitem__(ix), self.yb.__getitem__(ix)
def batch_ix_iter(self, batch_size=None):
batch_size = batch_size or self.batch_size
nb = len(self.ix) // batch_size
return np.arange(nb * batch_size).reshape(-1, batch_size)
@property
def train_samples_y(self):
return self.y[:self.num_truncated_rows].numpy().ravel()
        # Alternative (unreachable in the original; kept here as a comment):
        # return np.concatenate([self[bix][1].data.numpy().ravel()
        #                        for bix in self.batch_ix_iter()])
@property
def train_samples_x(self):
return self.x[:self.num_truncated_rows]
@property
def test_samples_x(self):
return self.x[self.num_truncated_rows:]
@property
def test_samples_y(self):
return self.y[self.num_truncated_rows:]
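# Illustrative access pattern (not part of the original module; shapes are
# hypothetical, and it assumes the pre-0.4 torch/Variable API this file is
# written against):
#
#     ba = BatchArray(x=T.randn(103, 4), y=T.randn(103, 1), seq_len=5, batch_size=4)
#     for bix in ba.batch_ix_iter():
#         xb, yb = ba[bix]      # stacked 5-step sub-sequences for x and y
#     ba.train_samples_y        # targets covered by the truncated batches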
######################################################################
# Tensor manip
######################################################################
def collapse_dims_(t, dim_prods: [(int,)]):
    '''Collapsed sizes for a tensor: tens[a x b x c], [(0, 1), (2,)]
    -> [a * b, c]
    '''
dims = t.size()
new_dims = [int(np.prod([dims[i] for i in dimtup]))
for dimtup in dim_prods]
# new_dims = lmap(np.prod, new_dims)
return new_dims
def collapse_dims(t, dim_prods: [(int,)]):
new_dims = collapse_dims_(t, dim_prods=dim_prods)
# print(new_dims)
return t.contiguous().view(*new_dims)
def test_collapse_dims_():
assert collapse_dims_(T.randn(3, 4, 5), [(0, 1), (2,)]) == [12, 5]
assert collapse_dims_(T.randn(3, 4, 5), [(0,), (1, 2,)]) == [3, 20]
def ravel(t):
try:
return t.view(-1, t.size(-1))
except RuntimeError as e:
return ravel(t.contiguous())
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
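# Illustrative sketch (hypothetical sizes; not part of the original module, and
# it assumes the pre-0.4 torch.autograd.Variable API used throughout this file):
# an LSTM hidden state is an (h, c) tuple, so repackage_hidden recurses into the
# tuple and rewraps each tensor in a fresh Variable, cutting the autograd history.
def _example_repackage_hidden():
    hidden = (Variable(T.zeros(1, 5, 8)), Variable(T.zeros(1, 5, 8)))
    return repackage_hidden(hidden)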
##########################################################################
# Validation Utilities
##########################################################################
def add_dates_(df, l):
"""Batches will have `l` leftover elements.
I stripped out the dates from the features,
and just left day of the year integers, so this function
reconstructs the actual dates from these day of year features.
"""
ldays = df.iloc[-2 * l:].assign(
        Yr=lambda x: (x.Doy.shift() > x.Doy  # new year whenever day-of-year drops
).fillna(False).astype(int).cumsum() # .add(2000)
)
ldays['Yr'] = ldays['Yr'] + (2017 - ldays['Yr'].max())
ldays['Dt'] = np.concatenate([pd.date_range('{}-01-01'.format(yr), '{}-12-31'.format(yr), freq='D')[gdf.Doy - 1].date
for yr, gdf in ldays.groupby(['Yr'], sort=False)])
ld1, ld2 = np.array_split(ldays, 2)
return ld1, ld2, ldays
##########################################################################
# Processing
##########################################################################
def rep_with_dummies_(df, col):
df = df.copy()
newcs = pd.get_dummies(df[col]).astype(int)
for c in newcs:
df[c] = newcs[c]
return df.drop(col, axis=1)
def replace_with_dummies(df, cols):
"""Return a copy of df w/ each of `cols` replaced
with its dummified self"""
for c in cols:
df = rep_with_dummies_(df, c)
return df
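# Illustrative sketch (hypothetical frame; not part of the original module):
# each listed column is replaced by its one-hot dummy columns.
def _example_replace_with_dummies():
    df = pd.DataFrame({'Color': ['red', 'blue', 'red'], 'Val': [1, 2, 3]})
    return replace_with_dummies(df, ['Color'])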
def filter_dtypes(df, dtypes=[float]):
cs = [k for k, v in df.dtypes.items() if any(v == d for d in dtypes)]
return cs
def log_(s):
lg = np.log10(s)
lg[np.isneginf(lg)] = 0
return lg
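# Illustrative sketch (hypothetical series; not part of the original module):
# log_ maps zero counts to 0 instead of -inf.
def _example_log_():
    return log_(pd.Series([0.0, 1.0, 10.0, 100.0]))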
def join_pollen_weather(poldf, ddf, time_cols, ycol='Logcnt'):
pol_joinable = poldf.set_index(
'Date')[['Count', 'Prev_cnt', 'Prev_cnt_null']].ix[ddf.Dt.dt.date]
xdf = (ddf.assign(**{
c: pol_joinable[c].tolist() for c in pol_joinable
}).assign(
Logcnt=lambda x: log_(x.Count),
Log_prev_cnt=lambda x: log_(x.Prev_cnt),
).dropna(axis=0)
.assign(
Day_diff=lambda x: (x.Day_int - x.Day_int.shift(1)
).fillna(1).sub(1).astype(int)
))
xdf.Prev_cnt_null = xdf.Prev_cnt_null.astype(int)
feats = filter_dtypes(
ddf, dtypes=[int, float]) + ['Log_prev_cnt', 'Prev_cnt_null', 'Day_diff']
feats = np.setdiff1d(feats, time_cols + ['Y']).tolist()
rxdf = xdf[feats].copy()
ry = xdf[ycol].astype(np.float32)
ss = StandardScaler()
rx = ss.fit_transform(rxdf).astype(dtype=np.float32)
xt = T.from_numpy(rx)
yt = T.from_numpy(ry.values[:, None])
return xdf, xt, yt, rx, rxdf, ry
######################################################################
# fake data
######################################################################
def gen_dat1(P=3, N=20, dtype=np.float32):
x = nr.randint(0, high=5, size=(N, P)).astype(dtype)
sm = x.sum(axis=1)
y = sm + np.r_[0, sm[:-1]]
return x, y[:, None]
def gen_dat2(P=3, N=20, dtype=np.float32):
x = nr.randint(-5, high=5, size=(N, P)).astype(dtype)
return x, x.sum(axis=1).cumsum()[:, None]
######################################################################
# random stuff
######################################################################
def check_cached(min_time):
def deco(f):
@wraps(f)
def wrapper(*a, **k):
t = time.perf_counter()
res = f(*a, **k)
e = time.perf_counter() - t
if e > min_time:
print('{:.2f}s elapsed'.format(e))
return res
return wrapper
return deco
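# Illustrative sketch (hypothetical threshold and workload; not part of the
# original module): only calls slower than 0.5s report their elapsed time.
@check_cached(0.5)
def _example_slow_sum(n=10 ** 6):
    return sum(range(n))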
def show_dt(X, y, criterion='gini', max_depth=4):
"Run decision tree on data, output the graph, and open it"
from sklearn.tree import DecisionTreeClassifier, export_graphviz
dtc = DecisionTreeClassifier(
criterion=criterion, max_depth=max_depth).fit(X, y)
export_graphviz(dtc, feature_names=X.columns, out_file='tree.dot')
get_ipython().system('dot -Tpng tree.dot -o tree.png')
get_ipython().system('open tree.png')
def read(fn):
with open(fn, 'r') as f:
return f.read()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._tags_operations import build_create_or_update_request, build_create_or_update_value_request, build_delete_request, build_delete_value_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TagsOperations:
"""TagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2018_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def delete_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> None:
"""Deletes a tag value.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to delete.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
template_url=self.delete_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore
@distributed_trace_async
async def create_or_update_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> "_models.TagValue":
"""Creates a tag value. The name of the tag must already exist.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to create.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagValue, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2018_05_01.models.TagValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_or_update_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
template_url=self.create_or_update_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagValue', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
tag_name: str,
**kwargs: Any
) -> "_models.TagDetails":
"""Creates a tag in the subscription.
The tag name can have a maximum of 512 characters and is case insensitive. Tag names created by
Azure have prefixes of microsoft, azure, or windows. You cannot create tags with one of these
prefixes.
:param tag_name: The name of the tag to create.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagDetails, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2018_05_01.models.TagDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_or_update_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagDetails', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
tag_name: str,
**kwargs: Any
) -> None:
"""Deletes a tag from the subscription.
You must remove all values from a resource tag before you can delete it.
:param tag_name: The name of the tag.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.TagsListResult"]:
"""Gets the names and values of all resource tags that are defined in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagsListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2018_05_01.models.TagsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TagsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames'} # type: ignore
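# Illustrative sketch (not part of the generated client): typical async usage of
# TagsOperations through a ResourceManagementClient whose `tags` attribute is an
# instance of this class; the `client` argument is hypothetical and must be
# constructed with credentials and a subscription id by the caller.
async def _example_tags_usage(client) -> None:
    await client.tags.create_or_update("environment")
    await client.tags.create_or_update_value("environment", "staging")
    async for tag in client.tags.list():
        print(tag.tag_name)
    await client.tags.delete_value("environment", "staging")
    await client.tags.delete("environment")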
|
|
# Copyright (c) 2016 636F57@GitHub
# This software is released under an MIT license.
# See LICENSE for full details.
import discord
from discord.ext import commands
import random
import glob
from os.path import basename
import time
import aiohttp
from bs4 import BeautifulSoup
import asyncio
import html
import calendar
from cactusconsts import CactusConsts
import traceback
if not discord.opus.is_loaded(): #this is necessary for voice activities
discord.opus.load_opus('libopus-0.dll')
print("opus dll is loaded = ", discord.opus.is_loaded())
description = '''Utility Bot custom-made for this server. :cactus:'''
bot = commands.Bot(command_prefix='#', description=description)
#global variables for music
g_listEcho = []
Mee6_ID = CactusConsts.Mee6_ID
fredboadPrefix = ";;play "
marshmallowPrefix = ":request "
#global variables for RSS
g_RSSdictkey = ["index", "url", "lastModified", "eTag", "lastcheck", "channel_ID", "userID"]
filenameRSS = "RSSdata" #RSSdata format: "index","url","lastModified","eTag","lastcheck","channel_ID","userID"\n for each entry
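# Illustrative RSSdata line (hypothetical values), matching g_RSSdictkey order:
#   1,https://www.reddit.com/r/python/.rss,,,1480000000,123456789012345678,234567890123456789
#   (index, url, empty lastModified, empty eTag, lastcheck epoch, channel ID, user ID)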
g_session = aiohttp.ClientSession()
g_intervalhours = 2 # RSS check interval in hours
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
#### For music utility ####
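# How the echo mechanism below works: the feedf/feedm/ytf/ytm commands post a
# "!youtube <query>" message and push the target bot's play prefix onto
# g_listEcho; when Mee6 replies with the resulting YouTube link, on_message
# prepends the stored prefix so FredBoat/Marshmallow queues the song.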
@bot.event
async def on_message(message):
print("on message : ", message.author.name, message.author.id)
global g_listEcho
if (message.author.id == Mee6_ID):
print("message by Mee6")
if len(g_listEcho) > 0:
#print(message.content)
if CactusConsts.Mee6_notfound_msg in message.content:
print("canceling 1 echo")
g_listEcho.pop(0)
elif 'youtu' in message.content:
print("in echo")
await bot.send_message(message.channel, g_listEcho[0] + message.content)
g_listEcho.pop(0)
if (len(g_listEcho) > 0 ) and (g_listEcho[0] == marshmallowPrefix):
await asyncio.sleep(10) # since requests to marshmallow must have 10sec intervals
else:
await bot.process_commands(message)
@bot.command()
async def songfiles():
"""List the available songlist category options."""
strList = ""
fileList = glob.glob("./Songs/*.txt")
if len(fileList) == 0:
strList = "No file found."
else:
for file in fileList:
strList += basename(file) + " "
await bot.say(strList)
@bot.command()
async def feeda(number : int, category='favorite'):
"""Feed number of songs to Aethex, randomly selecting from the txt file."""
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
print("category = ", category)
strFile = "./Songs/" + category + ".txt"
with open(strFile, "rt") as f:
listSongs = f.readlines()
print("list length = ", len(listSongs))
for i in range(number):
strCommand = "-play " + listSongs[random.randint(0, len(listSongs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def feedf(number : int, category='favorite'):
"""Feed number of songs to FredBoat, randomly selecting from the txt file."""
global g_listEcho
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
print("category = ", category)
strFile = "./Songs/" + category + ".txt"
with open(strFile, "rt") as f:
listSongs = f.readlines()
print("list length = ", len(listSongs))
for i in range(number):
strCommand = "!youtube " + listSongs[random.randint(0, len(listSongs)-1)] + "\n"
await bot.say(strCommand)
g_listEcho.append(fredboadPrefix)
@bot.command()
async def feedm(number : int, category='favorite'):
"""Feed number of songs to Marshmallow, randomly selecting from the txt file."""
global g_listEcho
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
print("category = ", category)
strFile = "./Songs/" + category + ".txt"
with open(strFile, "rt") as f:
listSongs = f.readlines()
print("list length = ", len(listSongs))
for i in range(number):
strCommand = "!youtube " + listSongs[random.randint(0, len(listSongs)-1)] + "\n"
await bot.say(strCommand)
g_listEcho.append(marshmallowPrefix)
time. sleep(11) # since requests to marshmallow must have 10sec intervals
@bot.command()
async def feedf_url(number : int):
"""Feed number of URLs to FredBoat, randomly selecting from the FavoriteURLs file."""
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
strFile = "./Songs/FavoriteURLs"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
for i in range(number):
strCommand = fredboadPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def feedm_url(number : int):
"""Feed number of URLs to Marshmallow, randomly selecting from the FavoriteURLs file."""
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
strFile = "./Songs/FavoriteURLs"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
for i in range(number):
strCommand = marshmallowPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
time. sleep(11) # since requests to marshmallow must have 10sec intervals
@bot.command()
async def feedf_url_playlist():
"""Feed one of playlist url to FredBoat, randomly selecting from the FavoritePlaylists file."""
strFile = "./Songs/FavoritePlaylists"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
strCommand = fredboadPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def feedm_url_playlist():
"""Feed one of playlist url to Marshmallow, randomly selecting from the FavoritePlaylists file."""
strFile = "./Songs/FavoritePlaylists"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
strCommand = marshmallowPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def favor(song):
"""Add the song to Favorite.txt file."""
if song == "":
await bot.say("You must specify the song to add.")
with open("./Songs/Favorite.txt", "a+") as f:
f.write(song + "\n")
await bot.say(song + " is added. :cactus:")
@bot.command()
async def favor_url(url):
"""Add the URL to FavoriteURLs file."""
if url == "":
await bot.say("You must specify the URL to add.")
with open("./Songs/FavoriteURLs", "a+") as f:
f.write(url + "\n")
await bot.say(url + " is added. :cactus:")
@bot.command()
async def favor_url_playlist(url):
"""Add the playlist URL to FavoritePlaylists file."""
if url == "":
await bot.say("You must specify the URL to add.")
with open("./Songs/FavoritePlaylists", "a+") as f:
f.write(url + "\n")
await bot.say(url + " is added. :cactus:")
@bot.command(pass_context=True)
async def join(ctx):
"""Let CactusBot to join the voice channel."""
print(ctx.message.author)
voiceclient = bot.voice_client_in(ctx.message.server)
print(ctx.message.server)
    if voiceclient is None:
print(ctx.message.author.voice.voice_channel)
await bot.join_voice_channel(ctx.message.author.voice.voice_channel)
    elif voiceclient.channel != ctx.message.author.voice.voice_channel:
        await voiceclient.move_to(ctx.message.author.voice.voice_channel)
@bot.command()
async def ytm(text):
"""Feed search result to Marshmallow."""
global g_listEcho
await bot.say("!youtube " + text)
g_listEcho.append(marshmallowPrefix)
@bot.command()
async def ytf(text):
"""Feed search result to FredBoat."""
global g_listEcho
await bot.say("!youtube " + text)
g_listEcho.append(fredboadPrefix)
#############################
#### For RSS utility #####
# return the channel ID of the given name in the server
# return "" when not found
def get_channel_ID(bot, server_ID, channel_name):
if channel_name == "":
return ""
for channel in bot.get_server(server_ID).channels:
if channel.name == channel_name:
return channel.id
return ""
def read_rssfile():
global g_RSSdictkey
listRSSdict = []
with open(filenameRSS, "rt") as f:
for line in f:
if len(line) > 1:
line = line.lower()
listRSSdict.append(dict(zip(g_RSSdictkey, line.strip().split(','))))
return listRSSdict
def max_index_of_rssfile():
max_index = 0
listRSSdict = read_rssfile()
if len(listRSSdict) > 0:
max_index = listRSSdict[len(listRSSdict)-1]["index"] #assume the last entry always has the max index
return max_index
def write_rssfile(listRSSdict):
global g_RSSdictkey
with open(filenameRSS, "wt") as f:
for rss in listRSSdict:
line = ""
for key in g_RSSdictkey:
line += str(rss[key]) + ","
f.write(line[:-1]+"\n")
print("successfully wrote listRSSdict to the file.")
return
@bot.command(pass_context=True)
async def rss_add_reddit(ctx):
"""Specify the subreddit name and channel name. Add the subreddit to RSS check-list."""
command,sub,channel_name = ctx.message.content.split(' ')
line = str(int(max_index_of_rssfile())+1)+",https://www.reddit.com/r/"+ sub +"/.rss,,,"+str(int(time.time()))
channelID = get_channel_ID(bot, ctx.message.server.id, channel_name)
print("CACTUS ROOMS SERVER ID:",ctx.message.server.id)
if channelID == "":
channelID = ctx.message.channel.id
line += ","+ channelID + "," + ctx.message.author.id + "\n"
with open(filenameRSS, "a+") as f:
f.write(line)
await bot.say(":cactus:"+sub+" was added to RSS list.:cactus:")
@bot.command(pass_context=True)
async def rss_add_github(ctx):
"""Specify github repo URL and channel name. Add the repo to RSS check-list."""
command,url,channel_name = ctx.message.content.split(' ')
    if 'github' not in url:
await bot.say("It is not GitHub URL.")
return
with open(filenameRSS, "a+") as f:
if url[len(url)-1] != '/':
url += '/'
line = str(int(max_index_of_rssfile())+1)+","+url+"commits/master.atom,,,"+str(int(time.time()))
channelID = get_channel_ID(bot, ctx.message.server.id, channel_name)
if channelID == "":
channelID = ctx.message.channel.id
line += ","+ channelID + "," + ctx.message.author.id + "\n"
f.write(line)
await bot.say(":cactus:"+url+" was added to RSS list.:cactus:")
@bot.command(pass_context=True)
async def rss_list(ctx):
"""List all the RSS URLs requested by you."""
bAll = False
# only server owner can see all the URLs in the list
if ctx.message.author.id == ctx.message.server.owner.id:
bAll = True
listRSSdict = read_rssfile()
if len(listRSSdict) == 0:
await bot.say("There is no URL in the list.")
for rss in listRSSdict:
if bAll or ctx.message.author.id == rss["userID"]:
channel_name = bot.get_channel(rss["channel_ID"]).name
await bot.say(rss["index"]+" : " + rss["url"] +" to #" + channel_name) #list index, URL, and channel to cat
@bot.command()
async def rss_del(index):
"""Delete the specified index entry from RSS check-list."""
listRSSdict = read_rssfile()
output = []
for rss in listRSSdict:
if rss["index"] != index:
output.append(rss)
write_rssfile(output)
if len(output) < len(listRSSdict):
await bot.say(index+" was deleted.")
else:
await bot.say(index+" was not found in the list.")
# function that is called as a task to fetch and report RSS updates
async def checkRSS(bot):
global g_RSSdictkey
global g_session
try:
while not bot.is_closed:
print("now in checkRSS.")
while bot.is_logged_in:
print("now start checking RSS updates...")
listRSSdict = read_rssfile()
if len(listRSSdict) == 0:
print("no RSS urls found.")
else:
header = {'User-Agent':CactusConsts.UserAgentName}
for rss in listRSSdict:
rss_backup = rss
try:
print("checking RSS of ", rss["url"])
if len(rss["lastModified"]) > 0:
header['If-Modified-Since'] = rss["lastModified"] #Last-Modified
if len(rss["eTag"]) > 0:
header['If-None-Match'] = rss["eTag"] #ETAG
response = await g_session.get(rss["url"], headers = header)
print("response status=",response.status)
if response.status == 304:
print("no update for ", rss["url"])
elif response.status == 200:
#print(response.headers)
if 'LAST-MODIFIED' in response.headers:
rss["lastModified"] = response.headers['LAST-MODIFIED']
else:
rss["lastModified"] = ""
if 'ETAG' in response.headers:
rss["eTag"] = response.headers['ETAG']
else:
rss["eTag"] = ""
body = await response.read()
soup = BeautifulSoup(body)
entries = soup.find_all('entry')
if 'reddit' in rss["url"]:
await process_reddit(bot, entries, rss["lastcheck"], bot.get_channel(rss["channel_ID"]))
elif 'github' in rss["url"]:
await process_github(bot, entries, rss["lastcheck"], bot.get_channel(rss["channel_ID"]))
else:
await bot.say("Failed to get RSS feed from the server. " + rss["url"])
response.close()
rss["lastcheck"] = int(time.time())
except:
rss = rss_backup
print("error in checkRSS:",rss["url"])
print(traceback.format_exc())
write_rssfile(listRSSdict)
await asyncio.sleep(g_intervalhours*3600)
await asyncio.sleep(30) #wait 30 seconds then retry
except asyncio.CancelledError:
print("checkRSS task is cancelled by program")
except Exception as e:
print("Error in checkRSS:", e.args)
# functions which actually parse the HTML and make the bot say the results
async def process_reddit(bot, entries, lastcheck, channel):
for entry in entries:
if is_updated(entry.find('updated').text, lastcheck):
postcat = entry.find('category')
#print(postcat)
strSay = ":cactus:*New Post at " + postcat['term'] + ' (' + postcat['label'] + ')*:cactus:\n\n'
strSay += "**Title : " + entry.find('title').text + '**\n'
#print(entry.find('content').text)
postcontent = html.unescape(entry.find('content').text)
#print(postcontent)
postcontent = BeautifulSoup(postcontent)
urlcontent = postcontent.find_all('a')
#print(urlcontent)
for url in urlcontent:
if '[comments]' in url:
strSay += url['href'] + "\n"
break
#print(strSay)
await bot.send_message(channel, strSay)
async def process_github(bot, entries, lastcheck, channel):
for entry in entries:
#print(entry)
if is_updated(entry.find('updated').text, lastcheck) :
author = entry.find('author')
strSay = ":cactus:*New Commit at GitHub by " + author.find('name').text + '*:cactus:\n\n'
strSay += "**Comment : " + entry.find('title').text + '**\n'
strSay += entry.find('link')['href']
print(strSay)
await bot.send_message(channel, strSay)
# updatedtime should be in the format like: 2016-11-11T12:38:34+02:00(reddit) or 2016-11-11T12:38:34Z(github) #changed 8th.dec.2016
# lastcheck is the string which is stored in RSSfile
def is_updated(updatedtime, lastcheck):
print(updatedtime)
shiftsec = 0
if '+' in updatedtime:
times = updatedtime.split('+')
updatedtime = times[0]
        shifttimes = times[1].split(':')  # offset "HH:MM" -> [hours, minutes]
        shiftsec = int(shifttimes[0]) * 3600 + int(shifttimes[1]) * 60
elif 'Z' in updatedtime:
updatedtime = updatedtime[:-1]
sttime = time.strptime(updatedtime, "%Y-%m-%dT%H:%M:%S")
updated_insec = calendar.timegm(sttime) - shiftsec
print ("updated, since = ",updated_insec, lastcheck)
if updated_insec < int(lastcheck):
return False
else:
return True
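# Illustrative calls (hypothetical timestamps):
#   is_updated("2016-11-11T12:38:34+02:00", "1478858000")  # reddit-style offset
#   is_updated("2016-11-11T12:38:34Z", "1478858000")       # github-style UTC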
#######################
##### Others #########
@bot.command()
async def test():
"""command for test and debug"""
await bot.say("RSS test started.")
await checkRSS(bot)
@bot.command(pass_context=True)
async def b(ctx):
"""command for Birthday Wish"""
command,name = ctx.message.content.split(' ')
await bot.say("Happy Birthday to **" + name + "**! :cactus: :tada:")
######################################
loop = asyncio.get_event_loop()
try:
loop.create_task(checkRSS(bot))
loop.run_until_complete(bot.start(CactusConsts.CactusBot_Token))
except KeyboardInterrupt:
print("KeyboardInterrupt")
except Exception as e:
print(e.args)
finally:
loop.run_until_complete(bot.logout())
# cancel all tasks lingering
tasks = asyncio.Task.all_tasks(loop)
for task in tasks:
task.cancel()
loop.close()
if g_session:
g_session.close()
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import testtools
from tempest.api.compute import base
from tempest.common import compute
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
waiters.wait_for_server_status(self.client, self.server_id,
'ACTIVE')
except Exception:
self.__class__.server_id = self.recreate_server(self.server_id)
def tearDown(self):
super(ServersNegativeTestJSON, self).tearDown()
        # NOTE(zhufl): server_check_teardown may raise an Exception, which
        # would prevent the remaining cleanup steps from running, so it is
        # called after super's tearDown.
self.server_check_teardown()
@classmethod
def setup_clients(cls):
super(ServersNegativeTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
server = cls.create_test_server()
cls.client.delete_server(server['id'])
waiters.wait_for_server_termination(cls.client, server['id'])
cls.deleted_server_id = server['id']
@decorators.attr(type=['negative'])
@decorators.idempotent_id('dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf')
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name='')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('b8a7235e-5246-4a8f-a08e-b34877c6586f')
@testtools.skipUnless(CONF.compute_feature_enabled.personality,
'Nova personality feature disabled')
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
personality=person)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('fcba1052-0a50-4cf3-b1ac-fae241edf02f')
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=-1)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('18f5227f-d155-4429-807c-ccb103887537')
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
flavor=-1,)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7f70a4d1-608f-4794-9e56-cb182765972c')
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0')
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@decorators.idempotent_id('7ea45b3e-e770-46fa-bfcc-9daaf6d987c0')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@decorators.attr(type=['negative'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.resize_server,
nonexistent_server, self.flavor_ref)
@decorators.idempotent_id('ced1a1d7-2ab6-45c9-b90f-b27d87b30efd')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@decorators.attr(type=['negative'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(lib_exc.BadRequest, self.client.resize_server,
self.server_id, flavor_ref=nonexistent_flavor)
@decorators.idempotent_id('45436a7d-a388-4a35-a9d8-3adc5d0d940b')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@decorators.attr(type=['negative'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(lib_exc.BadRequest, self.client.resize_server,
self.server_id, flavor_ref="")
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d4c023a0-9c55-4747-9dd5-413b820143c7')
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
nonexistent_server, type='SOFT')
@decorators.idempotent_id('d1417e7f-a509-41b5-a102-d5eed8613369')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@decorators.attr(type=['negative'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
self.assertRaises(lib_exc.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('98fa0458-1485-440f-873b-fe7f0d714930')
def test_rebuild_deleted_server(self):
# Rebuild a deleted server
self.assertRaises(lib_exc.NotFound,
self.client.rebuild_server,
self.deleted_server_id, self.image_ref)
@decorators.related_bug('1660878', status_code=409)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('581a397d-5eab-486f-9cf9-1014bbd4c984')
def test_reboot_deleted_server(self):
# Reboot a deleted server
self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
self.deleted_server_id, type='SOFT')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d86141a7-906e-4731-b187-d64a2ea61422')
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.rebuild_server,
nonexistent_server,
self.image_ref)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('fd57f159-68d6-4c2a-902b-03070828a87e')
def test_create_numeric_server_name(self):
server_name = 12345
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c3e0fb12-07fc-4d76-a22e-37409887afe8')
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 255 characters
server_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@decorators.attr(type=['negative'])
@decorators.related_bug('1651064', status_code=500)
@utils.services('volume')
@decorators.idempotent_id('12146ac1-d7df-4928-ad25-b1f99e5286cd')
def test_create_server_invalid_bdm_in_2nd_dict(self):
volume = self.create_volume()
bdm_1st = {"source_type": "image",
"delete_on_termination": True,
"boot_index": 0,
"uuid": self.image_ref,
"destination_type": "local"}
bdm_2nd = {"source_type": "volume",
"uuid": volume["id"],
"destination_type": "invalid"}
bdm = [bdm_1st, bdm_2nd]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=self.image_ref,
block_device_mapping_v2=bdm)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('4e72dc2d-44c5-4336-9667-f7972e95c402')
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
networks=networks)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7a2efc39-530c-47de-b875-2dd01c8d39bd')
def test_create_with_non_existent_keypair(self):
# Pass a non-existent keypair while creating a server
key_name = data_utils.rand_name('key')
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
key_name=key_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7fc74810-0bd2-4cd7-8244-4f33a9db865a')
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises((lib_exc.BadRequest, lib_exc.OverLimit),
self.create_test_server,
metadata=metadata)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('aa8eed43-e2cb-4ebf-930b-da14f6a21d81')
def test_update_name_of_non_existent_server(self):
# Update name of a non-existent server
nonexistent_server = data_utils.rand_uuid()
new_name = data_utils.rand_name(
self.__class__.__name__ + '-server') + '_updated'
self.assertRaises(lib_exc.NotFound, self.client.update_server,
nonexistent_server, name=new_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('38204696-17c6-44da-9590-40f87fb5a899')
def test_update_server_set_empty_name(self):
# Update name of the server to an empty string
new_name = ''
self.assertRaises(lib_exc.BadRequest, self.client.update_server,
self.server_id, name=new_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5c8e244c-dada-4590-9944-749c455b431f')
def test_update_server_name_length_exceeds_256(self):
        # Update the server name to exceed the name length limit
new_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.client.update_server,
self.server_id,
name=new_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1041b4e6-514b-4855-96a5-e974b60870a3')
def test_delete_non_existent_server(self):
# Delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_server,
nonexistent_server)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('75f79124-277c-45e6-a373-a1d6803f4cc4')
def test_delete_server_pass_negative_id(self):
# Pass an invalid string parameter to delete server
self.assertRaises(lib_exc.NotFound, self.client.delete_server, -1)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5')
def test_delete_server_pass_id_exceeding_length_limit(self):
# Pass a server ID that exceeds length limit to delete server
self.assertRaises(lib_exc.NotFound, self.client.delete_server,
sys.maxsize + 1)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c5fa6041-80cd-483b-aa6d-4e45f19d093c')
def test_create_with_nonexistent_security_group(self):
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
security_groups=security_groups)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('3436b02f-1b1e-4f03-881e-c6a602327439')
def test_get_non_existent_server(self):
# Get a non existent server details
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_server,
nonexistent_server)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a31460a9-49e1-42aa-82ee-06e0bb7c2d03')
def test_stop_non_existent_server(self):
# Stop a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.servers_client.stop_server,
nonexistent_server)
@decorators.idempotent_id('6a8dc0c6-6cd4-4c0a-9f32-413881828091')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@decorators.attr(type=['negative'])
def test_pause_non_existent_server(self):
# pause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.pause_server,
nonexistent_server)
@decorators.idempotent_id('705b8e3a-e8a7-477c-a19b-6868fc24ac75')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@decorators.attr(type=['negative'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.unpause_server,
nonexistent_server)
@decorators.idempotent_id('c8e639a7-ece8-42dd-a2e0-49615917ba4f')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@decorators.attr(type=['negative'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
self.assertRaises(lib_exc.Conflict,
self.client.unpause_server,
self.server_id)
@decorators.idempotent_id('d1f032d5-7b6e-48aa-b252-d5f16dd994ca')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@decorators.attr(type=['negative'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.suspend_server,
nonexistent_server)
@decorators.idempotent_id('7f323206-05a9-4bf8-996b-dd5b2036501b')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@decorators.attr(type=['negative'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
self.client.suspend_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'SUSPENDED')
self.assertRaises(lib_exc.Conflict,
self.client.suspend_server,
self.server_id)
self.client.resume_server(self.server_id)
@decorators.idempotent_id('221cd282-bddb-4837-a683-89c2487389b6')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@decorators.attr(type=['negative'])
def test_resume_non_existent_server(self):
# resume a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.resume_server,
nonexistent_server)
@decorators.idempotent_id('ccb6294d-c4c9-498f-8a43-554c098bfadb')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@decorators.attr(type=['negative'])
def test_resume_server_invalid_state(self):
# resume an active server.
self.assertRaises(lib_exc.Conflict,
self.client.resume_server,
self.server_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7dd919e7-413f-4198-bebb-35e2a01b13e9')
def test_get_console_output_of_non_existent_server(self):
# get the console output for a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.get_console_output,
nonexistent_server, length=10)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6f47992b-5144-4250-9f8b-f00aa33950f3')
def test_force_delete_nonexistent_server_id(self):
# force-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.force_delete_server,
nonexistent_server)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9c6d38cc-fcfb-437a-85b9-7b788af8bf01')
def test_restore_nonexistent_server_id(self):
# restore-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.restore_soft_deleted_server,
nonexistent_server)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7fcadfab-bd6a-4753-8db7-4a51e51aade9')
def test_restore_server_invalid_state(self):
# we can only restore-delete a server in 'soft-delete' state
self.assertRaises(lib_exc.Conflict,
self.client.restore_soft_deleted_server,
self.server_id)
@decorators.idempotent_id('abca56e2-a892-48ea-b5e5-e07e69774816')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@decorators.attr(type=['negative'])
def test_shelve_non_existent_server(self):
# shelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.shelve_server,
nonexistent_server)
@decorators.idempotent_id('443e4f9b-e6bf-4389-b601-3a710f15fddd')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@decorators.attr(type=['negative'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
compute.shelve_server(self.client, self.server_id)
def _unshelve_server():
server_info = self.client.show_server(self.server_id)['server']
if 'SHELVED' in server_info['status']:
self.client.unshelve_server(self.server_id)
self.addCleanup(_unshelve_server)
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
if CONF.image_feature_enabled.api_v1:
kwargs = {'name': image_name}
else:
kwargs = {'params': {'name': image_name}}
images = self.images_client.list_images(**kwargs)['images']
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.assertRaises(lib_exc.Conflict,
self.client.shelve_server,
self.server_id)
self.client.unshelve_server(self.server_id)
@decorators.idempotent_id('23d23b37-afaf-40d7-aa5d-5726f82d8821')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@decorators.attr(type=['negative'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.unshelve_server,
nonexistent_server)
@decorators.idempotent_id('8f198ded-1cca-4228-9e65-c6b449c54880')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@decorators.attr(type=['negative'])
def test_unshelve_server_invalid_state(self):
# unshelve an active server.
self.assertRaises(lib_exc.Conflict,
self.client.unshelve_server,
self.server_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('74085be3-a370-4ca2-bc51-2d0e10e0f573')
@utils.services('volume', 'image')
def test_create_server_from_non_bootable_volume(self):
# Create a volume
volume = self.create_volume()
# Update volume bootable status to false
self.volumes_client.set_bootable_volume(volume['id'],
bootable=False)
# Verify bootable flag was updated
nonbootable_vol = self.volumes_client.show_volume(
volume['id'])['volume']
self.assertEqual('false', nonbootable_vol['bootable'])
# Block device mapping
bd_map = [{'boot_index': '0',
'uuid': volume['id'],
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False}]
# Try creating a server from non-bootable volume
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id='',
wait_until='ACTIVE',
block_device_mapping_v2=bd_map)
class ServersNegativeTestMultiTenantJSON(base.BaseV2ComputeTest):
credentials = ['primary', 'alt']
def setUp(self):
super(ServersNegativeTestMultiTenantJSON, self).setUp()
try:
waiters.wait_for_server_status(self.servers_client, self.server_id,
'ACTIVE')
except Exception:
self.__class__.server_id = self.recreate_server(self.server_id)
@classmethod
def setup_clients(cls):
super(ServersNegativeTestMultiTenantJSON, cls).setup_clients()
cls.alt_client = cls.os_alt.servers_client
@classmethod
def resource_setup(cls):
super(ServersNegativeTestMultiTenantJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@decorators.attr(type=['negative'])
@decorators.idempotent_id('543d84c1-dd2e-4c6d-8cb2-b9da0efaa384')
def test_update_server_of_another_tenant(self):
# Update name of a server that belongs to another tenant
new_name = self.server_id + '_new'
self.assertRaises(lib_exc.NotFound,
self.alt_client.update_server, self.server_id,
name=new_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5c75009d-3eea-423e-bea3-61b09fd25f9c')
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
self.assertRaises(lib_exc.NotFound,
self.alt_client.delete_server,
self.server_id)
|
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
COHORTE Node Composer HTTP Service Proxy
:authors: Bassem Debbabi
:copyright: Copyright 2015, isandlaTech
:license: Apache Software License 2.0
"""
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, Instantiate, \
Validate, Invalidate, Requires
import pelix.http
import pelix.remote
# Herald
import herald
import herald.beans as beans
# Standard library
import logging
import threading
# handles http stuff (already bundled with cohorte)
import requests
try:
# Python 3
import urllib.parse as urlparse
except ImportError:
# Python 2
import urlparse
_logger = logging.getLogger("proxy.proxy")
# collecting information
SUBJECT_GET_HTTP = "cohorte/shell/agent/get_http"
# PROXY SUB PATH
PROXY_SUB_PATH = "/p/"
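# Example mapping performed by do_GET below (isolate name is hypothetical):
#   GET /p/led-gateway-python-auto01/admin/index.html
#     -> GET http://localhost:<isolate HTTP port>/admin/index.html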
@ComponentFactory("cohorte-http-service-proxy-factory")
@Provides(['pelix.http.servlet', herald.SERVICE_DIRECTORY_LISTENER])
@Property('_path', 'pelix.http.path', "/")
@Requires("_directory", herald.SERVICE_DIRECTORY)
@Requires('_herald', herald.SERVICE_HERALD)
@Property('_reject', pelix.remote.PROP_EXPORT_REJECT, ['pelix.http.servlet'])
@Instantiate('cohorte-http-service-proxy')
class HTTPServiceProxy(object):
def __init__(self):
# lock
self._lock = threading.Lock()
# servlet's path
self._path = None
# herald directory service
self._directory = None
self._herald = None
# list of local isolates
# peer.name -> {p_ref, peer.uid, http.port}
self._local_isolates = {}
"""
Listeners --------------------------------------------------------------------------------------------------------
"""
def peer_registered(self, peer):
"""
Called when an isolate is up
"""
if peer.name != "cohorte.internals.forker":
# avoid adding the forker's isolate (cohorte internal isolate on each node)
self._add_peer(peer)
def peer_updated(self, peer, access_id, data, previous):
pass
def peer_unregistered(self, peer):
"""
Called when an isolate is gone
"""
if peer.name in self._local_isolates:
with self._lock:
del self._local_isolates[peer.name]
"""
Utils -------------------------------------------------------------------------------------------------------------
"""
def load_local_isolates(self):
"""
Loads the initial list of node isolates
"""
for p in self._directory.get_peers():
self._add_peer(p)
def _add_peer(self, p):
"""
Adds an isolate to the local list of Node's isolates
"""
local_isolate = self._directory.get_local_peer()
if p.node_uid == local_isolate.node_uid:
with self._lock:
if p.name not in self._local_isolates:
                    # to avoid synchronization problems, the port is initialized to -1;
                    # the first time it is requested, its concrete value is resolved
                    # via get_isolate_http_port.
self._local_isolates[p.name] = {
"p_ref": p,
"uid" : p.uid,
"port": -1
}
def get_isolate_http_port(self, uid):
"""
Retrieves the http port of the given isolate
"""
lp = self._directory.get_local_peer()
if lp.uid != uid:
msg = beans.Message(SUBJECT_GET_HTTP)
reply = self._herald.send(uid, msg)
return reply.content['http.port']
else:
# Get the isolate HTTP port
port = -1
svc_ref = self._context.get_service_reference(
pelix.http.HTTP_SERVICE)
if svc_ref is not None:
port = svc_ref.get_property(pelix.http.HTTP_SERVICE_PORT)
return port
def get_path(self, myurl):
"""
        Gets the path part of a URL.
        Returns a tuple of:
        * 1: a string containing the path, e.g. "/listeners/1234/state"
        * 2: a list of the parts of the path, e.g. ("listeners", "1234", "state")
"""
o = urlparse.urlparse(myurl)
path = o.path
# prepare query path: remove first and last '/' if exists
while len(path) > 0 and path[0] == '/':
path = path[1:]
while len(path) > 0 and path[-1] == '/':
path = path[:-1]
parts = str(path).split('/')
return (path, parts)
"""
Servlet =========================================================================================
"""
def do_GET(self, request, response):
"""
Handle a GET
"""
referer = request.get_header('Referer')
req_path = request.get_path()
if PROXY_SUB_PATH not in req_path and referer is not None and PROXY_SUB_PATH in referer:
            # case of a relative link from a page located in another isolate:
            # the request carries a Referer header pointing to the parent page.
path, parts = self.get_path(referer)
isolate = parts[1]
try:
intern_isolate_port = self._local_isolates[isolate]["port"]
except:
response.send_content(501, "Internal error")
return
if intern_isolate_port == -1 :
intern_isolate_port = self.get_isolate_http_port(self._local_isolates[isolate]["uid"])
self._local_isolates[isolate]["port"] = intern_isolate_port
intern_url = 'http://localhost:' + str(intern_isolate_port) + req_path
try:
r = requests.get(intern_url)
response.send_content(r.status_code, r.content, mime_type=r.headers['content-type'])
except:
response.send_content(501, "Error", "text/html")
else:
if req_path.startswith(PROXY_SUB_PATH):
# link to another isolate
                # e.g., /p/led-gateway-python-auto01/...
path, parts = self.get_path(request.get_path())
isolate = parts[1]
intern_isolate_port = self._local_isolates[isolate]["port"]
if intern_isolate_port == -1 :
intern_isolate_port = self.get_isolate_http_port(self._local_isolates[isolate]["uid"])
self._local_isolates[isolate]["port"] = intern_isolate_port
intern_url = 'http://localhost:' + str(intern_isolate_port) + "/" + "/".join(parts[2:])
try:
r = requests.get(intern_url)
response.send_content(r.status_code, r.content, mime_type=r.headers['content-type'])
except:
response.send_content(501, "Error", "text/html")
else:
# any other link
number = len(self._local_isolates)
if number == 0:
# no servlets
http_content = "<h3>HTTP Services Proxy</h3><ul>"
http_content += "<p>This node has no local isolates!</p><p>Please refresh this page to request again the list of local isolates' HTTP proxies.</p>"
response.send_content(200, http_content)
elif number == 1:
# redirect automatically to first one
for isolate in self._local_isolates:
# one loop
to_url = PROXY_SUB_PATH + isolate + "/"
http_content = "<html><head><meta http-equiv='refresh' content='0; URL=" + to_url + "'/></head><body></body></html>"
response.send_content(200, http_content)
else:
http_content = "<h3>HTTP Services Proxy</h3><ul>"
for isolate in self._local_isolates:
http_content += "<li><a href='" + PROXY_SUB_PATH + isolate + "/'>" + isolate + "</a></li>"
http_content += "</ul>"
response.send_content(200, http_content)
    def do_DELETE(self, request, response):
"""
Handle Delete actions : not yet IMPLEMENTED!
"""
pass
    def do_POST(self, request, response):
"""
Not yet Implemented!
"""
pass
"""
iPOPO STUFF --------------------------------------------------------------------------------------------------------
"""
@Validate
def validate(self, context):
_logger.info("HTTP Service Proxy validated")
self._context = context
# load initial list of local isolates (if already created!)
self.load_local_isolates()
@Invalidate
def invalidate(self, context):
_logger.info("HTTP Service Proxy invalidated")
def bound_to(self, path, params):
"""
Servlet bound to a path
"""
_logger.info('Bound to ' + path)
return True
def unbound_from(self, path, params):
"""
Servlet unbound from a path
"""
_logger.info('Unbound from ' + path)
return None
|
|
"""
Form classes
"""
from django.core.exceptions import ValidationError
from django.utils.copycompat import deepcopy
from django.utils.datastructures import SortedDict
from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.safestring import mark_safe
from fields import Field, FileField
from widgets import Media, media_property, TextInput, Textarea
from util import flatatt, ErrorDict, ErrorList
__all__ = ('BaseForm', 'Form')
NON_FIELD_ERRORS = '__all__'
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return u''
return name.replace('_', ' ').capitalize()
def get_declared_fields(bases, attrs, with_base_fields=True):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions
"""
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
if with_base_fields:
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = base.base_fields.items() + fields
else:
for base in bases[::-1]:
if hasattr(base, 'declared_fields'):
fields = base.declared_fields.items() + fields
return SortedDict(fields)
class DeclarativeFieldsMetaclass(type):
"""
Metaclass that converts Field attributes to a dictionary called
'base_fields', taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = get_declared_fields(bases, attrs)
new_class = super(DeclarativeFieldsMetaclass,
cls).__new__(cls, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class BaseForm(StrAndUnicode):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False):
self.is_bound = data is not None or files is not None
self.data = data or {}
self.files = files or {}
self.auto_id = auto_id
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
self.label_suffix = label_suffix
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
self._changed_data = None
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = deepcopy(self.base_fields)
def __unicode__(self):
return self.as_table()
def __iter__(self):
for name, field in self.fields.items():
yield BoundField(self, field, name)
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
def _get_errors(self):
"Returns an ErrorDict for the data provided for the form"
if self._errors is None:
self.full_clean()
return self._errors
errors = property(_get_errors)
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not bool(self.errors)
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name
def add_initial_prefix(self, field_name):
"""
        Add an 'initial' prefix for checking dynamic initial values
"""
return u'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = BoundField(self, field, name)
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
if bf.is_hidden:
if bf_errors:
top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
hidden_fields.append(unicode(bf))
else:
                # Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % force_unicode(bf_errors))
if bf.label:
label = conditional_escape(force_unicode(bf.label))
# Only add the suffix if the label does not end in
# punctuation.
if self.label_suffix:
if label[-1] not in ':?.!':
label += self.label_suffix
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % force_unicode(field.help_text)
else:
help_text = u''
output.append(normal_row % {
'errors': force_unicode(bf_errors),
'label': force_unicode(label),
'field': unicode(bf),
'help_text': help_text,
'html_class_attr': html_class_attr
})
if top_errors:
output.insert(0, error_row % force_unicode(top_errors))
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = u''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {'errors': '', 'label': '',
'field': '', 'help_text':'',
'html_class_attr': html_class_attr})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe(u'\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row = u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = u'<tr><td colspan="2">%s</td></tr>',
row_ender = u'</td></tr>',
help_text_html = u'<br /><span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row = u'<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row = u'<li>%s</li>',
row_ender = '</li>',
help_text_html = u' <span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(
normal_row = u'<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row = u'%s',
row_ender = '</p>',
help_text_html = u' <span class="helptext">%s</span>',
errors_on_separate_row = True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class())
def _raw_value(self, fieldname):
"""
Returns the raw_value for a particular field name. This is just a
convenient wrapper around widget.value_from_datadict.
"""
field = self.fields[fieldname]
prefix = self.add_prefix(fieldname)
return field.widget.value_from_datadict(self.data, self.files, prefix)
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
if self._errors:
delattr(self, 'cleaned_data')
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError, e:
self._errors[name] = self.error_class(e.messages)
if name in self.cleaned_data:
del self.cleaned_data[name]
def _clean_form(self):
try:
self.cleaned_data = self.clean()
except ValidationError, e:
self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
        Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""
Returns True if data differs from initial.
"""
return bool(self.changed_data)
def _get_changed_data(self):
if self._changed_data is None:
self._changed_data = []
# XXX: For now we're asking the individual widgets whether or not the
# data has changed. It would probably be more efficient to hash the
# initial data, store it in a hidden field, and compare a hash of the
# submitted data, but we'd need a way to easily get the string value
# for a given field. Right now, that logic is embedded in the render
# method of each widget.
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
initial_value = hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name)
if field.widget._has_changed(initial_value, data_value):
self._changed_data.append(name)
return self._changed_data
changed_data = property(_get_changed_data)
def _get_media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
media = property(_get_media)
def is_multipart(self):
"""
        Returns True if the form needs to be multipart-encoded, i.e. it has
FileInput. Otherwise, False.
"""
for field in self.fields.values():
if field.widget.needs_multipart_form:
return True
return False
def hidden_fields(self):
"""
Returns a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Returns a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
class Form(BaseForm):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
__metaclass__ = DeclarativeFieldsMetaclass
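# Illustrative sketch (not part of the original module): typical use of the
# declarative syntax enabled by DeclarativeFieldsMetaclass.  CharField from the
# companion ``fields`` module is assumed to be available.
def _example_declarative_form():
    from fields import CharField
    class ContactForm(Form):
        name = CharField(max_length=100)
    form = ContactForm(data={'name': u'Alice'})
    # is_valid() runs full_clean(); validated values land in cleaned_data.
    if form.is_valid():
        return form.cleaned_data['name']
    return None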
class BoundField(StrAndUnicode):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __unicode__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def _errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
errors = property(_errors)
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
data = data()
else:
if isinstance(self.field, FileField) and self.data is None:
data = self.form.initial.get(self.name, self.field.initial)
else:
data = self.data
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return widget.render(name, data, attrs=attrs)
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
def _data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
data = property(_data)
def label_tag(self, contents=None, attrs=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
Does not HTML-escape the contents. If contents aren't given, uses the
field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
"""
contents = contents or conditional_escape(self.label)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
attrs = attrs and flatatt(attrs) or ''
contents = u'<label for="%s"%s>%s</label>' % (widget.id_for_label(id_), attrs, unicode(contents))
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
def _is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
is_hidden = property(_is_hidden)
def _auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_unicode(auto_id):
return smart_unicode(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
auto_id = property(_auto_id)
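# Illustrative sketch (not part of the original module): how the auto_id
# format string documented in BoundField._auto_id above is resolved. If the
# form's auto_id contains '%s' (Django's default is 'id_%s'), the field's
# html_name is interpolated into it; a true value without '%s' reuses
# html_name directly; a falsy value yields ''. The helper name below is
# hypothetical and exists only for demonstration.
def _demo_auto_id(auto_id, html_name):
    if auto_id and '%s' in str(auto_id):
        return str(auto_id) % html_name
    elif auto_id:
        return html_name
    return ''
# e.g. _demo_auto_id('id_%s', 'email') == 'id_email'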
|
|
#!/usr/bin/python2
from fabric.api import *
from fabric.tasks import execute
import sys
import os
import argparse
import ntpath
import fnmatch
"""
Author: mtask@github.com
"""
class multissh(object):
def manage_servers(self, add=False, delete=False, param=None):
if add:
self.server = param.strip()
if fnmatch.fnmatch(self.server, "*@*=*"):
with open("multissh.conf", 'a') as config:
                    config.write(self.server + "\n")
else:
print "[!] Invalid syntax"
return
elif delete:
try:
self.delete_num = int(param)
except Exception as e:
raise e
self.hosts = []
with open('multissh.conf','r') as conf:
self.config_lines = conf.readlines()
for self.config in self.config_lines:
if self.config.startswith("#"):
continue
elif self.config.startswith("keypath="):
continue
else:
try:
self.params = self.config.split('=',1)
self.hosts.append(self.params[0])
except Exception as e:
raise e
self.server_num = 1
self.host_to_delete = None
for self.h in self.hosts:
if self.server_num == self.delete_num:
self.host_to_delete = self.h
self.server_num += 1
if self.host_to_delete:
self.ans = raw_input("[!] Really delete "+self.host_to_delete+"?(Y/n)")
if self.ans.lower() == "n":
return
else:
print "[!] Host not found"
sys.exit(0)
with open('multissh.conf','w') as conf:
for self.line in self.config_lines:
if self.host_to_delete in self.line:
continue
else:
conf.write(self.line)
def get_settings(self, list=False):
self.hosts = []
self.users = []
self.keypath = None
with open('multissh.conf','r') as conf:
for self.config in conf:
if self.config.startswith("#"):
continue
elif self.config.startswith("keypath="):
try:
self.keypath = self.config.split('=',1)[1].strip()
except Exception as e:
raise e
else:
try:
self.params = self.config.split('=',1)
self.hosts.append(self.params[0])
                        self.users.append(self.params[1].strip())
except Exception as e:
raise e
if list:
self.server_num = 1
for self.h in self.hosts:
print "["+str(self.server_num)+"] "+self.h
self.server_num += 1
else:
return (self.hosts, self.users, self.keypath)
def run_cmd(self, cmd,sudo_=False, script=False, copy_file="", yesno=False):
def file_base_name(path):
try:
file_name = ntpath.basename(path)
return file_name
except Exception as e:
raise e
self.failed = []
self.cmd = cmd
self.servers,self.users, self.keypath = self.get_settings()
os.path.expanduser("~/")
if not self.keypath:
if os.path.isfile(os.path.expanduser("~/")+".ssh/id_rsa"):
self.keypath = "~/.ssh/id_rsa"
else:
print "[!] No clue where the ssh keys are..."
sys.exit(0)
for self.s, self.u in zip(self.servers, self.users):
if yesno:
self.confirm = raw_input("Current server is "+self.s+". Run command?(y/N)")
if self.confirm.lower() != "y":
continue
with settings(host_string=self.s, user=self.u, key_filename=self.keypath):
try:
if script:
if os.path.isfile(self.cmd):
put(self.cmd, "tempscript", mode=0755)
else:
print "[!] Path to local script not found."
sys.exit(1)
if sudo_:
sudo("./tempscript")
sudo("rm tempscript")
else:
run("./tempscript")
run("rm tempscript")
                        continue  # proceed to the next server instead of returning early
elif copy_file:
self.base_name = file_base_name(copy_file)
if os.path.isfile(copy_file):
put(copy_file, self.base_name)
                            continue  # proceed to the next server instead of returning early
else:
if sudo_:
sudo(self.cmd)
else:
run(self.cmd)
except Exception as e:
self.failed.append(self.s)
print "Execution failed on: "+self.s
print "Error:"+str(e)
if len(self.failed) == 0:
if script:
print "[!] Script executed on all servers"
else:
if yesno:
print "[!] Command executed on selected servers"
else:
print "[!] Command executed on all servers"
else:
print "[!] Execution failed on:"
for f in self.failed:
print f
def parse_args(self):
self.descr = """
        Easily run commands on multiple SSH servers.
        Configure hosts in multissh.conf.
        Example configuration: user@server=user
"""
self.parser = argparse.ArgumentParser(description=self.descr)
self.parser.add_argument("-c", "--cmd", type=str, help="Run command script on servers. Wrap commans inside \"\"")
self.parser.add_argument("-s", "--script", type=str, help="Path to local script to move and run on servers")
self.parser.add_argument("-S", "--sudo", action='store_true', help="Run with sudo. Can be used with --cmd, --script and --copy-file. Leave \"sudo\" out of the given command")
self.parser.add_argument("-l", "--list", action='store_true', help="List servers")
self.parser.add_argument("-a", "--add", type=str, help="Add server to config. Use syntax of multissh.config")
self.parser.add_argument("-d", "--delete", type=str, help="Delete server from config. Use list switch to get server's number")
self.parser.add_argument("-cf", "--copyfile", type=str, help="Copy file to servers. Give local path as argument.")
self.parser.add_argument("-yn", "--yesno", action='store_true', help="Ask on every server if to run command on it.")
self.args = self.parser.parse_args()
return self.args
def main(self):
self.arg = self.parse_args()
if self.arg.yesno:
yn = True
else:
yn = False
if self.arg.add:
self.manage_servers(add=True, param=self.arg.add)
if self.arg.delete:
self.manage_servers(delete=True, param=self.arg.delete)
if self.arg.list:
self.get_settings(list=True)
sys.exit(0)
if self.arg.cmd:
if self.arg.sudo:
self.run_cmd(self.arg.cmd, sudo_=True, yesno=yn)
else:
self.run_cmd(self.arg.cmd, yesno=yn)
if self.arg.script:
if self.arg.sudo:
self.run_cmd(self.arg.script, sudo_=True, script=True, yesno=yn)
else:
self.run_cmd(self.arg.script, script=True, yesno=yn)
if self.arg.copyfile:
self.run_cmd("",copy_file=self.arg.copyfile, yesno=yn)
if __name__=='__main__':
multissh().main()
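# Illustrative sketch (not part of the original script): how a single
# multissh.conf line of the form "login@host=user" is split by get_settings()
# above -- the first '=' separates the fabric host string from the remote
# user, and stripping removes the trailing newline. The sample line below is
# hypothetical.
def _demo_parse_conf_line(line="root@203.0.113.10=root\n"):
    host, user = line.split('=', 1)
    return host.strip(), user.strip()
# _demo_parse_conf_line() -> ('root@203.0.113.10', 'root')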
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
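# Illustrative sketch (not part of the original miner): bytereverse() swaps
# the byte order of one 32-bit word, and bufreverse() applies that swap to
# every 4-byte word of a buffer, so applying bufreverse() twice is a no-op.
# The helper below is hypothetical and is never called by the miner.
def _demo_bytereverse():
    assert bytereverse(0x12345678) == 0x78563412
    buf = struct.pack('@II', 1, 2)
    assert bufreverse(bufreverse(buf)) == buf
    return True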
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10337
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
import pytest
import responses
import re
from flask import json
from copy import deepcopy
from json.decoder import JSONDecodeError
from requests import ConnectionError
from backend.util.request.store.search_request import SearchRequest
from backend.util.response.store.search_results import SearchResultsSchema
from backend.util.response.error import ErrorSchema
@pytest.fixture(scope="module")
def request_json():
return {
"pricerange": {
"min": 1000,
"max": 2000
}
}
@pytest.fixture(scope="module")
def response_json():
return {
"total": 10,
"pricerange": {
"min": 10,
"max": 20
},
"brands": [
{
"brand": "string",
"amount": 10
}
],
"kinds": [
{
"kind": "string",
"amount": 10
}
]
}
@pytest.mark.parametrize(
"ftype",
[
("brand"),
("kind"),
("search")
]
)
def test_find_controller(flask_app, willstores_ws, request_json, response_json, ftype):
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, re.compile(willstores_ws),
status=200,
json=response_json
)
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype
)
data = json.loads(response.data)
SearchResultsSchema().load(data)
assert response.status_code == 200
assert data["total"] == 10
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype,
json=request_json
)
data = json.loads(response.data)
SearchResultsSchema().load(data)
assert response.status_code == 200
assert data["total"] == 10
@pytest.mark.parametrize(
"ftype",
[
("brand"),
("kind"),
("search")
]
)
def test_find_controller_no_content(flask_app, willstores_ws, request_json, ftype):
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, re.compile(willstores_ws),
status=204
)
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype
)
assert response.status_code == 204
with pytest.raises(JSONDecodeError):
json.loads(response.data)
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype,
json=request_json
)
assert response.status_code == 204
with pytest.raises(JSONDecodeError):
json.loads(response.data)
def test_find_controller_invalid_ftype(flask_app):
with flask_app.test_client() as client:
response = client.post(
"api/store/find/invalid/test"
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == 400
@pytest.mark.parametrize(
"ftype",
[
("brand"),
("kind"),
("search")
]
)
def test_find_controller_invalid_json(flask_app, request_json, ftype):
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype,
json="notjson"
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == 400
invalid_min = deepcopy(request_json)
invalid_min["pricerange"].update(min=-10.0)
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype,
json=invalid_min
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == 400
invalid_max = deepcopy(request_json)
invalid_max["pricerange"].update(max=-10.0)
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype,
json=invalid_max
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == 400
invalid_range = deepcopy(request_json)
invalid_range["pricerange"].update(min=100.0, max=50.0)
with flask_app.test_client() as client:
response = client.post(
"api/store/find/%s/test" % ftype,
json=invalid_range
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == 400
@pytest.mark.parametrize(
"method,http_method,test_url,error,status_code",
[
("parse_json", "POST", "/api/store/find/brand/test", ConnectionError(), 502),
("parse_json", "POST", "/api/store/find/kind/test", ConnectionError(), 502),
("parse_json", "POST", "/api/store/find/search/test", ConnectionError(), 502),
("parse_json", "POST", "/api/store/find/brand/test", Exception(), 500),
("parse_json", "POST", "/api/store/find/kind/test", Exception(), 500),
("parse_json", "POST", "/api/store/find/search/test", Exception(), 500)
]
)
def test_find_controller_error(mocker, get_request_function, method, http_method, test_url, error, status_code):
mocker.patch.object(SearchRequest, method, side_effect=error)
make_request = get_request_function(http_method)
response = make_request(
test_url
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == status_code
@pytest.mark.parametrize(
"test_url, status_code",
[
("/api/store/find/brand/test", 204),
("/api/store/find/kind/test", 204),
("/api/store/find/search/test", 204),
("/api/store/find/brand/test", 400),
("/api/store/find/kind/test", 400),
("/api/store/find/search/test", 400),
("/api/store/find/brand/test", 401),
("/api/store/find/kind/test", 401),
("/api/store/find/search/test", 401),
("/api/store/find/brand/test", 500),
("/api/store/find/kind/test", 500),
("/api/store/find/search/test", 500),
("/api/store/find/brand/test", 502),
("/api/store/find/kind/test", 502),
("/api/store/find/search/test", 502),
("/api/store/find/brand/test", 504),
("/api/store/find/kind/test", 504),
("/api/store/find/search/test", 504),
]
)
def test_find_controller_http_error(flask_app, willstores_ws, json_error_recv, test_url, status_code):
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, re.compile(willstores_ws),
status=status_code,
json=json_error_recv
)
with flask_app.test_client() as client:
response = client.post(
test_url
)
if status_code == 204:
with pytest.raises(JSONDecodeError):
json.loads(response.data)
else:
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == status_code
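# Illustrative sketch (not part of the original test module): the core
# mocking pattern used in the tests above, without the flask fixtures.
# A compiled regex registered on responses.RequestsMock intercepts any
# matching POST issued through `requests`; the URL below is hypothetical.
def _demo_requests_mock():
    import requests
    with responses.RequestsMock() as rsps:
        rsps.add(responses.POST, re.compile(r"http://willstores\.test/.*"),
                 status=200, json={"total": 10})
        resp = requests.post("http://willstores.test/api/store/find/brand/test")
        assert resp.status_code == 200
        assert resp.json()["total"] == 10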
|
|
# -*- coding: utf-8 -*-
#
# (C) Rob W.W. Hooft, 2003
# (C) Pywikipedia bot team, 2003-2012
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
import os, re
import sys as __sys
import platform
# IMPORTANT:
# Do not change any of the variables in this file. Instead, make
# a file user-config.py, and overwrite values in there.
# Note: all variables defined in this module are made available to bots as
# configuration settings, *except* variable names beginning with an
# underscore (example: _variable). Be sure to use an underscore on any
# variables that are intended only for internal use and not to be exported
# to other modules.
############## ACCOUNT SETTINGS ##############
# The family of sites we are working on. wikipedia.py will import
# families/xxx_family.py so if you want to change this variable,
# you need to write such a file.
family = 'wikipedia'
# The language code of the site we're working on.
mylang = 'language'
# The default interface for communicating with the site
# currently the only defined interface is 'APISite', so don't change this!
site_interface = 'APISite'
# number of days to cache namespaces, api configuration, etc.
API_config_expiry = 30
# The dictionary usernames should contain a username for each site where you
# have a bot account. Please set your usernames by adding such lines to your
# user-config.py:
#
# usernames['wikipedia']['de'] = 'myGermanUsername'
# usernames['wiktionary']['en'] = 'myEnglishUsername'
#
# If you have a sysop account on some wikis, this will be used to delete pages
# or to edit locked pages if you add such lines to your
# user-config.py:
#
# sysopnames['wikipedia']['de'] = 'myGermanUsername'
# sysopnames['wiktionary']['en'] = 'myEnglishUsername'
usernames = {}
sysopnames = {}
disambiguation_comment = {}
# Solve captchas in the webbrowser. Setting this to False will result in the
# exception CaptchaError being thrown if a captcha is encountered.
solve_captcha = True
# Some sites will require password authentication to access the HTML pages at
# the site. If you have any such site, add lines to your user-config.py of
# the following form:
#
# authenticate['en.wikipedia.org'] = ('John','XXXXX')
#
# where John is your login name, and XXXXX your password.
# Note:
# 1. This is only for sites that use authentication in the form that gives
# you a popup for name and password when you try to access any data, NOT
# for, for example, wiki usernames
# 2. You must use the hostname of the site, not its family/language pair
authenticate = {}
#
# Security Connection for Wikimedia Projects
#
use_SSL_onlogin = False # if available, use SSL when logging in
use_SSL_always = False # if available, use SSL for all API queries
# Available security projects
available_ssl_project = [
u'wikipedia', u'wikinews', u'wikisource', u'wiktionary', u'wikibooks',
u'wikiquote', u'wikiversity', u'meta', u'mediawiki', u'commons',
u'species', u'incubator'
]
# password_file = ".passwd"
# A password file with default passwords. For more information, please
# see LoginManager.readPassword in login.py.
# By default you are asked for a password on the terminal.
password_file = None
# edit summary to use if not supplied by bot script
# WARNING: this should NEVER be used in practice, ALWAYS supply a more
# relevant summary for bot edits
default_edit_summary = u'Wikipedia python library v.2'
# Get the names of all known families, and initialize
# with empty dictionaries
def _get_base_dir():
"""Return the directory in which user-specific information is stored.
This is determined in the following order -
1. If the script was called with a -dir: argument, use the directory
provided in this argument
2. If the user has a PYWIKIBOT2_DIR environment variable, use the value
of it
3. Use (and if necessary create) a 'pywikibot' folder (Windows) or
'.pywikibot' directory (Unix and similar) under the user's home
directory.
"""
NAME = "pywikibot"
for arg in __sys.argv[1:]:
if arg.startswith("-dir:"):
base_dir = arg[5:]
__sys.argv.remove(arg)
break
else:
if "PYWIKIBOT2_DIR" in os.environ:
base_dir = os.environ["PYWIKIBOT2_DIR"]
else:
is_windows = __sys.platform == 'win32'
home = os.path.expanduser("~")
if is_windows:
_win_version = int(platform.version()[0])
if _win_version == 5:
base_dir = os.path.join(home, "Application Data", NAME)
elif _win_version == 6:
base_dir = os.path.join(home, "AppData\\Roaming", NAME)
else:
base_dir = os.path.join(home, "."+NAME)
if not os.path.isdir(base_dir):
os.makedirs(base_dir, mode=0700)
if not os.path.isabs(base_dir):
base_dir = os.path.normpath(os.path.join(os.getcwd(), base_dir))
# make sure this path is valid and that it contains user-config file
if not os.path.isdir(base_dir):
raise RuntimeError("Directory '%(base_dir)s' does not exist."
% locals())
if not os.path.exists(os.path.join(base_dir, "user-config.py")):
raise RuntimeError("No user-config.py found in directory '%(base_dir)s'."
% locals())
return base_dir
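# Examples (hypothetical script name and paths) of overriding the directory
# resolved above, in the order checked by _get_base_dir():
#
#   python somebot.py -dir:/home/user/botconfig
#   PYWIKIBOT2_DIR=/home/user/botconfig python somebot.py
#
# Without either, a 'pywikibot' (Windows) or '.pywikibot' (Unix) folder
# under the home directory is used, and created if it does not exist.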
_base_dir = _get_base_dir()
# families/ is a subdirectory of the directory in which config.py is found
for _filename in os.listdir(
os.path.join(os.path.dirname(__file__), 'families')):
if _filename.endswith("_family.py"):
familyName = _filename[ : -len("_family.py")]
usernames[familyName] = {}
sysopnames[familyName] = {}
disambiguation_comment[familyName] = {}
# Set to True to override the {{bots}} exclusion protocol (at your own risk!)
ignore_bot_templates = False
############## USER INTERFACE SETTINGS ##############
# The encoding that's used in the user's console, i.e. how strings are encoded
# when they are read by raw_input(). On Windows systems' DOS box, this should
# be 'cp850' ('cp437' for older versions). Linux users might try 'iso-8859-1'
# or 'utf-8'.
# This default code should work fine, so you don't have to think about it.
# TODO: consider getting rid of this config variable.
try:
console_encoding = __sys.stdout.encoding
except:
#When using pywikipedia inside a daemonized twisted application,
#we get "StdioOnnaStick instance has no attribute 'encoding'"
console_encoding = None
# The encoding the user would like to see text transliterated to. This can be
# set to a charset (e.g. 'ascii', 'iso-8859-1' or 'cp850'), and we will output
# only characters that exist in that charset. However, the characters will be
# output using console_encoding.
# If this is not defined on Windows, we emit a Warning advising the user
# either to switch to a Unicode-capable font and use
# transliteration_target = None
# or to keep using raster fonts and set
# transliteration_target = console_encoding
# After emitting the warning, this last option will be set.
transliteration_target = 'not set'
# The encoding in which textfiles are stored, which contain lists of page
# titles. The most common is 'utf-8'. 'utf-8-sig' recognizes a BOM but is
# only available on Python 2.5 or higher. For a complete list please see:
# http://docs.python.org/library/codecs.html#standard-encodings
textfile_encoding = 'utf-8'
# tkinter isn't yet ready
userinterface = 'terminal'
# this can be used to pass variables to the UI init function
# useful for e.g.
# userinterface_init_kwargs = {'default_stream': 'stdout'}
userinterface_init_kwargs = {}
# i18n setting for user interface language
# default is config.mylang or 'en'
userinterface_lang = None
# Should we transliterate characters that do not exist in the console
# character set?
# True: whenever possible
# False: never - always replace them by question marks
# Currently only works if interface 'terminal' is set.
transliterate = True
# Should the system bell ring if the bot expects user input?
ring_bell = False
# Colorization can be used to markup important text parts of the output.
# On Linux/Unix terminals, ANSI escape codes are used for this. On Windows,
# it is done by a DLL call via ctypes. ctypes is only available since
# Python 2.5, so if you're using Python 2.4 or lower on Windows, you should
# upgrade.
# Set this to False if you're using Linux and your tty doesn't support
# ANSI colors.
try:
# Don't print colorized when the output is, for example, piped to a file.
colorized_output = __sys.stdout.isatty()
except:
colorized_output = False
############## EXTERNAL EDITOR SETTINGS ##############
# The command for the editor you want to use. If set to None, a simple Tkinter
# editor will be used.
# On Windows systems, this script tries to determine the default text editor.
if __sys.platform == 'win32':
try:
import _winreg
_key1 = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\Microsoft\Windows\CurrentVersion\Explorer\FileExts\.txt\OpenWithProgids')
_progID = _winreg.EnumValue(_key1, 1)[0]
_key2 = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '%s\shell\open\command' % _progID)
_cmd = _winreg.QueryValueEx(_key2, None)[0]
editor = _cmd.replace('%1', '')
# Notepad is even worse than our Tkinter editor.
if editor.lower().endswith('notepad.exe'):
editor = None
except:
# XXX what are we catching here?
#raise
editor = None
else:
editor = None
# Warning: DO NOT use an editor which doesn't support Unicode to edit pages!
# You will BREAK non-ASCII symbols!
editor_encoding = 'utf-8'
# The temporary file name extension can be set in order to use syntax
# highlighting in your text editor.
editor_filename_extension = 'wiki'
############## LOGFILE SETTINGS ##############
# Defines for which scripts a logfile should be enabled. Logfiles will be
# saved in the 'logs' subdirectory.
# Example:
# log = ['interwiki', 'weblinkchecker', 'table2wiki']
# It is also possible to enable logging for all scripts, using this line:
# log = ['*']
# To disable all logging, use this:
# log = []
# Per default, logging of interwiki.py is enabled because its logfiles can
# be used to generate so-called warnfiles.
# This setting can be overridden by the -log or -nolog command-line arguments.
log = ['interwiki']
# filename defaults to modulename-bot.log
logfilename = None
# maximal size of a logfile in kilobytes. When the size reaches that limit
# the logfile will be renamed (if logfilecount is not 0) and a new file is
# started. logfilesize must be an integer value
logfilesize = 1024
# Number of rotating logfiles to create. The older files get the higher
# number. If logfilecount is 0, no logfile will be archived, but the current
# logfile will be overwritten once its size reaches the logfilesize above.
# If logfilecount is -1 there is no limit on rotated logfiles; the file is
# renamed whenever the logfile is full, and the newest file gets the highest
# number until some logfiles are deleted.
logfilecount = 5
# set to 1 (or higher) to generate "informative" messages to terminal
verbose_output = 0
# if True, include a lot of debugging info in logfile
# (overrides log setting above)
debug_log = []
############## INTERWIKI SETTINGS ##############
# Should interwiki.py report warnings for missing links between foreign
# languages?
interwiki_backlink = True
# Should interwiki.py display every new link it discovers?
interwiki_shownew = True
# Should interwiki.py output a graph PNG file on conflicts?
# You need pydot for this: http://dkbza.org/pydot.html
interwiki_graph = False
# Specifies that the robot should process that amount of subjects at a time,
# only starting to load new pages in the original language when the total
# falls below that number. Default is to process (at least) 100 subjects at
# once.
interwiki_min_subjects = 100
# If interwiki graphs are enabled, which format(s) should be used?
# Supported formats include png, jpg, ps, and svg. See:
# http://www.graphviz.org/doc/info/output.html
# If you want to also dump the dot files, you can use this in your
# user-config.py:
# interwiki_graph_formats = ['dot', 'png']
# If you need a PNG image with an HTML image map, use this:
# interwiki_graph_formats = ['png', 'cmap']
# If you only need SVG images, use:
# interwiki_graph_formats = ['svg']
interwiki_graph_formats = ['png']
# You can post the contents of your autonomous_problems.dat to the wiki,
# e.g. to http://de.wikipedia.org/wiki/Wikipedia:Interwiki-Konflikte .
# This allows others to assist you in resolving interwiki problems.
# To help these people, you can upload the interwiki graphs to your
# webspace somewhere. Set the base URL here, e.g.:
# 'http://www.example.org/~yourname/interwiki-graphs/'
interwiki_graph_url = None
# Save file with local articles without interwikis.
without_interwiki = False
# Experimental feature:
# Store the page contents on disk (/cache/ directory) instead of loading
# them in RAM.
interwiki_contents_on_disk = False
############## SOLVE_DISAMBIGUATION SETTINGS ############
#
# Set disambiguation_comment[FAMILY][LANG] to a non-empty string to override
# the default edit comment for the solve_disambiguation bot.
# Use %s to represent the name of the disambiguation page being treated.
# Example:
#
# disambiguation_comment['wikipedia']['en'] = \
# "Robot-assisted disambiguation ([[WP:DPL|you can help!]]): %s"
sort_ignore_case = False
############## IMAGE RELATED SETTINGS ##############
# If you set this to True, images will be uploaded to Wikimedia
# Commons by default.
upload_to_commons = False
############## SETTINGS TO AVOID SERVER OVERLOAD ##############
# Slow down the robot such that it never requests a second page within
# 'minthrottle' seconds. This can be lengthened if the server is slow,
# but never more than 'maxthrottle' seconds. However - if you are running
# more than one bot in parallel the times are lengthened.
# By default, the get_throttle is turned off, and 'maxlag' is used to
# control the rate of server access. Set minthrottle to non-zero to use a
# throttle on read access.
minthrottle = 0
maxthrottle = 60
# Slow down the robot such that it never makes a second page edit within
# 'put_throttle' seconds.
put_throttle = 10
# Sometimes you want to know when a delay is inserted. If a delay is larger
# than 'noisysleep' seconds, it is logged on the screen.
noisysleep = 3.0
# Defer bot edits during periods of database server lag. For details, see
# http://www.mediawiki.org/wiki/Maxlag_parameter
# You can set this variable to a number of seconds, or to None (or 0) to
# disable this behavior. Higher values are more aggressive in seeking
# access to the wiki.
# Non-Wikimedia wikis may or may not support this feature; for families
# that do not use it, it is recommended to set minthrottle (above) to
# at least 1 second.
maxlag = 5
# Maximum of pages which can be retrieved by special pages. Increase this if
# you heavily use redirect.py with action "double", and especially if you're
# running solve_disambiguation.py with the -primary argument.
special_page_limit = 500
# Maximum number of times to retry an API request before quitting.
max_retries = 25
# Minimum time to wait before resubmitting a failed API request.
retry_wait = 5
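# Example (user-config.py): conservative overrides for a slow or shared wiki.
# These values are illustrative only, not recommendations:
#
# minthrottle = 1
# put_throttle = 30
# maxlag = 8
# max_retries = 10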
############## TABLE CONVERSION BOT SETTINGS ##############
# will split long paragraphs for better reading the source.
# only table2wiki.py use it by now
splitLongParagraphs = False
# sometimes HTML-tables are indented for better reading.
# That can do very ugly results.
deIndentTables = True
# table2wiki.py works quite stable, so you might switch to True
table2wikiAskOnlyWarnings = True
table2wikiSkipWarnings = False
############## WEBLINK CHECKER SETTINGS ##############
# How many external links should weblinkchecker.py check at the same time?
# If you have a fast connection, you might want to increase this number so
# that slow servers won't slow you down.
max_external_links = 50
report_dead_links_on_talk = False
############## DATABASE SETTINGS ##############
db_hostname = 'localhost'
db_username = 'wikiuser'
db_password = ''
############## SEARCH ENGINE SETTINGS ##############
# Some scripts allow querying Google via the Google Web API. To use this feature,
# you must install the pyGoogle module from http://pygoogle.sf.net/ and have a
# Google Web API license key. Note that Google doesn't give out license keys
# anymore.
google_key = ''
# Some scripts allow using the Yahoo! Search Web Services. To use this feature,
# you must install the pYsearch module from http://pysearch.sourceforge.net/
# and get a Yahoo AppID from http://developer.yahoo.com
yahoo_appid = ''
# To use Windows Live Search web service you must get an AppID from
# http://search.msn.com/developer
msn_appid = ''
############## COPYRIGHT SETTINGS ##############
# Enable/disable search engine in copyright.py script
copyright_google = True
copyright_yahoo = True
copyright_msn = False
# Perform a deep check, loading URLs to search if 'Wikipedia' is present.
# This may be useful to increase the number of correct results. If you haven't
# a fast connection, you might want to keep them disabled.
copyright_check_in_source_google = False
copyright_check_in_source_yahoo = False
copyright_check_in_source_msn = False
# Web pages may contain a Wikipedia text without the word 'Wikipedia' but with
# the typical '[edit]' tag as a result of a copy & paste procedure. You want
# no report for this kind of URLs, even if they are copyright violations.
# However, when enabled, these URLs are logged in a file.
copyright_check_in_source_section_names = False
# Limit number of queries for page.
copyright_max_query_for_page = 25
# Skip a specified number of queries
copyright_skip_query = 0
# Number of attempts on connection error.
copyright_connection_tries = 10
# Behavior if a query-limit-exceeded error occurs.
#
# Possibilities:
#
# 0 = None
# 1 = Disable search engine
# 2 = Sleep (default)
# 3 = Stop
copyright_exceeded_in_queries = 2
copyright_exceeded_in_queries_sleep_hours = 6
# Append last modified date of URL to script result
copyright_show_date = True
# Append length of URL to script result
copyright_show_length = True
# By default the script tries to identify and skip text that contains a large
# comma-separated list or only numbers. Sometimes, however, that might be the
# only unmodified part of a slightly edited and otherwise unreported
# copyright violation. You can disable this feature to try to increase the
# number of results.
copyright_economize_query = True
############## HTTP SETTINGS ##############
# Use a persistent http connection. An http connection has to be established
# only once per site object, making stuff a whole lot faster. Do NOT EVER
# use this if you share Site objects across threads without proper locking.
## DISABLED FUNCTION. Setting this variable will not have any effect.
persistent_http = False
# Default socket timeout. Set to None to disable timeouts.
socket_timeout = 120 # set a pretty long timeout just in case...
############## COSMETIC CHANGES SETTINGS ##############
# The bot can make some additional changes to each page it edits, e.g. fix
# whitespace or positioning of interwiki and category links.
# This is an experimental feature; handle with care and consider re-checking
# each bot edit if enabling this!
cosmetic_changes = False
# If cosmetic changes are switched on, and you also have several accounts at
# projects where you're not familiar with the local conventions, you probably
# only want the bot to do cosmetic changes on your "home" wiki which you
# specified in config.mylang and config.family.
# If you want the bot to also do cosmetic changes when editing a page on a
# foreign wiki, set cosmetic_changes_mylang_only to False, but be careful!
cosmetic_changes_mylang_only = True
# The dictionary cosmetic_changes_enable should contain a tuple of languages
# for each site where you wish to enable cosmetic changes in addition to your
# own language (if cosmetic_changes_mylang_only is set)
# Please set your dictionary by adding such lines to your user-config.py:
# cosmetic_changes_enable['wikipedia'] = ('de', 'en', 'fr')
cosmetic_changes_enable = {}
# The dictionary cosmetic_changes_disable should contain a tuple of languages
# for each site where you wish to disable cosmetic changes. You may use it with
# cosmetic_changes_mylang_only is False, but you can also disable your own
# language. This also overrides the settings in the cosmetic_changes_enable
# dictionary. Please set your dict by adding such lines to your user-config.py:
# cosmetic_changes_disable['wikipedia'] = ('de', 'en', 'fr')
cosmetic_changes_disable = {}
# cosmetic_changes_deny_script is a list of scripts for which cosmetic changes
# are disabled. You may add additional scripts by appending script names in
# your user-config.py (using the "+=" operator is strongly recommended):
# cosmetic_changes_deny_script += ['your_script_name_1', 'your_script_name_2']
# Appending the script name also works:
# cosmetic_changes_deny_script.append('your_script_name')
cosmetic_changes_deny_script = ['cosmetic_changes', 'touch']
############## REPLICATION BOT ################
# You can add replicate_replace to your user_config.py, which has the following format:
#
# replicate_replace = {
# 'wikipedia:li': {'Hoofdpagina': 'Veurblaad'}
# }
#
# to replace all occurrences of 'Hoofdpagina' with 'Veurblaad' when writing to liwiki. Note that this does
# not take the origin wiki into account.
replicate_replace = {}
############## FURTHER SETTINGS ##############
### Proxy configuration ###
# assign proxy = None to connect directly
# For proxy support first run: apt-get install python-socks.py
# then change your user-config.py like:
# import httplib2
# import socks
# proxy = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, 'localhost', 8000)
# The following lines will be printed, but it works:
# Configuration variable 'httplib2' is defined but unknown. Misspelled?
# Configuration variable 'socks' is defined but unknown. Misspelled?
proxy = None
### Simulate settings ###
# Defines what actions the bots are NOT allowed to do (e.g. 'edit') on wikipedia
# servers. Allows simulation runs of bots to be carried out without changing any
# page on the server side. This setting may be overridden in user_config.py.
actions_to_block = ['edit', 'watch', 'move', 'delete', 'undelete', 'protect',
'emailuser']
# Set simulate to True or use -simulate option to block all actions given above.
simulate = False
# How many pages should be put into the queue in asynchronous mode.
# If maxsize is <= 0, the queue size is infinite.
# Increasing this value will use more memory but can speed up processing;
# the higher the value, the smaller the additional effect.
max_queue_size = 64
# Define the line separator. Pages retrieved via API have "\n" whereas
# pages fetched from screen (mostly) have "\r\n". Interwiki and category
# separator settings in family files should use multiples of this.
# LS is a shortcut alias.
line_separator = LS = u'\n'
# Settings to enable mwparserfromhell <http://mwparserfromhell.readthedocs.org/en/latest/>
# Currently used in textlib.extract_templates_and_params
# This should be more accurate than our current regex, but is currently opt-in.
use_mwparserfromhell = False
# End of configuration section
# ============================
def makepath(path):
"""Return a normalized absolute version of the path argument.
- if the given path already exists in the filesystem
the filesystem is not modified.
- otherwise makepath creates directories along the given path
using the dirname() of the path. You may append
a '/' to the path if you want it to be a directory path.
from holger@trillke.net 2002/03/18
"""
import os
dpath = os.path.normpath(os.path.dirname(path))
if not os.path.exists(dpath):
os.makedirs(dpath)
return os.path.normpath(os.path.abspath(path))
def datafilepath(*filename):
"""Return an absolute path to a data file in a standard location.
Argument(s) are zero or more directory names, optionally followed by a
data file name. The return path is offset to config.base_dir. Any
directories in the path that do not already exist are created.
"""
import os.path
return makepath(os.path.join(base_dir, *filename))
def shortpath(path):
"""Return a file path relative to config.base_dir."""
import os.path
if path.startswith(base_dir):
return path[len(base_dir) + len(os.path.sep) : ]
return path
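# Example (hypothetical file name): both helpers work relative to
# config.base_dir, so
#
#   datafilepath('logs', 'interwiki-bot.log')
#
# returns an absolute path below base_dir (creating 'logs/' if necessary),
# and shortpath() maps that absolute path back to 'logs/interwiki-bot.log'.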
# System-level and User-level changes.
# Store current variables and their types.
_glv = {}
_glv.update(globals())
_gl = _glv.keys()
_tp = {}
for _key in _gl:
if _key[0] != '_':
_tp[_key] = type(globals()[_key])
# Get the user files
_thislevel = 0
_fns = [os.path.join(_base_dir, "user-config.py")]
for _filename in _fns:
_thislevel += 1
if os.path.exists(_filename):
_filestatus = os.stat(_filename)
_filemode = _filestatus[0]
_fileuid = _filestatus[4]
if __sys.platform == 'win32' or _fileuid in [os.getuid(), 0]:
if __sys.platform == 'win32' or _filemode & 002 == 0 or True:
execfile(_filename)
else:
print "WARNING: Skipped '%(fn)s': writeable by others."\
% {'fn' :_filename}
else:
print "WARNING: Skipped '%(fn)s': owned by someone else."\
% {'fn' :_filename}
# Test for obsoleted and/or unknown variables.
for _key, _val in globals().items():
if _key.startswith('_'):
pass
elif _key in _gl:
nt = type(_val)
ot = _tp[_key]
if nt == ot or _val is None or ot == type(None):
pass
elif nt is int and (ot is float or ot is bool):
pass
elif ot is int and (nt is float or nt is bool):
pass
else:
print "WARNING: Type of '%(_key)s' changed" % locals()
print " %(was)s: %(old)s" % {'was': "Was", 'old': ot}
print " %(now)s: %(new)s" % {'now': "Now", 'new': nt}
del nt, ot
else:
print \
"Configuration variable %(_key)r is defined but unknown."\
" Misspelled?" % locals()
# Fix up default console_encoding
if console_encoding is None:
if __sys.platform == 'win32':
console_encoding = 'cp850'
else:
console_encoding = 'iso-8859-1'
# Fix up transliteration_target
if transliteration_target == 'not set':
if __sys.platform == 'win32':
transliteration_target = console_encoding
print "WARNING: Running on Windows and transliteration_target is not set."
print "Please see http://www.mediawiki.org/wiki/Manual:Pywikipediabot/Windows"
else:
transliteration_target = None
elif transliteration_target in ('None', 'none'):
transliteration_target = None
# Save base_dir for use by other modules
base_dir = _base_dir
#
# When called as main program, list all configuration variables
#
if __name__ == "__main__":
import types
_all = 1
for _arg in __sys.argv[1:]:
if _arg == "modified":
_all = 0
else:
print "Unknown arg %(_arg)s ignored" % locals()
_k = globals().keys()
_k.sort()
for _name in _k:
if _name[0] != '_':
if not type(globals()[_name]) in [types.FunctionType, types.ModuleType]:
if _all or _glv[_name] != globals()[_name]:
print _name, "=", repr(globals()[_name])
# cleanup all locally-defined variables
for __var in globals().keys():
if __var.startswith("_") and not __var.startswith("__"):
del __sys.modules[__name__].__dict__[__var]
del __var, __sys
del os, re
|
|
from js_process_ast import traverse, traverse_i, null_node, \
find_node, flatten_statementlists, \
kill_bad_globals
from js_ast import *
from js_cc import js_parse, unpack_for_c_loops, combine_if_else_nodes
import sys, os, time, math, struct, io, imp
typespace = None
debug_gen = False
class Frame (list):
def __init__(self, input=[], parent=None, node=None):
super(Frame, self).__init__(input)
self.parent = parent
self.node = node
self.locals = {}
self.leaf = False
self.pop_trystack = False
self.paths = []
def append(self, item):
if type(item) == Frame:
item.parent = self
else:
item.frame = self
super(Frame, self).append(item)
def prepend(self, item):
if type(item) == Frame:
item.parent = self
else:
item.frame = self
super(Frame, self).insert(0, item)
def replace(self, i1, i2):
self[self.index(i1)] = i2
if type(i2) == Frame:
i2.parent = self
else:
i2.frame = self
def insert(self, i, item):
if type(item) == Frame:
item.parent = self
else:
item.frame = self
super(Frame, self).insert(i, item)
def print_frames(frames, tlevel=0):
tstr = tab(tlevel)
tstr2 = tab(tlevel+1)
s = ""
for f in frames:
if type(f) == Frame:
if f.node != None:
nstr = "%s %d " % (f.node.get_line_str(), f.label)
else:
nstr = str(f.label) + " "
s += tstr + nstr + "{\n" + print_frames(f, tlevel+1)
s += tstr + "}\n";
else:
s += tstr + f.get_line_str() + "\n"
if tlevel == 0:
print(s)
return s
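# Illustrative sketch (not part of the original transform): the shape of the
# state machine that visit_generators() below emits in JavaScript, written
# here as plain Python. Each frame becomes one case of a dispatch on `state`;
# yielding records the resume state and returns a value. This is a conceptual
# model only, not the emitted code.
class _DemoStateMachine(object):
    def __init__(self):
        self.state = 0
        self.done = False
    def next(self):
        while not self.done:
            if self.state == 0:        # frame 0: set up scope
                self.scope_i = 0
                self.state = 1
            elif self.state == 1:      # frame 1: loop body with a yield
                if self.scope_i < 3:
                    value = self.scope_i
                    self.scope_i += 1
                    self.state = 1     # resume in the same frame
                    return {"done": False, "value": value}
                self.state = 2
            else:                      # frame 2: fall off the end
                self.done = True
        return {"done": True, "value": None}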
def visit_yields(node):
p = node
while not null_node(p) and type(p) != FunctionNode:
p = p.parent
if null_node(p):
typespace.error("yield keyword only valid within functions")
p.is_generator = True
def node_has_yield(node):
if type(node) == YieldNode:
return True
for c in node.children:
if type(c) == FunctionNode:
continue
ret = node_has_yield(c)
if ret: return True
return False
def visit_generators(node):
if not node.is_generator: return
def _remove_this(n):
if n.val != "this": return
if type(n.parent) != BinOpNode or n.parent.op != ".":
#typespace.error("Can only reference members of 'this' in generators");
n.val = "__gen_this2"
else:
n.val = "__gen_this2"
#n.parent.parent.replace(n.parent, n.parent[1])
def set_cur(n):
if type(n) in [IfNode, WhileNode,
DoWhileNode, ForLoopNode, CatchNode]:
n._cur = 1;
n._startcur = 1;
else:
n._cur = 0
n._startcur = 0
n._start = True
n._has_yield = node_has_yield(n)
for c in n:
set_cur(c)
def prior_if(n):
if n.parent == None: return None
sl = n.parent
i = sl.children.index(n)-1
while 1:
while i >= 0:
if type(sl[i]) == IfNode:
break
i -= 1
if i >= 0 or null_node(n.parent): break
i = sl.parent.children.index(sl);
sl = sl.parent;
if i < 0:
typespace.error("Orphaned else node", n)
sys.exit(-1)
return sl[i]
def prior_try(n):
if n.parent == None: return None
sl = n.parent
i = sl.children.index(n)-1
while 1:
while i >= 0:
if type(sl[i]) == TryNode:
break
i -= 1
if i >= 0 or null_node(n.parent): break
i = sl.parent.children.index(sl);
sl = sl.parent;
if i < 0:
typespace.error("Orphaned catch node", n)
sys.exit(-1)
return sl[i]
def is_stype(n):
ret = type(n) in stypes # and (n._has_yield or n.parent._has_yield)
return ret
if type(n) == CatchNode:
ret |= prior_try(n)._has_yield
if type(n) == ElseNode:
ret |= prior_if(n)._has_yield
if type(n) in [IfNode, ElseNode]:
p5 = n.parent
while not null_node(p5):
if hasattr(p5, "_has_yield") and p5._has_yield:
ret = True;
break
p5 = p5.parent
return ret
combine_if_else_nodes(node)
traverse(node, ForCNode, unpack_for_c_loops, exclude=[FunctionNode], copy_children=True);
traverse(node, IdentNode, _remove_this)
traverse(node, VarDeclNode, _remove_this)
frames = frame = Frame(node=node)
stack = [c for c in node.children[1:]]
stack.reverse()
stypes = set([ForLoopNode, WhileNode, DoWhileNode, IfNode,
ElseNode, TryNode, CatchNode])
for c in stack:
set_cur(c)
while len(stack) > 0:
n = stack.pop(-1)
if is_stype(n) or type(n) == StatementList:
if n._start:
if type(n) != StatementList:
f = Frame(node=n)
frame.append(f)
frame = f
n._start = False
if n._cur < len(n.children):
stack.append(n)
stack.append(n[n._cur])
n._cur += 1
elif type(n) != StatementList:
frame = frame.parent
else:
frame.append(n)
def compact_frames(frames):
i = 0
frm = None
while i < len(frames):
f1 = frames[i]
if type(f1) == YieldNode:
frm = None
if type(f1) != Frame:
if frm == None:
frm = Frame()
frames.insert(i, frm)
frm.parent = frames
i += 1
frames.remove(f1)
i -= 1
frm.append(f1)
else:
compact_frames(f1)
frm = None
if type(f1) == YieldNode:
frm = None
i += 1
def label_frames(frames, cur=None):
if cur == None: cur = [0]
frames.label = cur[0]
cur[0] += 1
for f in frames:
if type(f) == Frame:
if f.node != None:
f.node.frame = f
label_frames(f, cur)
else:
f.frame = f
def prop_frame_refs(node, f):
if hasattr(node, "frame"): f = node.frame
else: node.frame = f
for c in node.children:
prop_frame_refs(c, f)
def apply_frame_scope(n, scope, frames):
if type(n) == IdentNode:
if n.val in scope:
n.val = "scope.%s_%d" % (n.val, scope[n.val])
else:
p = n.parent
n2 = n
#check for implicit declarations within catch and loop nodes
while not null_node(p):
if type(p) in [CatchNode, WhileNode, ForLoopNode]: break
n2 = p
p = p.parent
if not null_node(p) and n2 == p[0]:
scope[n.val] = frames.label
n.val = "scope.%s_%d" % (n.val, scope[n.val])
elif type(n) == VarDeclNode:
n.local = False;
if "local" in n.modifiers: n.modifiers.remove("local")
if hasattr(n.parent, "_c_loop_node"):
frames = n.parent._c_loop_node.frame
#print("yay", n.parent._c_loop_node.frame.label)
if n.val not in scope:
scope[n.val] = frames.label
if n.val in scope:
n.val = "scope.%s_%d" % (n.val, scope[n.val])
for c in n.children:
#ignore expr functions, but not nested functions?
if type(c) == FunctionNode and type(c.parent) == AssignNode: continue
if type(n) == BinOpNode and n.op == "." and c == n[1] and type(c) == IdentNode:
continue
if type(n) == FuncCallNode and type(c) == IdentNode and c == n[0]:
continue
apply_frame_scope(c, scope, frames)
def frame_scope(frames, scope, depth=0):
frames.scope = scope
for f in frames:
ss = "-"
fstr = ""
if type(f) == Frame:
if f.node != None:
fstr = f.node.get_line_str()
else:
if type(f[0]) == Frame: fstr = f[0].node.get_line_str()
else: fstr = f[0].get_line_str()
if f.node != None:
ss = "+"
scope2 = dict(scope)
for i in range(f.node._startcur):
apply_frame_scope(f.node[i], scope2, f)
frame_scope(f, scope2, depth+1)
else:
frame_scope(f, scope, depth)
else:
fstr = f.get_line_str()
apply_frame_scope(f, scope, frames)
scope = {}
for a in node.children[0]:
scope[a.val] = 0
compact_frames(frames)
label_frames(frames)
prop_frame_refs(node, frames)
frame_scope(frames, scope)
#print_frames(frames)
def frames_validate(frames):
def gen_frame_validate(frames, tlevel=0):
s = ""
tstr = tab(tlevel+1)
tstr2 = tab(tlevel+2)
for f in frames:
if type(f) == Frame:
if f.node != None:
cs = f.node.children
f.node.children = f.node.children[:node._startcur]
f.node.add(ExprNode([]))
c = f.node.gen_js(tlevel+1).split("\n")[0].replace("{", "").replace("\n", "").replace("}", "").strip()
if c.endswith(";"): c = c[:-1]
s += tstr + c + " {\n"
f.node.children = cs
s += gen_frame_validate(f, tlevel+1)
if f.node != None:
s += tstr + "}\n"
else:
c = tstr + f.gen_js(tlevel+2)
s += c
if c.strip().endswith("}") == 0 and c.strip().endswith(";") == 0:
s += ";"
s += "\n"
if tlevel == 0:
c = node.gen_js(0).split("\n")[0] + "\n"
s = c + s + "}\n"
return s
#print(node.gen_js(0))
#print(scope)
#print_frames(frames)
s = gen_frame_validate(frames)
s2 = js_parse(s).gen_js(0).strip()
s = node.gen_js(0).strip()
s = js_parse(s, print_stack=False).gen_js(0).strip()
print(s==s2)
if s != s2:
import difflib
print(dir(difflib))
d = difflib.ndiff(s.split("\n"), s2.split("\n"))
ds = ""
for l in d:
ds += l + "\n"
#print(ds)
line_print(s)
line_print(s2)
#frames_validate(frames)
flatframes = []
def flatten_frames(frames):
flatframes.append(frames)
for f in frames:
if type(f) == Frame:
flatten_frames(f)
flatten_frames(frames)
#print([f.label for f in flatframes])
def frames_transform(frames, node2):
scope = frames.scope
node2 = FunctionNode(node.name, node.lineno)
node2.add(ExprListNode([]))
for c in node.children[0]:
node2[0].add(IdentNode(c.val))
frames2 = frames
for j, frames in enumerate(flatframes[1:]):
p = frames.parent
f = frames
frames.return_frame = 0
frames.return_frame_parent = 0
i = p.index(f)
while i >= len(p)-1 and p.parent != None:
f = p
p = p.parent
i = p.index(f)
if p.parent == None:
frames.return_frame = 0
frames.return_frame_parent = p.label
else:
frames.return_frame = p[i+1].label
frames.return_frame_parent = p.label
def f_name(f):
return "frame_%d" % f.label
def f_ref(f):
return "this.frame_%d" % f.label
def f_raw_next(f):
if f.parent == None:
f = Frame()
f.label = len(flatframes)
return f
while f.parent != None:
i = f.parent.index(f)+1
while i < len(f.parent):
if type(f.parent[i]) == Frame:
return f.parent[i]
i += 1
f = f.parent
f = Frame()
f.label = len(flatframes)
return f
def f_next(f, ignore_loops=False):
if f.parent == None:
if debug_gen:
print("no f.parent! make frame");
f = Frame()
f.label = len(flatframes)
return f
while f.parent != None:
i = f.parent.index(f)+1
while i < len(f.parent):
if type(f.parent[i]) == Frame:
if type(f.parent[i].node) not in [CatchNode, ElseNode]:
return f.parent[i]
i += 1
if not ignore_loops and f.parent != None and \
type(f.parent.node) in \
[WhileNode, DoWhileNode, ForLoopNode]:
if debug_gen:
print("looper!", f.label, f.parent.label)
return f.parent
f = f.parent
if debug_gen:
print("made frame!", len(flatframes))
f = Frame()
f.label = len(flatframes)
return f
def f_first(f):
for f2 in f:
if type(f2) == Frame:
return f2
#return f
def f_last(f):
return f[-1]
def has_parent(f, p):
while f != p and f != None:
f = f.parent
return f == p
def find_exit_points(f, p=None, vset=None):
stack = []
if p == None: p = f
if vset == None: vset = set()
lst = []
"""
lst = []
for f2 in f:
if type(f2) == Frame:
for f3 in f2.paths:
if type(f3) == Frame:
if not has_parent(f3, p) and f3.label not in vset:
lst.append(f3)
vset.add(f3.label)
lst += find_exit_points(f3, p, vset)
else:
continue
"""
for f2 in f.paths:
if not has_parent(f2, p) and f2.label not in vset:
lst.append(f)
vset.add(f.label)
else:
lst += find_exit_points(f2, p, vset)
for f in lst:
print(f.label)
#sys.exit()
return lst
tot = len(node)-1
for i in range(tot):
node.pop(1)
def param_var(k):
for c in node[0]:
val = c.gen_js(0)
val = val.strip()
k = k.strip()
if k == val: return True
return False
#build generator state data
scopestr = "{"
for k in scope:
if scopestr != "{": scopestr += ", ";
if param_var(k):
scopestr += "%s_%i : %s" % (k, scope[k], k);
else:
scopestr += "%s_%i : %s" % (k, scope[k], "undefined");
scopestr += "}"
node.add(js_parse("this.scope = $s;", [scopestr], start_node=AssignNode))
node.add(js_parse("this.ret = {done : false, value : undefined};", start_node=AssignNode))
node.add(js_parse("this.state = 1;", start_node=AssignNode))
node.add(js_parse("this.trystack = [];", start_node=AssignNode))
node.add(js_parse("""
this.next = function() {
var ret;
var stack = this.trystack;
try {
ret = this._next();
} catch (err) {
if (stack.length > 0) {
var item = stack.pop();
this.state = item[0];
this.scope[item[1]] = err;
return this.next();
} else {
throw err;
}
}
return ret;
}""", start_node=AssignNode))
node.add(js_parse("""
this.push_trystack = function(catchstate, catchvar) {
this.trystack.push([catchstate, catchvar]);
}""", start_node=AssignNode))
node.add(js_parse("""
this.pop_trystack = function() {
this.trystack.pop();
}""", start_node=AssignNode))
#build next function
keynode = IdentNode("$__state");
sn = SwitchNode(keynode);
slist = js_parse("var $__ret = undefined; var $__state = this.state; var scope = this.scope;");
slist2 = StatementList()
slist2.add(sn)
wn = WhileNode(BinOpNode(IdentNode("$__state"), NumLitNode(len(flatframes)), "<"))
wn.add(slist2)
wn[1].add(js_parse("""
if ($__ret != undefined) {
break;
}
""", start_node=IfNode));
slist.add(wn);
slist.add(js_parse("""
if ($__ret != undefined) {
this.ret.value = $__ret.value;
} else {
this.ret.done = true;
this.ret.value = undefined;
}
this.state = $__state;
return this.ret;
"""));
next = js_parse("this._next = function() { };", start_node=AssignNode)
next[1].add(slist)
node.add(next)
sn.line = slist.line = node.line
sn.lexpos = slist.lexpos = node.lexpos
#find leaves
for f in flatframes:
if len(f) > 0:
f.leaf = True
for c in f:
if type(c) == Frame:
f.leaf = False
break
#move control frame of dowhile statements to
#after their statement body frames.
visit = set()
for i in range(len(flatframes)):
if i in visit: continue
f = flatframes[i]
if f.leaf or type(f.node) != DoWhileNode: continue
f2 = f_first(f)
if f2 == None: continue
last = f2.label
while (f2 != f_next(f) and f2 != f):
last = f2.label
f2 = f_next(f2)
last = ((last-1) if last > i else last) + 1
flatframes.pop(i);
flatframes.insert(last, f);
visit.add(last)
for i, f in enumerate(flatframes):
f.label = i
#set up case statements
for f in flatframes:
n2 = CaseNode(NumLitNode(f.label))
sl = StatementList()
if debug_gen:
sl.add(js_parse("console.log(\"in frame $s\");", [f.label]));
#set line/lexpos data
if f.node != None:
n2.line = f.node.line
n2.lexpos = f.node.lexpos
sl.line = f.node.line
sl.lexpos = f.node.lexpos
f.case_sl = sl
n2.add(sl)
#add to switch statement
sn.add(n2)
def set_linepos(n, line, lexpos):
n.line = line
n.lexpos = lexpos
for c in n:
set_linepos(c, line, lexpos)
for f in flatframes:
if f.leaf:
for c in f:
c.frame = f
else:
f.node.frame = f
#handle loop breaks/continues
visit = set()
def visit_breaks(n):
wn = n
if n in visit: return
visit.add(n)
while type(wn) not in [WhileNode, DoWhileNode, ForLoopNode]:
if type(wn) == SwitchNode:
typespace.error("Switches in generators not supported yet.", wn);
wn = wn.parent
if not wn:
typespace.error("Invalid break statement.", n);
if "frame" not in wn.__dict__:
return
f = wn.frame
i = n.parent.index(n)
n2 = js_parse("$s=$s;", ("$__state", f_next(f).label))
if "frame" in n.__dict__:
n.frame.insert(n.frame.index(n), n2)
else:
n.parent.insert(i, n2)
def visit_continues(n):
if n in visit: return
visit.add(n)
wn = n
while wn != None and (type(wn) not in [WhileNode, DoWhileNode, ForLoopNode]):
wn = wn.parent
if wn == None:
typespace.error("Invalid continue statement.", n);
if "frame" not in wn.__dict__:
return
f = wn.frame
i = n.parent.index(n)
n2 = js_parse("$s=$s;", ("$__state", f.label));
n3 = BreakNode();
visit.add(n3)
n.parent.remove(n)
n.frame.replace(n, n2)
n.frame.insert(n.frame.index(n2)+1, n3)
def handle_yields(node):
slist = js_parse("""$__ret = this.ret;""");
is_empty = type(node[0]) == ExprNode and len(node[0]) == 0
if is_empty:
slist.add(js_parse("""$s.value = undefined;""", ["$__ret"], start_node=AssignNode));
else:
slist.add(js_parse("""$s.value = $n;""", ["$__ret", node[0]], start_node=AssignNode))
slen = len(slist)
#print(slist)
if node in node.parent:
i = node.parent.index(node)
node.parent.remove(node)
for j in range(slen):
node.parent.insert(i, slist[slen-j-1])
i = node.frame.index(node)
node.frame.remove(node)
for j in range(slen):
node.frame.insert(i, slist[slen-j-1])
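#for illustration: handle_yields rewrites a statement like "yield x;" into
#roughly "$__ret = this.ret; $__ret.value = x;", splicing those statements
#into both the AST parent and the owning frame in place of the YieldNode;
#the driver loop then breaks out and returns this.ret to the caller.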
#handle loop breaks
for f in flatframes:
if not f.leaf: continue;
for c in f:
traverse(c, BreakNode, visit_breaks, exclude=FunctionNode)
pass
#handle loop continues
for f in flatframes:
if not f.leaf: continue;
for c in f:
traverse(c, ContinueNode, visit_continues, exclude=FunctionNode)
pass
#handle yields
for f in flatframes:
if not f.leaf: continue
for c in f:
traverse(c, YieldNode, handle_yields, exclude=FunctionNode);
def has_common_parent(n1, n2, p):
while n1 != p and n1 != None:
n1 = n1.parent
while n2 != p and n2 != None:
n2 = n2.parent
if n1 == n2 and n1 == p: return True
else: return False
#build control code
for f in flatframes:
if f.leaf: continue
n = f.node
sl = f.case_sl
if type(n) == IfNode:
f2 = f_first(f)
if f2 == None: #empty if node
f2 = Frame()
f2.label = len(flatframes)
if len(n) > 2:
f3 = n[2].frame
else:
f3 = f_next(f)
f.paths += [f2, f3]
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ["$__state", n[0], f2.label, f3.label]);
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
elif type(n) == ElseNode:
f2 = f_first(f)
if f2 == None: #empty else node
f2 = Frame()
f2.label = len(flatframes)
f.paths += [f2]
n2 = js_parse(";$s = $s;", ("$__state", str(f2.label)))
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
elif type(n) == WhileNode:
f.paths += [f_first(f), f_next(f, False)]
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ("$__state", n[0], f_first(f).label, f_next(f, False).label));
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
elif type(n) == ForLoopNode:
#okay, why did I say to ignore loops here?
f.paths += [f_first(f), f_next(f, False)]
if type(n[0]) == ForCNode:
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ("$__state", n[0][1], f_first(f).label, f_next(f, False).label));
set_linepos(n2, n.line, n.lexpos);
sl.add(n2)
else:
typespace.error("process_generators expects unpacked iterator for loops")
elif type(n) == DoWhileNode:
f.paths += [f_first(f), f_next(f, False)]
n2 = js_parse("""
$s = ($n) ? $s : $s;
""", ("$__state", n[0], f_first(f).label, f_next(f, False).label), start_node=AssignNode)
set_linepos(n2, n.line, n.lexpos)
sl.add(n2)
elif type(n) == TryNode:
f.paths += [f_first(f)]
cn = f_raw_next(f).node
if type(cn) != CatchNode:
typespace.error("Missing catch block", f.node)
ident = cn[0].gen_js(0).replace("scope.", "")
n2 = js_parse("$s = $s;", ("$__state", f_first(f).label), start_node=AssignNode)
n3 = js_parse("this.push_trystack($s, \"$s\");", [f_raw_next(f).label, ident])
set_linepos(n2, n.line, n.lexpos)
set_linepos(n3, n.line, n.lexpos)
sl.add(n2)
sl.add(n3)
elif type(n) == CatchNode:
f.paths += [f_first(f)]
n2 = js_parse("$s = $s;", ("$__state", f_first(f).label), start_node=AssignNode)
set_linepos(n2, n.line, n.lexpos)
sl.add(n2)
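#for illustration: an IfNode frame reduces to a conditional state jump of
#the form "$__state = (<cond>) ? <then frame> : <else or next frame>;",
#loop frames emit the same shape with the loop body as the true target,
#and try frames additionally push [catch frame label, catch var name]
#onto this.trystack via this.push_trystack().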
#build leaf code
for f in flatframes:
if not f.leaf: continue
sl = f.case_sl
for n in f:
sl.add(n)
f2 = f_next(f)
sl.add(js_parse(";$s=$s;", ("$__state", str(f2.label))))
f.paths += [f2]
#add in pop_trystack calls
for f in flatframes:
if type(f.node) != TryNode: continue
f2 = f_last(f)
ps = find_exit_points(f)
for f2 in ps:
f2.case_sl.add(js_parse("this.pop_trystack();"))
#add case breaks
for f in flatframes:
bn = BreakNode()
bn.line = f.case_sl.line
bn.lexpos = f.case_sl.lexpos
f.case_sl.add(bn);
#add final state case
cn = CaseNode(NumLitNode(len(flatframes)))
sl2 = StatementList()
sl2.add(BreakNode())
cn.add(sl2)
sn.add(cn)
#default case
df = DefaultCaseNode()
df.add(js_parse("console.log(\"Generator state error\"); console.trace();"))
df[0].add(BreakNode())
sn.add(df)
outernode = js_parse("""
function() {
var __gen_this2 = this;
function _generator_iter() {
}
return new _generator_iter();
}
""", start_node=FunctionNode);
#add a self-referencing [Symbol.iterator] method
n = js_parse("""
this[Symbol.iterator] = function() {
return this;
}
""");
for c in n:
node.add(c);
#and, a es5.1-style forEach method
n = js_parse("""
this.forEach = function(callback, thisvar) {
if (thisvar == undefined)
thisvar = self;
var _i = 0;
while (1) {
var ret = this.next();
if (ret == undefined || ret.done || (ret._ret != undefined && ret._ret.done))
break;
callback.call(thisvar, ret.value);
if (_i++ > 100) {
console.log("inf loop", ret);
break;
}
}
}
""");
for c in n:
node.add(c);
outernode.name = node.name;
if node.is_anonymous:
outernode.is_anonymous = True
outernode.replace(outernode[0], node[0])
node.parent.replace(node, outernode);
node2 = outernode[2]
cs = node[:]
for c in cs[1:]:
node2.add(c)
#print(outernode, "\n\n\n", outernode[2])
def bleh():
for frames in flatframes:
fname = f_name(frames)
n = js_parse("""
function $s1(scope) {
if (_do_frame_debug) console.log("in $s1");
}""", (fname), start_node=FunctionNode)
if type(n[1]) != StatementList:
n.replace(n[1], StatementList())
n = n[1]
func = n
while type(func) != FunctionNode:
func = func.parent
excl = (type(frames.node) == StatementList and type(frames.parent.node) == FunctionNode)
if frames.node != None and not excl and type(frames.node) != FunctionNode:
f = frames
sl = StatementList()
f.node[f.node._startcur] = sl
frames.funcnode = func
frames.subnode = frames.funcnode
local_frames = "["
totframes = 0
for i, f in enumerate(frames):
if type(f) != Frame:
frames.subnode.add(f)
frames.leaf = True
else:
frames.leaf = False
if len(local_frames) > 1: local_frames += ", "
local_frames += f_ref(f) #.replace("this.", "")
totframes += 1
if f.node != None and type(f.node) != FunctionNode:
if len(f.node.children) > f.node._startcur + 1:
do_conv(f.node, f)
if frames.leaf:
f2 = f_next(frames)
f2 = f2.label if f2 != -1 else -1
frames.subnode.add(js_parse("return [$i, undefined];", [f2], start_node=ReturnNode));
local_frames = "%s_frames = "%f_ref(frames) + local_frames + "];"
frames.frames = js_parse(local_frames)
frames.totframes = totframes
def build_next(f, parent=None):
if type(f) != Frame:
return
subnode = f.subnode
if f.label >= 0: # and f.label < 3:
n2 = js_parse("this.$s1 = 0;", [f_name(f)], start_node=AssignNode)
n2.replace(n2[1], f.funcnode)
f.funcnode.name = "(anonymous)"
f.funcnode.is_anonymous = True
node2.add(n2) #f.funcnode)
if f.totframes > 0:
if f.node != None and type(f.node) == WhileNode:
f2 = f_next(f)
f2 = f2.label if f2 != -1 else -1
n = js_parse("""
if (!"placeholder") {
return [$i1, undefined];
}
""", [f2])
if n == None:
typespace.error("internal error", subnode);
n2 = find_node(n, StrLitNode);
n2.parent.replace(n2, f.node[0])
subnode.add(n)
f2 = f_first(f);
n.add(js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode))
elif f.node != None and type(f.node) == TryNode:
n = StatementList()
if n == None:
typespace.error("internal error", subnode);
f3 = f_raw_next(f)
while f3 != -1 and type(f3.node) != CatchNode:
f3 = f_raw_next(f3);
if f3 == -1:
typespace.error("Orphaned try block", f.node)
f3name = "_nfothing"
if len(f3.node) > 0:
f3name = f3.node[0].gen_js(0).replace("scope.", "")
n.add(js_parse("""
this.trystack.push([$i, "$s"]);
""", [f3.label, f3name]))
f2 = f_first(f);
n.add(js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode))
subnode.add(n)
f2.pop_trystack = True
elif f.node != None and type(f.node) == IfNode:
f2 = f_first(f)
f1 = f_raw_next(f)
while type(f1.node) != ElseNode and f1.label != len(flatframes):
f1 = f_raw_next(f1)
if f1.label == len(flatframes):
f1 = f_next(f)
n = js_parse("""
if (!("placeholder")) {
return [$i1, undefined];
} else {
return [$i2, undefined];
}
""", [f1.label, f2.label]);
n2 = find_node(n, StrLitNode)
n2.parent.replace(n2, f.node[0].copy())
if n == None:
typespace.error("internal error", subnode);
f2 = f_first(f);
n.add(js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode))
subnode.add(n)
f2.pop_trystack = True
elif f.node != None and type(f.node) == ElseNode:
f2 = f_first(f)
f1 = f_raw_next(f)
while type(f1.node) != ElseNode and f1.label != len(flatframes):
f1 = f_raw_next(f1)
if f1.label == len(flatframes):
f1 = f_next(f)
n = js_parse("""
return [$i1, undefined];
""", [f2.label]);
if n == None:
typespace.error("internal error", subnode);
f2 = f_first(f);
subnode.add(n)
elif f.node != None and type(f.node) == CatchNode:
f2 = f_first(f)
n = js_parse("""
return [$i1, undefined];
""", [f2.label]);
if n == None:
typespace.error("internal error", subnode);
subnode.add(n)
elif f.node != None and type(f.node) == ForLoopNode:
f2 = f_first(f);
f3 = f_next(f)
f3 = f3.label if f3 != -1 else -1
f2 = f2.label if f2 != -1 else -1
n = js_parse("""
if ($n) {
return [$i, undefined];
} else {
return [$i, undefined];
}
""", [f.node[0][1], f2, f3])
if n == None:
typespace.error("internal error", subnode);
subnode.add(n)
node2.insert(1, js_parse("""
this[Symbol.iterator] = function() {
return this;
}
""")[0])
for f in flatframes:
build_next(f, f.parent)
#process returns from within try nodes
for f in flatframes:
if f.parent != None and type(f.parent.node) == TryNode:
def visit_rets1(n2):
target = n2[0][0][0].val
isyield = n2[0][0][1].val
ni = n2.parent.index(n2)
if target >= f_next(f.parent).label:
n3 = js_parse("this.trystack.pop();")[0]
n2.parent.insert(ni, n3)
traverse(f.subnode, ReturnNode, visit_rets1, copy_children=True);
#process yields
for f in flatframes:
f2 = f.parent
set_yield = None
def visit_rets2(n2):
if set_yield != None:
#print(n2)
n2[0][0].replace(n2[0][0][1], set_yield);
set_yield = find_node(f.subnode, YieldNode);
if set_yield != None:
set_yield.parent.remove(set_yield);
set_yield = ArrayLitNode(ExprListNode([set_yield[0]]))
traverse(f.subnode, ReturnNode, visit_rets2, copy_children=True);
def find_parent_frame(f, ntypes, include_first=True):
p = f
if not include_first:
p = p.parent
while p != None:
if type(p.node) in ntypes:
return p
p = p.parent
return None
#process breaks
for f in flatframes:
f2 = f.parent
def visit_rets3(n2):
p = n2.parent
while not null_node(p) and p != f.subnode:
if type(p) in [WhileNode, DoWhileNode, ForLoopNode]: break
p = p.parent
if p != f.subnode and not null_node(p): return #break corresponds to a loop internal to this frame
p = find_parent_frame(f, [WhileNode, DoWhileNode, ForLoopNode], True)
if p == None:
typespace.error("Invalid break statement (switches within generators aren't supported yet)", n2)
f2 = f_next(p)
n3 = js_parse("return [$i, undefined];", [f2.label], start_node=ReturnNode);
n2.parent.replace(n2, n3)
traverse(f.subnode, BreakNode, visit_rets3, copy_children=True);
#process continues
for f in flatframes:
f2 = f.parent
def visit_rets3(n2):
p = n2.parent
while not null_node(p) and p != f.subnode:
p = p.parent
if p != f.subnode and not null_node(p): return #continue corresponds to a loop internal to this frame
p = f.parent
while p != None:
if type(p.node) in [WhileNode, DoWhileNode, ForLoopNode]:
break;
p = p.parent
if p == None:
typespace.error("Invalid continue statement")
n3 = js_parse("return [$i, undefined];", [p.label], start_node=ReturnNode);
n2.parent.replace(n2, n3)
traverse(f.subnode, ContinueNode, visit_rets3, copy_children=True);
firstnode = js_parse("if (this.first) {\n}", start_node=IfNode)
firstnode2 = js_parse("if (this.first) {\n}", start_node=IfNode)
firstnode.replace(firstnode[1], StatementList())
firstnode2.replace(firstnode2[1], StatementList())
flatframes[0].subnode.add(firstnode);
node2.insert(1, firstnode2[1]);
firstnode = firstnode[1]
firstnode2 = firstnode2[1]
args = list(node.children[0])
for i3 in range(len(args)):
argn = args[i3]
while type(argn) not in [IdentNode, VarDeclNode]:
argn = argn[0]
args[i3] = argn.val
scope = {}
for f in flatframes:
scope.update(f.scope)
s = "{"
j2 = 0
for j, v in enumerate(scope.keys()):
if j2 > 0: s += ", "
j2 += 1
if v in args:
s += "%s:%s" % ("%s_%s"%(v, scope[v]), v)
else:
s += "%s:undefined" % ("%s_%s"%(v, scope[v]))
s += "}"
s = "this.scope = %s;\n" % s
firstnode2.add(js_parse(s)[0])
#ensure all frames have returns
for f in flatframes:
if not find_node(f.subnode, ReturnNode):
f.subnode.add(js_parse("return [$i, undefined];", [f_next(f).label], start_node=ReturnNode));
framelist = "["
for i, f in enumerate(flatframes):
if i > 0: framelist += ", "
framelist += "this.frame_%i" % f.label
framelist = "this.frames = %s];"%framelist
node2.add(js_parse(framelist));
node2.add(js_parse("""
this.cur = 1;
this.trystack = new Array();
this.next = function() {
var ret;
while (this.cur < this.frames.length) {
try {
ret = this.frames[this.cur].call(this, this.scope);
} catch (_generator_error) {
if (this.trystack.length > 0) {
var ts1 = this.trystack.pop();
this.scope[ts1[1]] = _generator_error;
ret = [ts1[0], undefined];
} else {
throw _generator_error;
}
}
if (ret[0] == this.frames.length) {
return {done : true, value : undefined};
break;
}
if (ret[0] == this.cur) {
console.trace();
console.log("YEEK!")
return {done : true, value : undefined};
}
this.cur = ret[0];
if (ret[1] != undefined) {
return {value : ret[1][0], done : false};
} else {
return {value : undefined, done : false};
}
}
}
""", []))
node.parent.replace(node, node2)
def process_generators(result, tspace):
global typespace
typespace = tspace
traverse(result, YieldNode, visit_yields)
traverse(result, FunctionNode, visit_generators)
del_attrs = []
def cleanup_generator_garbage(n):
for a in del_attrs:
if hasattr(n, a):
delattr(n, a)
for c in n.children:
cleanup_generator_garbage(c)
cleanup_generator_garbage(result)
def process_generators_old(result, typespace):
def visit_yields(node):
p = node
while not null_node(p) and type(p) != FunctionNode:
p = p.parent
if null_node(p):
typespace.error("yield keyword only valid within functions")
p.is_generator = True
traverse(result, YieldNode, visit_yields)
def node_has_yield(node):
if type(node) == YieldNode:
return True
for c in node.children:
if type(c) == FunctionNode:
continue
ret = node_has_yield(c)
if ret: return True
return False
def visit_generators(node):
def print_frames(frames, tlevel=0):
tstr = tab(tlevel)
tstr2 = tab(tlevel+1)
s = ""
for f in frames:
if type(f) == Frame:
if f.node != None:
nstr = "%s %d " % (f.node.get_line_str(), f.label)
else:
nstr = str(f.label) + " "
s += tstr + nstr + "{\n" + print_frames(f, tlevel+1)
s += tstr + "}\n";
else:
s += tstr + f.get_line_str() + "\n"
if tlevel == 0:
print(s)
return s
if 0:
file = open("generator_test.html", "w")
file.write("""
<html><head><title>Generator Test</title></head>
<script>
FrameContinue = {1:1};
FrameBreak = {2:2};
""")
file.write(node2.gen_js(3).replace("yield", "return"))
file.write("""
j = 0;
for (var tst in new range(2, 8)) {
console.log(tst);
if (j > 10)
break;
j++;
}
</script>
</html>
""")
file.close()
#print(node2.gen_js(1))
#print_frames(frames2)
traverse(result, FunctionNode, visit_generators)
del_attrs = ["_cur", "_startcur", "frame", "return_frame", "pop_trystack"]
def cleanup_generator_garbage(n):
for a in del_attrs:
if hasattr(n, a):
delattr(n, a)
for c in n.children:
cleanup_generator_garbage(c)
cleanup_generator_garbage(result)
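#summary of the emitted runtime contract: the rewritten function returns an
#iterator object whose next() drives the switch-based state machine and
#returns ES6-style {value, done} results, with done flipping to true once
#$__state runs past the last frame; the older code path kept above
#(bleh/build_next/process_generators_old) instead has each frame function
#return a [next_state, value] pair that its this.next() driver unpacks.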
|
|
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet, note that glmnet divides the
# objective by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
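# Illustrative sketch (not one of the original tests): build_dataset is used
# throughout this file in the pattern below. The helper name is hypothetical
# and prefixed with an underscore so test collectors skip it.
def _example_lasso_fit_on_synthetic_data():
    X, y, X_test, y_test = build_dataset()
    # Only the first 10 features are informative, so a moderately penalized
    # Lasso is expected to recover a sparse coefficient vector.
    clf = Lasso(alpha=0.1).fit(X, y)
    return clf.score(X_test, y_test)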
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the selected alphas do not differ by more
# than one position in the clf.alphas_ grid
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1)
assert_warns_message(ConvergenceWarning, 'did not converge', clf.fit, X, Y)
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
y = rng.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises_regex(ValueError, ".*should be.*True.*False.*auto.*"
"array-like.*Got 'invalid'", clf.fit, X, y)
# Precompute = 'auto' is not supported for ElasticNet
assert_raises_regex(ValueError, ".*should be.*True.*False.*array-like.*"
"Got 'auto'", ElasticNet(precompute='auto').fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fit a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in
# incorrect computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
def test_enet_float_precision():
# Generate dataset
X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10)
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
for normalize in [True, False]:
for fit_intercept in [True, False]:
coef = {}
intercept = {}
for dtype in [np.float64, np.float32]:
clf = ElasticNet(alpha=0.5, max_iter=100, precompute=False,
fit_intercept=fit_intercept,
normalize=normalize)
X = dtype(X)
y = dtype(y)
ignore_warnings(clf.fit)(X, y)
coef[('simple', dtype)] = clf.coef_
intercept[('simple', dtype)] = clf.intercept_
assert_equal(clf.coef_.dtype, dtype)
# test precompute Gram array
Gram = X.T.dot(X)
clf_precompute = ElasticNet(alpha=0.5, max_iter=100,
precompute=Gram,
fit_intercept=fit_intercept,
normalize=normalize)
ignore_warnings(clf_precompute.fit)(X, y)
assert_array_almost_equal(clf.coef_, clf_precompute.coef_)
assert_array_almost_equal(clf.intercept_,
clf_precompute.intercept_)
# test multi task enet
multi_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_multioutput = MultiTaskElasticNet(
alpha=0.5, max_iter=100, fit_intercept=fit_intercept,
normalize=normalize)
clf_multioutput.fit(X, multi_y)
coef[('multi', dtype)] = clf_multioutput.coef_
intercept[('multi', dtype)] = clf_multioutput.intercept_
assert_equal(clf.coef_.dtype, dtype)
for v in ['simple', 'multi']:
assert_array_almost_equal(coef[(v, np.float32)],
coef[(v, np.float64)],
decimal=4)
assert_array_almost_equal(intercept[(v, np.float32)],
intercept[(v, np.float64)],
decimal=4)
def test_enet_l1_ratio():
# Test that an error message is raised if an estimator that
# uses _alpha_grid is called with l1_ratio=0
msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
"Please supply a grid by providing your estimator with the "
"appropriate `alphas=` argument.")
X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T
y = np.array([12, 10, 11, 21, 5])
assert_raise_message(ValueError, msg, ElasticNetCV(
l1_ratio=0, random_state=42).fit, X, y)
assert_raise_message(ValueError, msg, MultiTaskElasticNetCV(
l1_ratio=0, random_state=42).fit, X, y[:, None])
# Test that l1_ratio=0 is allowed if we supply a grid manually
alphas = [0.1, 10]
estkwds = {'alphas': alphas, 'random_state': 42}
est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds)
est = ElasticNetCV(l1_ratio=0, **estkwds)
with ignore_warnings():
est_desired.fit(X, y)
est.fit(X, y)
assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds)
est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds)
with ignore_warnings():
est.fit(X, y[:, None])
est_desired.fit(X, y[:, None])
assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
|
|
import base64
import boto
from boto.ec2.instance import Reservation, InstanceAttribute
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
################ Test Readme ###############
def add_servers(ami_id, count):
conn = boto.connect_ec2()
for index in range(count):
conn.run_instances(ami_id)
@mock_ec2
def test_add_servers():
add_servers('ami-1234abcd', 2)
conn = boto.connect_ec2()
reservations = conn.get_all_instances()
assert len(reservations) == 2
instance1 = reservations[0].instances[0]
assert instance1.image_id == 'ami-1234abcd'
############################################
@mock_ec2
def test_instance_launch_and_terminate():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
instance.state.should.equal('pending')
reservations = conn.get_all_instances()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id)
instances[0].state.should.equal('running')
conn.terminate_instances([instances[0].id])
reservations = conn.get_all_instances()
instance = reservations[0].instances[0]
instance.state.should.equal('terminated')
@mock_ec2
def test_get_instances_by_id():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=2)
instance1, instance2 = reservation.instances
reservations = conn.get_all_instances(instance_ids=[instance1.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(1)
reservation.instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_instances(instance_ids=[instance1.id, instance2.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(2)
instance_ids = [instance.id for instance in reservation.instances]
instance_ids.should.equal([instance1.id, instance2.id])
# Calling get_all_instances with a bad id should raise an error
conn.get_all_instances.when.called_with(instance_ids=[instance1.id, "i-1234abcd"]).should.throw(
EC2ResponseError,
"The instance ID 'i-1234abcd' does not exist"
)
@mock_ec2
def test_get_instances_filtering_by_state():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
conn.terminate_instances([instance1.id])
reservations = conn.get_all_instances(filters={'instance-state-name': 'running'})
reservations.should.have.length_of(1)
# Since we terminated instance1, only instance2 and instance3 should be returned
instance_ids = [instance.id for instance in reservations[0].instances]
set(instance_ids).should.equal(set([instance2.id, instance3.id]))
reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'running'})
reservations.should.have.length_of(1)
instance_ids = [instance.id for instance in reservations[0].instances]
instance_ids.should.equal([instance2.id])
reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminated'})
list(reservations).should.equal([])
# get_all_instances should still return all 3
reservations = conn.get_all_instances()
reservations[0].instances.should.have.length_of(3)
conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
@mock_ec2
def test_get_instances_filtering_by_instance_id():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
reservations = conn.get_all_instances(filters={'instance-id': instance1.id})
# get_all_instances should return just instance1
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_instances(filters={'instance-id': [instance1.id, instance2.id]})
# get_all_instances should return two
reservations[0].instances.should.have.length_of(2)
reservations = conn.get_all_instances(filters={'instance-id': 'non-existing-id'})
reservations.should.have.length_of(0)
@mock_ec2
def test_instance_start_and_stop():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', min_count=2)
instances = reservation.instances
instances.should.have.length_of(2)
instance_ids = [instance.id for instance in instances]
stopped_instances = conn.stop_instances(instance_ids)
for instance in stopped_instances:
instance.state.should.equal('stopping')
started_instances = conn.start_instances([instances[0].id])
started_instances[0].state.should.equal('pending')
@mock_ec2
def test_instance_reboot():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.reboot()
instance.state.should.equal('pending')
@mock_ec2
def test_instance_attribute_instance_type():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute("instanceType", "m1.small")
instance_attribute = instance.get_attribute("instanceType")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get('instanceType').should.equal("m1.small")
@mock_ec2
def test_instance_attribute_user_data():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute("userData", "this is my user data")
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("userData").should.equal("this is my user data")
@mock_ec2
def test_user_data_with_run_instance():
user_data = "some user data"
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', user_data=user_data)
instance = reservation.instances[0]
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
decoded_user_data = base64.decodestring(instance_attribute.get("userData"))
decoded_user_data.should.equal("some user data")
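# The userData attribute comes back base64-encoded (per the EC2
# DescribeInstanceAttribute API), hence the decodestring call above before
# comparing against the original string.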
@mock_ec2
def test_run_instance_with_security_group_name():
conn = boto.connect_ec2('the_key', 'the_secret')
group = conn.create_security_group('group1', "some description")
reservation = conn.run_instances('ami-1234abcd',
security_groups=['group1'])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2
def test_run_instance_with_security_group_id():
conn = boto.connect_ec2('the_key', 'the_secret')
group = conn.create_security_group('group1', "some description")
reservation = conn.run_instances('ami-1234abcd',
security_group_ids=[group.id])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2
def test_run_instance_with_instance_type():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', instance_type="t1.micro")
instance = reservation.instances[0]
instance.instance_type.should.equal("t1.micro")
@mock_ec2
def test_run_instance_with_subnet():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd',
subnet_id="subnet-abcd1234")
instance = reservation.instances[0]
instance.subnet_id.should.equal("subnet-abcd1234")
@mock_ec2
def test_run_instance_with_keypair():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
instance = reservation.instances[0]
instance.key_name.should.equal("keypair_name")
@mock_ec2
def test_describe_instance_status_no_instances():
conn = boto.connect_ec2('the_key', 'the_secret')
all_status = conn.get_all_instance_status()
len(all_status).should.equal(0)
@mock_ec2
def test_describe_instance_status_with_instances():
conn = boto.connect_ec2('the_key', 'the_secret')
conn.run_instances('ami-1234abcd', key_name="keypair_name")
all_status = conn.get_all_instance_status()
len(all_status).should.equal(1)
all_status[0].instance_status.status.should.equal('ok')
all_status[0].system_status.status.should.equal('ok')
@mock_ec2
def test_describe_instance_status_with_instance_filter():
conn = boto.connect_ec2('the_key', 'the_secret')
# We want to filter based on this one
reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
instance = reservation.instances[0]
# This is just to set up the test
conn.run_instances('ami-1234abcd', key_name="keypair_name")
all_status = conn.get_all_instance_status(instance_ids=[instance.id])
len(all_status).should.equal(1)
all_status[0].id.should.equal(instance.id)
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
self.message = str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
with a slippage object that isn't a VolumeShareSlippage or
FixedSlippage.
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class SetSlippagePostInit(ZiplineError):
# Raised if a user's script calls the set_slippage magic
# after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script registers a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
# Raised if a user's script registers an account control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
Raised if a user's script calls the set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters is passed to an order
call.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by an AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
'as_of_date' argument to specify when the symbol-lookup
should be valid.
Possible options: {options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs['sids']
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder cannot consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain either or both of 'sid' and 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder cannot map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class InputTermNotAtomic(ZiplineError):
"""
Raised when a non-atomic term is specified as an input to a Pipeline API
term with a lookback window.
"""
msg = (
"Can't compute {parent} with non-atomic input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying a
window_length and that term does not have a class-level default window_length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class InvalidDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and 100.0, and min must be "
"less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call attach_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize()."
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
msg = "{typename} instances with dtype {dtype} are not supported."
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# than can be usefully templated.
msg = '{msg}'
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class PositionTrackerMissingAssetFinder(ZiplineError):
"""
Raised by a PositionTracker if it is asked to update an Asset but does not
have an AssetFinder
"""
msg = (
"PositionTracker attempted to update its Asset information but does "
"not have an AssetFinder. This may be caused by a failure to properly "
"de-serialize a TradingAlgorithm."
)
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
|
|
'''Pulsar HTTP test application::
python manage.py
Implementation
======================
.. autoclass:: HttpBin
:members:
:member-order: bysource
Server Hooks
===================
This example shows how to use
:ref:`server hooks <setting-section-application-hooks>` to log each request
.. automodule:: examples.httpbin.config
:members:
'''
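# A quick way to exercise the application once the server is running; the bind
# address below is an assumption, use whatever address you start the server
# with:
#
#     python manage.py --bind localhost:8060
#     curl http://localhost:8060/get
#     curl -X POST -d 'a=1' http://localhost:8060/post
#     curl http://localhost:8060/status/418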
import os
import sys
import string
import mimetypes
from itertools import repeat, chain
from random import random
from pulsar import HttpRedirect, HttpException, version, JAPANESE, CHINESE
from pulsar.utils.httpurl import (Headers, ENCODE_URL_METHODS,
ENCODE_BODY_METHODS)
from pulsar.utils.html import escape
from pulsar.apps import wsgi, ws
from pulsar.apps.wsgi import (route, Html, Json, HtmlDocument, GZipMiddleware,
AsyncString)
from pulsar.utils.structures import MultiValueDict
from pulsar.utils.system import json
METHODS = frozenset(chain((m.lower() for m in ENCODE_URL_METHODS),
(m.lower() for m in ENCODE_BODY_METHODS)))
pyversion = '.'.join(map(str, sys.version_info[:3]))
ASSET_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets')
FAVICON = os.path.join(ASSET_DIR, 'favicon.ico')
characters = string.ascii_letters + string.digits
def asset(name, mode='r', chunk_size=None):
name = os.path.join(ASSET_DIR, name)
if os.path.isfile(name):
content_type, encoding = mimetypes.guess_type(name)
if chunk_size:
def _chunks():
with open(name, mode) as file:
while True:
data = file.read(chunk_size)
if not data:
break
yield data
data = _chunks()
else:
with open(name, mode) as file:
data = file.read()
return data, content_type, encoding
class BaseRouter(wsgi.Router):
########################################################################
# INTERNALS
def bind_server_event(self, request, event, handler):
consumer = request.environ['pulsar.connection'].current_consumer()
consumer.bind_event(event, handler)
def info_data_response(self, request, **params):
data = self.info_data(request, **params)
return Json(data).http_response(request)
def info_data(self, request, **params):
headers = self.getheaders(request)
data = {'method': request.method,
'headers': headers,
'pulsar': self.pulsar_info(request)}
if request.method in ENCODE_URL_METHODS:
data['args'] = dict(request.url_data)
else:
args, files = request.data_and_files()
jfiles = MultiValueDict()
for name, parts in files.lists():
for part in parts:
try:
part = part.string()
except UnicodeError:
part = part.base64()
jfiles[name] = part
data.update((('args', dict(args)),
('files', dict(jfiles))))
data.update(params)
return data
def getheaders(self, request):
headers = Headers(kind='client')
for k in request.environ:
if k.startswith('HTTP_'):
headers[k[5:].replace('_', '-')] = request.environ[k]
return dict(headers)
def pulsar_info(self, request):
return request.get('pulsar.connection').info()
class HttpBin(BaseRouter):
'''The main :class:`.Router` for the HttpBin application
'''
def get(self, request):
'''The home page of this router'''
ul = Html('ul')
for router in sorted(self.routes, key=lambda r: r.creation_count):
a = router.link(escape(router.route.path))
a.addClass(router.name)
for method in METHODS:
if router.getparam(method):
a.addClass(method)
li = Html('li', a, ' %s' % router.getparam('title', ''))
ul.append(li)
title = 'Pulsar'
html = request.html_document
html.head.title = title
html.head.links.append('httpbin.css')
html.head.links.append('favicon.ico', rel="icon", type='image/x-icon')
html.head.scripts.append('httpbin.js')
ul = ul.render(request)
templ, _, _ = asset('template.html')
body = templ % (title, JAPANESE, CHINESE, version, pyversion, ul)
html.body.append(body)
return html.http_response(request)
@route(title='Returns GET data')
def get_get(self, request):
return self.info_data_response(request)
@route(title='Returns POST data')
def post_post(self, request):
return self.info_data_response(request)
@route(title='Returns PATCH data')
def patch_patch(self, request):
return self.info_data_response(request)
@route(title='Returns PUT data')
def put_put(self, request):
return self.info_data_response(request)
@route(title='Returns DELETE data')
def delete_delete(self, request):
return self.info_data_response(request)
@route('redirect/<int(min=1,max=10):times>', defaults={'times': 5},
title='302 Redirect n times')
def redirect(self, request):
num = request.urlargs['times'] - 1
if num:
raise HttpRedirect('/redirect/%s' % num)
else:
raise HttpRedirect('/get')
@route('getsize/<int(min=1,max=8388608):size>', defaults={'size': 150000},
title='Returns a preset size of data (limit at 8MB)')
def getsize(self, request):
size = request.urlargs['size']
data = {'size': size, 'data': 'd' * size}
return self.info_data_response(request, **data)
@route(title='Returns gzip encoded data')
def gzip(self, request):
response = self.info_data_response(request, gzipped=True)
return GZipMiddleware(10)(request.environ, response)
@route(title='Returns cookie data')
def cookies(self, request):
cookies = request.cookies
d = dict(((c.key, c.value) for c in cookies.values()))
return Json({'cookies': d}).http_response(request)
@route('cookies/set/<name>/<value>', title='Sets a simple cookie',
defaults={'name': 'package', 'value': 'pulsar'})
def request_cookies_set(self, request):
key = request.urlargs['name']
value = request.urlargs['value']
request.response.set_cookie(key, value=value)
request.response.status_code = 302
request.response.headers['location'] = '/cookies'
return request.response
@route('status/<int(min=100,max=505):status>',
title='Returns given HTTP Status code',
defaults={'status': 418})
def status(self, request):
request.response.content_type = 'text/html'
raise HttpException(status=request.urlargs['status'])
@route(title='Returns response headers')
def response_headers(self, request):
class Gen:
headers = None
def __call__(self, server, **kw):
self.headers = server.headers
def generate(self):
# yield an empty chunk so that the headers are sent first
yield b''
# we must have the headers now
yield json.dumps(dict(self.headers))
gen = Gen()
self.bind_server_event(request, 'on_headers', gen)
request.response.content = gen.generate()
request.response.content_type = 'application/json'
return request.response
@route('basic-auth/<username>/<password>',
title='Challenges HTTPBasic Auth',
defaults={'username': 'username', 'password': 'password'})
def challenge_auth(self, request):
auth = request.get('http.authorization')
if auth and auth.authenticated(request.environ, **request.urlargs):
return Json({'authenticated': True,
'username': auth.username}).http_response(request)
raise wsgi.HttpAuthenticate('basic')
@route('digest-auth/<username>/<password>/<qop>',
title='Challenges HTTP Digest Auth',
defaults={'username': 'username',
'password': 'password',
'qop': 'auth'})
def challenge_digest_auth(self, request):
auth = request.get('http.authorization')
if auth and auth.authenticated(request.environ, **request.urlargs):
return Json({'authenticated': True,
'username': auth.username}).http_response(request)
raise wsgi.HttpAuthenticate('digest', qop=[request.urlargs['qop']])
@route('stream/<int(min=1):m>/<int(min=1):n>',
title='Stream m chunk of data n times',
defaults={'m': 300, 'n': 20})
def request_stream(self, request):
m = request.urlargs['m']
n = request.urlargs['n']
request.response.content_type = 'text/plain'
request.response.content = repeat(b'a' * m, n)
return request.response
@route(title='A web socket graph')
def websocket(self, request):
data = open(os.path.join(os.path.dirname(__file__),
'assets', 'websocket.html')).read()
scheme = 'wss' if request.is_secure else 'ws'
host = request.get('HTTP_HOST')
data = data % {'address': '%s://%s/graph-data' % (scheme, host)}
request.response.content_type = 'text/html'
request.response.content = data
return request.response
@route(title='Live server statistics')
def stats(self, request):
'''Live stats for the server.
Try sending lots of requests
'''
# scheme = 'wss' if request.is_secure else 'ws'
# host = request.get('HTTP_HOST')
# address = '%s://%s/stats' % (scheme, host)
doc = HtmlDocument(title='Live server stats', media_path='/assets/')
# docs.head.scripts
return doc.http_response(request)
@route('clip/<int(min=256,max=16777216):chunk_size>',
defaults={'chunk_size': 4096},
title='Show a video clip')
def clip(self, request):
c = request.urlargs['chunk_size']
data, ct, encoding = asset('clip.mp4', 'rb', chunk_size=c)
response = request.response
response.content_type = ct
response.encoding = encoding
response.content = data
return response
########################################################################
# BENCHMARK ROUTES
@route()
def json(self, request):
return Json({'message': "Hello, World!"}).http_response(request)
@route()
def plaintext(self, request):
return AsyncString('Hello, World!').http_response(request)
class ExpectFail(BaseRouter):
def post(self, request):
chunk = request.get('wsgi.input')
if not chunk.done():
chunk.fail()
else:
return self.info_data_response(request)
class Graph(ws.WS):
def on_message(self, websocket, msg):
websocket.write(json.dumps([(i, random()) for i in range(100)]))
class Site(wsgi.LazyWsgi):
def setup(self, environ):
router = HttpBin('/')
return wsgi.WsgiHandler([ExpectFail('expect'),
wsgi.wait_for_body_middleware,
wsgi.clean_path_middleware,
wsgi.authorization_middleware,
wsgi.MediaRouter('media', ASSET_DIR,
show_indexes=True),
ws.WebSocket('/graph-data', Graph()),
router],
async=True)
def server(description=None, **kwargs):
description = description or 'Pulsar HttpBin'
return wsgi.WSGIServer(Site(), description=description, **kwargs)
if __name__ == '__main__': # pragma nocover
server().start()
|
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy as sa
import toolz
import ibis
from ibis.sql.alchemy import unary, varargs, fixed_arity, _variance_reduction
import ibis.sql.alchemy as alch
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.common as com
_operation_registry = alch._operation_registry.copy()
def _cast(t, expr):
# It's not all fun and games with SQLite
op = expr.op()
arg, target_type = op.args
sa_arg = t.translate(arg)
sa_type = t.get_sqla_type(target_type)
if isinstance(target_type, dt.Timestamp):
if isinstance(arg, ir.IntegerValue):
return sa.func.datetime(sa_arg, 'unixepoch')
elif isinstance(arg, ir.StringValue):
return sa.func.strftime('%Y-%m-%d %H:%M:%f', sa_arg)
raise com.UnsupportedOperationError(type(arg))
if isinstance(target_type, dt.Date):
if isinstance(arg, ir.IntegerValue):
return sa.func.date(sa.func.datetime(sa_arg, 'unixepoch'))
elif isinstance(arg, ir.StringValue):
return sa.func.date(sa_arg)
raise com.UnsupportedOperationError(type(arg))
if isinstance(arg, ir.CategoryValue) and target_type == 'int32':
return sa_arg
else:
return sa.cast(sa_arg, sa_type)
def _substr(t, expr):
f = sa.func.substr
arg, start, length = expr.op().args
sa_arg = t.translate(arg)
sa_start = t.translate(start)
if length is None:
return f(sa_arg, sa_start + 1)
else:
sa_length = t.translate(length)
return f(sa_arg, sa_start + 1, sa_length)
def _string_right(t, expr):
f = sa.func.substr
arg, length = expr.op().args
sa_arg = t.translate(arg)
sa_length = t.translate(length)
return f(sa_arg, -sa_length, sa_length)
def _string_find(t, expr):
arg, substr, start, _ = expr.op().args
if start is not None:
raise NotImplementedError
sa_arg = t.translate(arg)
sa_substr = t.translate(substr)
f = sa.func.instr
return f(sa_arg, sa_substr) - 1
def _infix_op(infix_sym):
def formatter(t, expr):
op = expr.op()
left, right = op.args
left_arg = t.translate(left)
right_arg = t.translate(right)
return left_arg.op(infix_sym)(right_arg)
return formatter
def _strftime(t, expr):
arg, format = expr.op().args
sa_arg = t.translate(arg)
sa_format = t.translate(format)
return sa.func.strftime(sa_format, sa_arg)
def _strftime_int(fmt):
def translator(t, expr):
arg, = expr.op().args
sa_arg = t.translate(arg)
return sa.cast(sa.func.strftime(fmt, sa_arg), sa.INTEGER)
return translator
_truncate_modifiers = {
'Y': 'start of year',
'M': 'start of month',
'D': 'start of day',
'W': 'weekday 1',
}
def _truncate(func):
def translator(t, expr):
arg, unit = expr.op().args
sa_arg = t.translate(arg)
try:
modifier = _truncate_modifiers[unit]
except KeyError:
raise com.UnsupportedOperationError(
'Unsupported truncate unit {!r}'.format(unit)
)
return func(sa_arg, modifier)
return translator
def _now(t, expr):
return sa.func.datetime('now')
def _millisecond(t, expr):
arg, = expr.op().args
sa_arg = t.translate(arg)
fractional_second = sa.func.strftime('%f', sa_arg)
return (fractional_second * 1000) % 1000
def _identical_to(t, expr):
left, right = args = expr.op().args
if left.equals(right):
return True
else:
left, right = map(t.translate, args)
return sa.func.coalesce(
(left.is_(None) & right.is_(None)) | (left == right),
False
)
def _log(t, expr):
arg, base = expr.op().args
sa_arg = t.translate(arg)
if base is None:
return sa.func._ibis_sqlite_ln(sa_arg)
return sa.func._ibis_sqlite_log(sa_arg, t.translate(base))
def _repeat(t, expr):
arg, times = map(t.translate, expr.op().args)
f = sa.func
return f.replace(
f.substr(
f.quote(
f.zeroblob((times + 1) / 2)
),
3,
times
),
'0',
arg
)
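# How the quote/zeroblob trick above works (worked example for times = 3):
#   zeroblob((3 + 1) / 2)     -> a 2-byte blob of zeros
#   quote(...)                -> the string X'0000'
#   substr(..., 3, 3)         -> '000' (skip the leading X' and keep exactly
#                                `times` zero characters)
#   replace('000', '0', arg)  -> arg repeated 3 times
# _generic_pad below builds its padding string with the same idea.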
def _generic_pad(arg, length, pad):
f = sa.func
arg_length = f.length(arg)
pad_length = f.length(pad)
number_of_zero_bytes = (
(length - arg_length - 1 + pad_length) / pad_length + 1) / 2
return f.substr(
f.replace(
f.replace(
f.substr(f.quote(f.zeroblob(number_of_zero_bytes)), 3),
"'",
''
),
'0',
pad
),
1,
length - f.length(arg)
)
def _lpad(t, expr):
arg, length, pad = map(t.translate, expr.op().args)
return _generic_pad(arg, length, pad) + arg
def _rpad(t, expr):
arg, length, pad = map(t.translate, expr.op().args)
return arg + _generic_pad(arg, length, pad)
_operation_registry.update({
ops.Cast: _cast,
ops.Substring: _substr,
ops.StrRight: _string_right,
ops.StringFind: _string_find,
ops.Least: varargs(sa.func.min),
ops.Greatest: varargs(sa.func.max),
ops.IfNull: fixed_arity(sa.func.ifnull, 2),
ops.DateTruncate: _truncate(sa.func.date),
ops.TimestampTruncate: _truncate(sa.func.datetime),
ops.Strftime: _strftime,
ops.ExtractYear: _strftime_int('%Y'),
ops.ExtractMonth: _strftime_int('%m'),
ops.ExtractDay: _strftime_int('%d'),
ops.ExtractHour: _strftime_int('%H'),
ops.ExtractMinute: _strftime_int('%M'),
ops.ExtractSecond: _strftime_int('%S'),
ops.ExtractMillisecond: _millisecond,
ops.TimestampNow: _now,
ops.IdenticalTo: _identical_to,
ops.RegexSearch: fixed_arity(sa.func._ibis_sqlite_regex_search, 2),
ops.RegexReplace: fixed_arity(sa.func._ibis_sqlite_regex_replace, 3),
ops.RegexExtract: fixed_arity(sa.func._ibis_sqlite_regex_extract, 3),
ops.LPad: _lpad,
ops.RPad: _rpad,
ops.Repeat: _repeat,
ops.Reverse: unary(sa.func._ibis_sqlite_reverse),
ops.StringAscii: unary(sa.func._ibis_sqlite_string_ascii),
ops.Capitalize: unary(sa.func._ibis_sqlite_capitalize),
ops.Translate: fixed_arity(sa.func._ibis_sqlite_translate, 3),
ops.Sqrt: unary(sa.func._ibis_sqlite_sqrt),
ops.Power: fixed_arity(sa.func._ibis_sqlite_power, 2),
ops.Exp: unary(sa.func._ibis_sqlite_exp),
ops.Ln: unary(sa.func._ibis_sqlite_ln),
ops.Log: _log,
ops.Log10: unary(sa.func._ibis_sqlite_log10),
ops.Log2: unary(sa.func._ibis_sqlite_log2),
ops.Floor: unary(sa.func._ibis_sqlite_floor),
ops.Ceil: unary(sa.func._ibis_sqlite_ceil),
ops.Sign: unary(sa.func._ibis_sqlite_sign),
ops.FloorDivide: fixed_arity(sa.func._ibis_sqlite_floordiv, 2),
ops.Modulus: fixed_arity(sa.func._ibis_sqlite_mod, 2),
ops.Variance: _variance_reduction('_ibis_sqlite_var'),
ops.StandardDev: toolz.compose(
sa.func._ibis_sqlite_sqrt,
_variance_reduction('_ibis_sqlite_var')
),
})
def add_operation(op, translation_func):
_operation_registry[op] = translation_func
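# A minimal sketch of extending the registry via add_operation; the mapping
# below is illustrative (the base alchemy registry may already provide it):
#
#     add_operation(ops.StringLength, unary(sa.func.length))
#
# SQLiteExprTranslator picks up the new rule because it shares
# _operation_registry with this module.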
class SQLiteExprTranslator(alch.AlchemyExprTranslator):
_registry = _operation_registry
_rewrites = alch.AlchemyExprTranslator._rewrites.copy()
_type_map = alch.AlchemyExprTranslator._type_map.copy()
_type_map.update({
dt.Double: sa.types.REAL,
dt.Float: sa.types.REAL
})
rewrites = SQLiteExprTranslator.rewrites
compiles = SQLiteExprTranslator.compiles
class SQLiteDialect(alch.AlchemyDialect):
translator = SQLiteExprTranslator
dialect = SQLiteDialect
@rewrites(ops.DayOfWeekIndex)
def day_of_week_index(expr):
return (
(expr.op().arg.strftime('%w').cast(dt.int16) + 6) % 7
).cast(dt.int16)
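# SQLite's strftime('%w') returns 0-6 with Sunday == 0; the (x + 6) % 7 shift
# above re-bases the index so Monday == 0, matching the name mapping below.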
@rewrites(ops.DayOfWeekName)
def day_of_week_name(expr):
return (
expr.op().arg.day_of_week.index()
.case()
.when(0, 'Monday')
.when(1, 'Tuesday')
.when(2, 'Wednesday')
.when(3, 'Thursday')
.when(4, 'Friday')
.when(5, 'Saturday')
.when(6, 'Sunday')
.else_(ibis.NA)
.end()
)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine.cfn import functions as cfn_funcs
from heat.engine import function
from heat.engine.hot import functions as hot_funcs
from heat.engine import properties
class TranslationRule(object):
"""Translating mechanism one properties to another.
Mechanism uses list of rules, each defines by this class, and can be
executed. Working principe: during resource creating after properties
defining resource take list of rules, specified by method
translation_rules, which should be overloaded for each resource, if it's
needed, and execute each rule using translate_properties method. Next
operations are allowed:
- ADD. This rule allows to add some value to list-type properties. Only
list-type values can be added to such properties. Using for other
cases is prohibited and will be returned with error.
- REPLACE. This rule allows to replace some property value to another. Used
for all types of properties. Note, that if property has list type,
then value will be replaced for all elements of list, where it
needed. If element in such property must be replaced by value of
another element of this property, value_name must be defined.
- DELETE. This rule allows to delete some property. If property has list
type, then deleting affects value in all list elements.
- RESOLVE. This rule allows to resolve some property using client and
the finder function. Finders may require an additional entity key.
"""
RULE_KEYS = (ADD, REPLACE,
DELETE, RESOLVE) = ('Add', 'Replace',
'Delete', 'Resolve')
def __init__(self, properties, rule, source_path, value=None,
value_name=None, value_path=None, client_plugin=None,
finder=None, entity=None):
"""Add new rule for translating mechanism.
:param properties: properties of resource
:param rule: rule from RULE_KEYS
:param source_path: list with path to property, which value will be
affected in rule.
:param value: value which will be involved in rule
:param value_name: value_name which used for replacing properties
inside list-type properties.
:param value_path: path to value, which should be used for translation.
:param client_plugin: client plugin that would be used to resolve
:param finder: finder method of the client plugin
:param entity: some generic finders require an entity to resolve ex.
neutron finder function.
"""
self.properties = properties
self.rule = rule
self.source_path = source_path
self.value = value or None
self.value_name = value_name
self.value_path = value_path
self.client_plugin = client_plugin
self.finder = finder
self.entity = entity
self.validate()
def validate(self):
if self.rule not in self.RULE_KEYS:
raise ValueError(_('There is no rule %(rule)s. List of allowed '
'rules is: %(rules)s.') % {
'rule': self.rule,
'rules': ', '.join(self.RULE_KEYS)})
elif not isinstance(self.properties, properties.Properties):
raise ValueError(_('Properties must be Properties type. '
'Found %s.') % type(self.properties))
elif not isinstance(self.source_path, list):
raise ValueError(_('source_path should be a list with path '
'instead of %s.') % type(self.source_path))
elif len(self.source_path) == 0:
raise ValueError(_('source_path must be non-empty list with '
'path.'))
elif self.value_name and self.rule != self.REPLACE:
raise ValueError(_('Use value_name only for replacing list '
'elements.'))
elif self.rule == self.ADD and not isinstance(self.value, list):
raise ValueError(_('value must be list type when rule is Add.'))
elif (self.rule == self.RESOLVE and not (self.client_plugin
or self.finder)):
raise ValueError(_('client_plugin and finder should be specified '
'for Resolve rule'))
def execute_rule(self):
try:
(source_key, source_data) = self._get_data_from_source_path(
self.source_path)
if self.value_path:
(value_key, value_data) = self._get_data_from_source_path(
self.value_path)
value = (value_data[value_key]
if value_data and value_data.get(value_key)
else self.value)
else:
(value_key, value_data) = None, None
value = self.value
except AttributeError:
return
if (source_data is None or (self.rule not in (self.DELETE,
self.RESOLVE) and
(value is None and
self.value_name is None and
(value_data is None or
value_data.get(value_key) is None)))):
return
if self.rule == TranslationRule.ADD:
self._exec_add(source_key, source_data, value)
elif self.rule == TranslationRule.REPLACE:
self._exec_replace(source_key, source_data,
value_key, value_data, value)
elif self.rule == TranslationRule.RESOLVE:
self._exec_resolve(source_key, source_data)
elif self.rule == TranslationRule.DELETE:
self._exec_delete(source_key, source_data, value)
def _get_data_from_source_path(self, path):
def get_props(props, key):
props = props.get(key)
if props.schema.schema is not None:
keys = list(props.schema.schema)
schemata = dict((k, props.schema.schema[k])
for k in keys)
props = dict((k, properties.Property(s, k))
for k, s in schemata.items())
return props
def resolve_param(param):
"""Check whether if given item is param and resolve, if it is."""
# NOTE(prazumovsky): If property uses removed in HOT function,
# we should not translate it for correct validating and raising
# validation error.
if isinstance(param, hot_funcs.Removed):
raise AttributeError(_('Property uses removed function.'))
if isinstance(param, (hot_funcs.GetParam, cfn_funcs.ParamRef)):
return function.resolve(param)
elif isinstance(param, list):
return [resolve_param(param_item) for param_item in param]
else:
return param
source_key = path[0]
data = self.properties.data
props = self.properties.props
for key in path:
if isinstance(data, list):
source_key = key
elif data.get(key) is not None:
# NOTE(prazumovsky): There's no need to resolve other functions
# because we can translate all function to another path. But if
# list or map type property equals to get_param function, need
# to resolve it for correct translating.
data[key] = resolve_param(data[key])
if isinstance(data[key], (dict, list)):
data = data[key]
props = get_props(props, key)
else:
source_key = key
elif data.get(key) is None:
if (self.rule in (TranslationRule.DELETE,
TranslationRule.RESOLVE) or
(self.rule == TranslationRule.REPLACE and
self.value_name)):
return None, None
elif props.get(key).type() == properties.Schema.LIST:
data[key] = []
elif props.get(key).type() == properties.Schema.MAP:
data[key] = {}
else:
source_key = key
continue
data = data.get(key)
props = get_props(props, key)
return source_key, data
def _exec_add(self, source_key, source_data, value):
if isinstance(source_data, list):
source_data.extend(value)
else:
raise ValueError(_('Add rule must be used only for '
'lists.'))
def _exec_replace(self, source_key, source_data,
value_key, value_data, value):
if isinstance(source_data, list):
for item in source_data:
if item.get(self.value_name) and item.get(source_key):
raise ValueError(_('Cannot use %(key)s and '
'%(name)s at the same time.')
% dict(key=source_key,
name=self.value_name))
elif item.get(self.value_name) is not None:
item[source_key] = item[self.value_name]
del item[self.value_name]
elif value is not None:
item[source_key] = value
else:
if (source_data and source_data.get(source_key) and
value_data and value_data.get(value_key)):
raise ValueError(_('Cannot use %(key)s and '
'%(name)s at the same time.')
% dict(key=source_key,
name=value_key))
source_data[source_key] = value
# If value defined with value_path, need to delete value_path
# property data after it's replacing.
if value_data and value_data.get(value_key):
del value_data[value_key]
def _exec_resolve(self, source_key, source_data):
def resolve_and_find(source_data, source_value):
if isinstance(source_value, cfn_funcs.ResourceRef):
return
if isinstance(source_value, function.Function):
source_value = function.resolve(source_value)
if source_value:
finder = getattr(self.client_plugin, self.finder)
if self.entity:
value = finder(self.entity, source_value)
else:
value = finder(source_value)
source_data[source_key] = value
if isinstance(source_data, list):
for item in source_data:
source_value = item.get(source_key)
resolve_and_find(item, source_value)
else:
source_value = source_data.get(source_key)
resolve_and_find(source_data, source_value)
def _exec_delete(self, source_key, source_data, value):
if isinstance(source_data, list):
for item in source_data:
if item.get(source_key) is not None:
del item[source_key]
else:
del source_data[source_key]
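# A minimal sketch (not part of Heat itself) of how a resource could declare
# rules for this mechanism; the property names 'pool', 'pool_id' and
# 'legacy_option' are hypothetical:
def _example_translation_rules(props):
    return [
        # Move a value from the deprecated 'pool_id' property into 'pool'.
        TranslationRule(props, TranslationRule.REPLACE, ['pool'],
                        value_path=['pool_id']),
        # Drop a property that is no longer used.
        TranslationRule(props, TranslationRule.DELETE, ['legacy_option']),
    ]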
|
|
# coding=utf-8
''''''
import os
import ast
import sys
import time
import signal
import argparse
import asyncio
from datetime import datetime, timedelta
from collections import defaultdict
from functools import reduce, partial
from slacker import Slacker
from tornado.ioloop import IOLoop
from apscheduler.job import Job
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
try:
from configparser import ConfigParser as SafeConfigParser, Error
except ImportError:
from ConfigParser import SafeConfigParser, Error
__version__ = '0.1.5'
desc = 'Send a message to a Slack channel when an alert needs to be raised (Python 3)'
g = defaultdict(int)
stoped = {}
excluded_job_names = ('_update_scheduler_status',)
if sys.platform == 'win32':
USER_CONFIG = os.path.expanduser(r'~\.slack_alert.conf')
else:
USER_CONFIG = os.path.join(
os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'),
'slack_alert.conf'
)
def read_config(args, parser):
config = SafeConfigParser()
try:
config.read(args.config)
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items('slack'))
parser.set_defaults(**defaults)
except Error:
# Ignore for now.
pass
return parser
def create_parser():
"""Return command-line parser."""
parser = argparse.ArgumentParser(description=desc, prog='slack-alert')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-c', '--config', metavar='filename',
default=USER_CONFIG,
help='path to a global config file; if this file '
'does not exist then this is ignored '
'(default: {0})'.format(USER_CONFIG))
parser.add_argument('--ignore-global-config', action='store_true',
help="don't look for and apply global config files")
parser.add_argument('-s', '--scheduler', choices=['AsyncIOScheduler',
'BackgroundScheduler',
'GeventScheduler',
'TornadoScheduler'],
default='AsyncIOScheduler',
help=('Choose the scheduler that best fits your '
'programming environment '
'(default: AsyncIOScheduler)'))
parser.add_argument('--path', default='.',
help=('path to plugins files directory. (default: '
'current directory)'))
parser.add_argument('--working-hours', default='',
help=('working hours, you can set like this: '
'`9:30-19:00`,`9:00-12:00,13:00-18:00` '
'(default: all time)'))
parser.add_argument('--max-alert', type=int, default=3,
help=('Maximum number of error alerts to send for a '
'job before pausing it for a while (default: '
'3)'))
parser.add_argument('--pause-time', type=int, default=60,
help=('How long to pause a job after the max alert '
'count is reached, in minutes (default: 60)'))
return parser
def parse_args(arguments):
parser = create_parser()
args = parser.parse_args(arguments)
if not args.ignore_global_config:
parser = read_config(args, parser)
args = parser.parse_args(arguments)
return args
class GetJobs(ast.NodeTransformer):
def __init__(self):
self.jobs = []
def get_jobs(self):
return self.jobs
def get_job_args(self, decorator):
return {k.arg: k.value.n for k in decorator.keywords
if k.arg in ('hours', 'seconds', 'minutes', 'days')
and isinstance(k.value, ast.Num)}
def visit_FunctionDef(self, node):
decorator_list = node.decorator_list
if not decorator_list:
return node
decorator = decorator_list[0]
args = self.get_job_args(decorator)
if args:
node.decorator_list = decorator_list[1:]
self.jobs.append((node.name, args))
return node
def find_jobs(path):
jobs = []
for root, dirs, files in os.walk(path):
for name in files:
file = os.path.join(root, name)
if not file.endswith('.py'):
continue
with open(file) as f:
expr_ast = ast.parse(f.read())
transformer = GetJobs()
sandbox = {}
exec(compile(transformer.visit(expr_ast),
'<string>', 'exec'), sandbox)
jobs.extend([(sandbox[j], kw) for j, kw in transformer.jobs])
return jobs
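# A sketch of a plugin file that find_jobs() would pick up. The decorator name
# `every` is hypothetical: only its keyword arguments matter, and the decorator
# itself is stripped from the AST before the module source is executed, so it
# does not even need to be importable:
#
#     @every(minutes=5)
#     def check_disk_usage():
#         # a truthy return value is posted to Slack by slack_listener()
#         return 'disk usage above 90%'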
def slack_listener(config, event):
slack = Slacker(config.token)
if event.retval:
g[event.job_id] += 1
res = event.retval
if g[event.job_id] == config.max_alert:
notice = ' [notice: this message will pause {} minutes]'.format(
config.pause_time)
res = str(res) + notice
slack.chat.post_message(
'#{}'.format(config.channel), res, username=config.username,
icon_url=config.icon_url, icon_emoji=config.icon_emoji)
def parse_working_hours(config):
time_ = []
working_hours = config.working_hours
if not working_hours.strip('\'"'):
return [[0, 24 * 60]]
for w in working_hours.split(','):
w = w.strip()
s, e = w.split('-')
start_hour, start_minute = s.split(':')
end_hour, end_minute = e.split(':')
time_.append([int(start_hour) * 60 + int(start_minute),
int(end_hour) * 60 + int(end_minute)])
return time_
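# For example, --working-hours '9:00-12:00,13:00-18:00' parses to
# [[540, 720], [780, 1080]] (minutes since midnight); an empty value means the
# whole day, [[0, 1440]].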
def _update_scheduler_status(scheduler, config):
now = datetime.now()
working_hours = parse_working_hours(config)
jobs = scheduler.get_jobs()
work = False
for start, end in working_hours:
if start <= (now.hour * 60 + now.minute) <= end:
work = True
if not work:
for j in jobs:
if j.name == 'partial' and \
j.func.func.__name__ in excluded_job_names:
continue
j.pause()
else:
# slack post message limit
for job_id, times_ in g.items():
if times_ > config.max_alert - 1:
job = Job(scheduler, job_id)
job.pause()
stoped[job_id] = (job, now)
g[job_id] = 0
for job_id in list(stoped):
job, time_ = stoped[job_id]
if time_ + timedelta(minutes=config.pause_time) <= now:
job.resume()
del stoped[job_id]
for j in jobs:
if j.name == 'partial' and \
j.func.func.__name__ in excluded_job_names:
continue
if j.id not in stoped:
j.resume()
def _main(args):
plugins_path = os.path.join(args.path, 'plugins')
scheduler_name = args.scheduler
scheduler_module = scheduler_name.lower().replace('scheduler', '')
if not os.path.isdir(plugins_path):
print('{} must be exists and is a directory'.format(
plugins_path))
return 1
jobs = find_jobs(plugins_path)
if not jobs:
print('Not yet jobs!')
return 1
apscheduler = __import__('apscheduler.schedulers.{}'.format(
scheduler_module))
scheduler_cls = reduce(lambda x, y: getattr(x, y),
[apscheduler.schedulers, scheduler_module,
scheduler_name])
scheduler = scheduler_cls()
listener = partial(slack_listener, args)
scheduler.add_listener(listener,
EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
for job, kw in jobs:
scheduler.add_job(job, 'interval', **kw)
update_scheduler_status = partial(
_update_scheduler_status, scheduler, args)
scheduler.add_job(update_scheduler_status, 'interval', seconds=5)
g = scheduler.start()
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
if scheduler_name == 'AsyncIOScheduler':
asyncio.get_event_loop().run_forever()
elif scheduler_name == 'GeventScheduler':
g.join()
elif scheduler_name == 'TornadoScheduler':
IOLoop.instance().start()
else:
while True:
time.sleep(2)
return 0
except (KeyboardInterrupt, SystemExit):
if scheduler_name not in ('AsyncIOScheduler', 'GeventScheduler',
'TornadoScheduler'):
scheduler.shutdown()
def main():
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
args = parse_args(sys.argv[1:])
return _main(args)
except KeyboardInterrupt:
return 1 # pragma: no cover
if __name__ == '__main__':
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo.config import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import event as virtevent
driver_opts = [
cfg.StrOpt('compute_driver',
help='Driver to use for controlling virtualization. Options '
'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
'fake.FakeDriver, baremetal.BareMetalDriver, '
'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver'),
cfg.StrOpt('default_ephemeral_format',
default=None,
help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
cfg.StrOpt('preallocate_images',
default='none',
help='VM image preallocation mode: '
'"none" => no storage provisioning is done up front, '
'"space" => storage is fully allocated at instance start'),
cfg.BoolOpt('use_cow_images',
default=True,
help='Whether to use cow images'),
]
CONF = cfg.CONF
CONF.register_opts(driver_opts)
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
driver_registry = dict()
for driver_str in named_driver_config:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
driver_registry[driver_type] = driver_class(*args, **kwargs)
return driver_registry
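# driver_dict_from_config expects entries of the form "<type>=<import path>";
# for illustration:
#
#     driver_dict_from_config(['fake=nova.virt.fake.FakeDriver'], virtapi)
#
# imports FakeDriver, instantiates it with `virtapi`, and returns
# {'fake': <FakeDriver instance>}.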
def block_device_info_get_root(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('root_device_name')
def block_device_info_get_swap(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('swap') or {'device_name': None,
'swap_size': 0}
def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
return ephemerals
def block_device_info_get_mapping(block_device_info):
block_device_info = block_device_info or {}
block_device_mapping = block_device_info.get('block_device_mapping') or []
return block_device_mapping
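# The helpers above expect a block_device_info dict shaped roughly like this
# (values are illustrative):
#
#     {'root_device_name': '/dev/vda',
#      'swap': {'device_name': '/dev/vdb', 'swap_size': 1024},
#      'ephemerals': [{'device_name': '/dev/vdc', 'size': 10}],
#      'block_device_mapping': []}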
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
}
def __init__(self, virtapi):
self.virtapi = virtapi
self._compute_event_callback = None
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VMs on the given host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
:param instance_id: The ID / name of the instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return instance_id in self.list_instances()
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instance_uuids(self):
"""
Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
encountered
"""
raise NotImplementedError()
def get_console_pool_info(self, console_type):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_vnc_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_spice_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
def attach_interface(self, instance, image_meta, network_info):
"""Attach an interface to the instance."""
raise NotImplementedError()
def detach_interface(self, instance, network_info):
"""Detach an interface from the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
"""
Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()
def power_on(self, instance):
"""Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
"""Soft delete the specified instance."""
raise NotImplementedError()
def restore(self, instance):
"""Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
:param nodename:
node which the caller want to get resources from
a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, ctxt, instance_ref,
block_device_info, network_info,
migrate_data=None):
"""Prepare an instance for live migration
:param ctxt: security context
:param instance_ref: instance object that will be migrated
:param block_device_info: instance block device information
:param network_info: instance network information
:param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
def pre_block_migration(self, ctxt, instance_ref, disk_info):
"""Prepare a block device for migration
:param ctxt: security context
:param instance_ref: instance object that will have its disk migrated
:param disk_info: information about disk to be migrated (as returned
from get_instance_disk_info())
"""
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:param block_migration: if true, migrate VM disk.
:param migrate_data: implementation specific params.
"""
raise NotImplementedError()
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref: instance object that is migrated
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
raise NotImplementedError()
def check_instance_shared_storage_local(self, ctxt, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance: nova.db.sqlalchemy.models.Instance
"""
raise NotImplementedError()
def check_instance_shared_storage_remote(self, ctxt, data):
"""Check if instance files located on shared storage.
:param context: security context
:param data: result of check_instance_shared_storage_local
"""
raise NotImplementedError()
def check_instance_shared_storage_cleanup(self, ctxt, data):
"""Do cleanup on host after check_instance_shared_storage calls
:param ctxt: security context
:param data: result of check_instance_shared_storage_local
"""
pass
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param src_compute_info: Info about the sending machine
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info (hypervisor-dependent)
"""
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info (hypervisor-dependent)
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""
Set the root password on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the value of the new password.
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
"""
Applies a diff to the instance metadata.
This is an optional driver method which is used to publish
changes to the instance's metadata to the hypervisor. If the
hypervisor has no means of publishing the instance metadata to
the instance, then this method should not be implemented.
"""
pass
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances
:param timeout: the currently configured timeout for considering
rebooting instances to be stuck
:param instances: instances that have been in rebooting state
longer than the configured timeout
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
evacuation of the guest VMs."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
raise NotImplementedError()
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance name.
"""
raise NotImplementedError()
def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
given instance_name. These are returned as [rx_bytes, rx_packets,
rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
indicates receive, tx indicates transmit, bytes and packets indicate
the total number of bytes or packets transferred, and errs and drop
are the total numbers of failed and dropped packets.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance name.
"""
raise NotImplementedError()
def legacy_nwinfo(self):
"""True if the driver requires the legacy network_info format."""
# TODO(tr3buchet): update all subclasses and remove this method and
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
def macs_for_instance(self, instance):
"""What MAC addresses must this instance have?
Some hypervisors (such as bare metal) cannot do freeform virtualisation
of MAC addresses. This method allows drivers to return a set of MAC
addresses that the instance is to have. allocate_for_instance will take
this into consideration when provisioning networking for the instance.
Mapping of MAC addresses to actual networks (or permitting them to be
freeform) is up to the network implementation layer. For instance,
with openflow switches, fixed MAC addresses can still be virtualised
onto any L2 domain, with arbitrary VLANs etc, but regular switches
require pre-configured MAC->network mappings that will match the
actual configuration.
Most hypervisors can use the default implementation which returns None.
Hypervisors with MAC limits should return a set of MAC addresses, which
will be supplied to the allocate_for_instance call by the compute
manager, and it is up to that call to ensure that all assigned network
details are compatible with the set of MAC addresses.
This is called during spawn_instance by the compute manager.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
return None
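# Hedged sketch (not part of the driver API): what a MAC-constrained driver
# might return from macs_for_instance. The class name and addresses below are
# purely illustrative.
#
#     class FixedMacDriver(ComputeDriver):
#         def macs_for_instance(self, instance):
#             # only these addresses may be assigned to the instance
#             return set(['52:54:00:12:34:56', '52:54:00:ab:cd:ef'])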
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
Some drivers choose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the IP of the
machine that will be making the connection, the name of the iSCSI
initiator, and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
def get_available_nodes(self):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
stats = self.get_host_stats(refresh=True)
if not isinstance(stats, list):
stats = [stats]
return [s['hypervisor_hostname'] for s in stats]
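# Illustrative example: a single-node driver whose host stats contain
# {'hypervisor_hostname': 'node1'} would return ['node1'] from
# get_available_nodes().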
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage info
"""
return {}
def instance_on_disk(self, instance):
"""Checks access of instance files on the host.
:param instance: instance to lookup
Returns True if the files of an instance with the supplied ID are
accessible on the host, False otherwise.
.. note::
Used in rebuild for HA implementation and required for validation
of access to instance shared disk files
"""
return False
def register_event_listener(self, callback):
"""Register a callback to receive events.
Register a callback to receive asynchronous event
notifications from hypervisors. The callback will
be invoked with a single parameter, which will be
an instance of the nova.virt.event.Event class."""
self._compute_event_callback = callback
def emit_event(self, event):
"""Dispatches an event to the compute manager.
Invokes the event callback registered by the
compute manager to dispatch the event. This
must only be invoked from a green thread."""
if not self._compute_event_callback:
LOG.debug("Discarding event %s" % str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s" % str(event))
self._compute_event_callback(event)
except Exception as ex:
LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
% locals())
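# Hedged usage sketch of the event plumbing above; the handler and event
# objects are hypothetical, only register_event_listener/emit_event come from
# this class.
#
#     def handle_event(event):
#         # event is a nova.virt.event.Event instance
#         print(event)
#
#     driver.register_event_listener(handle_event)
#     driver.emit_event(event)  # must be invoked from a green thread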
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
Load the compute driver module specified by the compute_driver
configuration option or, if supplied, the driver name supplied as an
argument.
Compute driver constructors take a VirtAPI object as their first
argument, and this must be supplied.
:param virtapi: a VirtAPI instance
:param compute_driver: a compute driver name to override the config opt
:returns: a ComputeDriver instance
"""
if not compute_driver:
compute_driver = CONF.compute_driver
if not compute_driver:
LOG.error(_("Compute driver option required, but not specified"))
sys.exit(1)
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
driver = importutils.import_object_ns('nova.virt',
compute_driver,
virtapi)
return utils.check_isinstance(driver, ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
def compute_driver_matches(match):
return CONF.compute_driver.endswith(match)
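# Hedged usage sketch for load_compute_driver(); the virtapi object is a
# placeholder, and loading is normally done by the compute manager rather
# than by hand.
#
#     virtapi = ...  # an implementation of nova.virt.virtapi.VirtAPI
#     driver = load_compute_driver(virtapi, 'libvirt.LibvirtDriver')
#     if compute_driver_matches('LibvirtDriver'):
#         print('libvirt is the configured driver')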
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations(object):
"""RouteFilterRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
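# Hedged usage sketch (not generated code): calling this operation group
# through NetworkManagementClient. The credential, subscription id, and
# resource names are placeholders; it assumes azure-identity and
# azure-mgmt-network are installed and expose route_filter_rules.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.route_filter_rules.begin_delete(
#         "my-resource-group", "my-route-filter", "my-rule")
#     poller.result()  # block until the long-running delete completes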
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilterRule"
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
route_filter_rule_parameters, # type: "_models.RouteFilterRule"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilterRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
route_filter_rule_parameters, # type: "_models.RouteFilterRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilterRule"]
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_02_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
route_filter_rule_parameters, # type: "_models.PatchRouteFilterRule"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilterRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
rule_name, # type: str
route_filter_rule_parameters, # type: "_models.PatchRouteFilterRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilterRule"]
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_02_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterRuleListResult"]
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_02_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
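# Hedged usage sketch: iterating the pager returned by list_by_route_filter,
# given a NetworkManagementClient built as in the earlier sketch. The
# attribute names assume the RouteFilterRule model in this package.
#
#     for rule in client.route_filter_rules.list_by_route_filter(
#             "my-resource-group", "my-route-filter"):
#         print(rule.name, rule.access)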
|
|
#!/usr/bin/env python3
""" Converts owl or ttl or raw rdflib graph into a pandas DataFrame. Saved in .pickle format.
Usage:
allen_cell_types [options]
Options:
-h --help Display this help message
-v --version Current version of file
-r --refresh Update local copy
-i --input=<path> Local copy of Allen Brain Atlas meta data [default: /tmp/allen-cell-types.json]
-o --output=<path> Output path of picklized pandas DataFrame [default: allen-cell-types]
"""
import re
import json
from pathlib import Path
import rdflib
import requests
from pyontutils.utils import TermColors as tc, relative_path
from pyontutils.core import simpleOnt, OntGraph
from pyontutils.namespaces import makePrefixes, ilxtr, definition
from pyontutils.namespaces import rdf, rdfs, owl, AIBSSPEC
from pyontutils.combinators import annotation, allDifferent, distinctMembers
from neurondm.core import auth
from neurondm.lang import *
from docopt import docopt, parse_defaults
class NeuronACT(NeuronEBM):
owlClass = ilxtr.NeuronACT
shortname = 'AllenCT'
class AllenCellTypes:
branch = auth.get('neurons-branch')
prefixes = {**{'JAX': 'http://jaxmice.jax.org/strain/',
'MMRRC': 'http://www.mmrrc.org/catalog/getSDS.jsp?mmrrc_id=',
'AllenTL': 'http://api.brain-map.org/api/v2/data/TransgenicLine/'},
**makePrefixes('definition', 'ilxtr', 'owl')}
prefixes['AllenTransgenicLine'] = 'http://api.brain-map.org/api/v2/data/TransgenicLine/'
def __init__(self, input, name):
self.name = name
self.ns = {k:rdflib.Namespace(v) for k, v in self.prefixes.items()}
self.neuron_data = input
self.tag_names = set()
# self.sample_neuron()
def avoid_url_conversion(self, string):
if not string:
return string
return re.sub(r"/| |\(", '_', string).replace(')', '')
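# Illustrative behaviour of avoid_url_conversion (inputs are hypothetical):
#     'L2/3'      -> 'L2_3'      (slashes and spaces become underscores)
#     'area (V1)' -> 'area__V1'  (opening parens replaced, closing parens dropped)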
def sample_neuron(self,):
Neuron(
Phenotype('ilxtr:apical',
'ilxtr:hasPhenotype',
label='apical - truncated'),
Phenotype('JAX:12345',
'ilxtr:hasExperimentalPhenotype',
label='prefix+stock_number'),
)
print(graphBase.ttl())
def cell_phenotypes(self, cell_specimen):
cell_mappings = {
'hemisphere': 'ilxtr:hasSomaLocationLaterality',
# 'name': 'ilxtr:hasPhenotype',
}
phenotypes = []
for name, value in cell_specimen.items():
mapping = cell_mappings.get(name)
if mapping and value:
if name == 'hemisphere':
if value.lower() == 'left':
curie = 'UBERON:0002812'
elif value.lower() == 'right':
curie = 'UBERON:0002813'
else:
raise ValueError('got stuck with unknown hemisphere ' + value)
phenotypes.append(
Phenotype(
curie,
mapping,
)
)
return phenotypes
# TODO: wrong phenotype
def structure_phenotypes(self, cell_specimen):
struc = cell_specimen['structure']
phenotypes = []
acronym = self.avoid_url_conversion(struc['acronym'])
curie = 'MBA:' + str(struc['id'])
if struc:
phenotypes.append(
Phenotype(
curie,
'ilxtr:hasSomaLocatedIn',
label=acronym
),
)
return phenotypes
def donor_phenotypes(self, cell_specimen):
donor_mappings = {
'sex_full_name': 'ilxtr:hasBiologicalSex'
}
phenotypes = []
for name, value in cell_specimen['donor'].items():
mapping = donor_mappings.get(name)
if mapping and value:
if name == 'sex_full_name':
if value.lower() == 'female':
curie = 'PATO:0000383'
elif value.lower() == 'male':
curie = 'PATO:0000384'
else:
raise ValueError('unknown sex ' + str(value))
phenotypes.append(
Phenotype(
curie,
mapping,
),
)
return phenotypes
# TODO: Figure how to add: description, name and type
def transgenic_lines_phenotypes(self, cell_specimen):
transgenic_mappings = {
}
phenotypes = []
for tl in cell_specimen['donor']['transgenic_lines']:
prefix = tl['transgenic_line_source_name']
suffix = tl['stock_number'] if tl['stock_number'] else str(tl['id'])
name = self.avoid_url_conversion(tl['name'])
_type = tl['transgenic_line_type_name']
if _type == 'driver':
if 'CreERT2' in name: # FIXME from structured instead of name?
pred = ilxtr.hasDriverExpressionInducedPhenotype
else:
pred = 'ilxtr:hasDriverExpressionPhenotype'
elif _type == 'reporter':
pred = 'ilxtr:hasReporterExpressionPhenotype'
else:
pred = 'ilxtr:hasExpressionPhenotype'
line_names = []
if prefix and suffix and prefix in ['AIBS', 'MMRRC', 'JAX']:
if prefix == 'AIBS':
prefix = 'AllenTL'
iri = self.ns[prefix][suffix]
phenotypes.append(Phenotype(iri, pred))
return phenotypes
# TODO: search if description exists
# TODO: Create mapping for all possible types
# TODO: Fork negatives to NegPhenotype
def specimen_tags_phenotypes(self, cell_specimen):
pred = 'ilxtr:hasDendriteMorphologicalPhenotype'
specimen_tag_mappings = {
'spiny': Phenotype('ilxtr:SpinyPhenotype', pred),
'aspiny': NegPhenotype('ilxtr:SpinyPhenotype', pred),
'sparsely spiny': LogicalPhenotype(AND,
Phenotype('ilxtr:SpinyPhenotype', pred),
Phenotype('PATO:0001609', 'ilxtr:hasPhenotypeModifier')),
'apicalIntact': Phenotype('ilxtr:ApicalDendritePhenotype', 'ilxtr:hasMorphologicalPhenotype'),
'apicalTruncated': LogicalPhenotype(AND,
Phenotype('ilxtr:ApicalDendritePhenotype', 'ilxtr:hasMorphologicalPhenotype'),
Phenotype('PATO:0000936', 'ilxtr:hasPhenotypeModifier')),
'apicalNa': NegPhenotype('ilxtr:ApicalDendritePhenotype', 'ilxtr:hasMorphologicalPhenotype'), # NA means there was no apical dendrite
}
phenotypes = []
for tag in cell_specimen['specimen_tags']:
if 'dendrite type' in tag['name']:
one_two = tag['name'].split(' - ')[1]
#if ' ' in one_two:
#one, two = one_two.split(' ')
#name = one + two.capitalize()
#else:
name = one_two
else:
one, two = tag['name'].split(' - ')
#if two == 'NA': # apical - NA
#continue
name = one + two.capitalize()
self.tag_names.add(tag['name'])
# if phenotype == '+':
if name not in specimen_tag_mappings:
raise ValueError(name)
phenotypes.append(specimen_tag_mappings[name]
if name in specimen_tag_mappings else
Phenotype('ilxtr:' + name, pred))
# elif phenotype == '-': phenotypes.append(NegPhenotype(...))
return phenotypes
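# Worked examples of the tag parsing above (tag names follow the Allen
# 'specimen_tags' format; mappings are the ones defined in
# specimen_tag_mappings):
#     'dendrite type - spiny' -> name 'spiny'        -> Phenotype('ilxtr:SpinyPhenotype', pred)
#     'apical - intact'       -> name 'apicalIntact' -> Phenotype('ilxtr:ApicalDendritePhenotype', ...)
#     'apical - NA'           -> name 'apicalNa'     -> NegPhenotype(...)  # no apical dendrite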
# TODO: check to see if specimen_id is really the priority
def cell_soma_locations_phenotypes(self, cell_specimen):
cell_soma_mappings = {
}
phenotypes = []
for csl in cell_specimen['cell_soma_locations']:
location = csl['id']
phenotypes.append(
Phenotype(
'ilxtr:' + str(location),
'ilxtr:hasSomaLocatedIn',
)
)
return phenotypes
def add_mouse_lineage(self, cell_specimen):
phenotypes = [Phenotype('NCBITaxon:10090', 'ilxtr:hasInstanceInTaxon')]
return phenotypes
def build_phenotypes(self, cell_specimen):
phenotype_functions = [
self.cell_phenotypes,
self.structure_phenotypes,
self.donor_phenotypes,
self.transgenic_lines_phenotypes,
self.specimen_tags_phenotypes,
self.add_mouse_lineage,
# self.cell_soma_locations_phenotypes, # deprecated
]
phenotypes = []
for func in phenotype_functions:
phenotypes.extend(func(cell_specimen))
return phenotypes
def make_config(self):
# have to call Config here because the transgenic lines don't exist yet
self.config = Config(name=self.name,
imports=[f'NIFRAW:{self.branch}/ttl/generated/allen-transgenic-lines.ttl'],
prefixes=self.prefixes,
branch=self.branch,
sources=tuple(), # TODO insert the link to the query...
source_file=relative_path(
__file__,
no_wd_value=__file__))
def build_neurons(self):
instances = []
dids = []
for cell_specimen in self.neuron_data:
neuron = NeuronACT(*self.build_phenotypes(cell_specimen))
did = AIBSSPEC[str(cell_specimen['id'])]
dids.append(did)
instances.append((did, rdf.type, owl.NamedIndividual))
instances.append((did, rdf.type, neuron.identifier))
print(sorted(self.tag_names))
NeuronACT.write()
NeuronACT.write_python()
self.build_instances(instances, dids)
def build_instances(self, instances, dids):
folder = Path(self.config.out_graph_path()).parent
# WOW do I need to implement the new/better way of
# managing writing collections of neurons to graphs
neuron_uri = next(NeuronACT.out_graph[:rdf.type:owl.Ontology])
name = 'allen-cell-instances.ttl'
base, _ = neuron_uri.rsplit('/', 1)
uri = rdflib.URIRef(base + '/' + name)
metadata = ((uri, rdf.type, owl.Ontology),)
instance_graph = OntGraph(path=folder / name)
instance_graph.bind('AIBSSPEC', AIBSSPEC)
[instance_graph.add(t) for t in metadata]
[instance_graph.add(t) for t in instances]
[instance_graph.add(t) for t in allDifferent(None, distinctMembers(*dids))]
instance_graph.write()
def build_transgenic_lines(self):
"""
init class | "transgenic_line_source_name":"stock_number" a Class
add superClass | rdfs:subClassOf ilxtr:transgenicLine
add *order* | ilxtr:useObjectProperty ilxtr:<order>
add name | rdfs:label "name"
add def | definition: "description"
add transtype | rdfs:hasTransgenicType "transgenic_line_type_name"
"""
triples = []
for cell_specimen in self.neuron_data:
for tl in cell_specimen['donor']['transgenic_lines']:
_id = tl['stock_number'] if tl['stock_number'] else tl['id']
prefix = tl['transgenic_line_source_name']
line_type = tl['transgenic_line_type_name']
if line_type == 'driver' and 'CreERT2' in tl['name']:
line_type = 'inducibleDriver'
if prefix not in ['JAX', 'MMRRC', 'AIBS']:
print(tc.red('WARNING:'), 'unknown prefix', prefix, json.dumps(tl, indent=4))
continue
elif prefix == 'AIBS':
prefix = 'AllenTL'
_class = self.ns[prefix][str(_id)]
triples.append((_class, rdf.type, owl.Class))
triples.append((_class, rdfs.label, rdflib.Literal(tl['name'])))
triples.append((_class, definition, rdflib.Literal(tl['description'])))
triples.append((_class, rdfs.subClassOf, ilxtr.transgenicLine))
triples.append((_class, ilxtr.hasTransgenicType, ilxtr[line_type + 'Line']))
# TODO aspects.ttl?
transgenic_lines = simpleOnt(filename='allen-transgenic-lines',
local_base=graphBase.local_base,
path='ttl/generated/',
prefixes=self.prefixes,
triples=triples,
comment='Allen transgenic lines for cell types',
branch=self.branch,
calling__file__=__file__,)
transgenic_lines._graph.write()
def main(args={o.name:o.value for o in parse_defaults(__doc__)}, run=True):
#print(args)
if not args['--refresh'] and args['--input'] and Path(args['--input']).exists():
with open(args['--input'], 'rt') as f:
input = json.load(f)
else:
response = requests.get('http://api.brain-map.org/api/v2/data/query.json?criteria='
'model::Specimen,rma::criteria,[is_cell_specimen$eq%27true%27],'
'products[name$eq%27Mouse%20Cell%20Types%27],'
'rma::include,structure,donor(transgenic_lines),'
'specimen_tags,cell_soma_locations,rma::options[num_rows$eqall]')
input = response.json()['msg']
with open(args['--input'], 'wt') as f:
json.dump(input, f, indent=4)
act = AllenCellTypes(input, args['--output'])
act.make_config()
if __name__ == '__main__' or run:
act.build_transgenic_lines()
act.build_neurons()
return act.config,
if __name__ == '__main__':
args = docopt(__doc__, version='0.0.4')
main(args)
|
|
"""
Work with indexes in the database.
"""
import asyncio
import asyncio.tasks
from typing import Any, Dict, List, Optional, Tuple
import pymongo
from sqlalchemy.ext.asyncio import AsyncEngine
import virtool.history.db
import virtool.pg.utils
import virtool.references.db
import virtool.utils
from virtool.api.utils import paginate
from virtool.config.cls import Config
from virtool.db.transforms import AbstractTransform, apply_transforms
from virtool.db.utils import get_new_id
from virtool.indexes.models import IndexFile
from virtool.types import Document
from virtool.users.db import AttachUserTransform
PROJECTION = [
"_id",
"created_at",
"has_files",
"job",
"otu_count",
"modification_count",
"modified_count",
"user",
"ready",
"reference",
"version",
]
FILES = (
"reference.json.gz",
"reference.fa.gz",
"reference.1.bt2",
"reference.2.bt2",
"reference.3.bt2",
"reference.4.bt2",
"reference.rev.1.bt2",
"reference.rev.2.bt2",
)
class IndexCountsTransform(AbstractTransform):
def __init__(self, db):
self._db = db
async def attach_one(self, document: Document, prepared: Any) -> Document:
return {**document, **prepared}
async def prepare_one(self, document: Document) -> Any:
query = {"index.id": document["id"]}
change_count, otu_ids = await asyncio.gather(
self._db.history.count_documents(query),
self._db.history.distinct("otu.id", query),
)
return {"change_count": change_count, "modified_otu_count": len(otu_ids)}
async def create(
db, ref_id: str, user_id: str, job_id: str, index_id: Optional[str] = None
) -> dict:
"""
Create a new index and update history to show the version and id of the new index.
:param db: the application database client
:param ref_id: the ID of the reference to create index for
:param user_id: the ID of the current user
:param job_id: the ID of the job
:param index_id: the ID of the index
:return: the new index document
"""
index_id = index_id or await get_new_id(db.indexes)
index_version = await get_next_version(db, ref_id)
manifest = await virtool.references.db.get_manifest(db, ref_id)
document = {
"_id": index_id,
"version": index_version,
"created_at": virtool.utils.timestamp(),
"manifest": manifest,
"ready": False,
"has_files": True,
"has_json": False,
"reference": {"id": ref_id},
"job": {"id": job_id},
"user": {"id": user_id},
}
await db.indexes.insert_one(document)
await db.history.update_many(
{"index.id": "unbuilt", "reference.id": ref_id},
{"$set": {"index": {"id": index_id, "version": index_version}}},
)
return document
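# Hedged usage sketch for create(); the database client and identifiers are
# placeholders and would normally come from the running application.
#
#     async def example(db):
#         index = await create(db, ref_id="foo", user_id="bob", job_id="baz")
#         assert index["ready"] is False and index["version"] >= 0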
async def processor(db, document: dict) -> dict:
"""
A processor for index documents. Adds computed data about the index.
:param db: the application database client
:param document: the document to be processed
:return: the processed document
"""
return await apply_transforms(
virtool.utils.base_processor(document),
[AttachUserTransform(db), IndexCountsTransform(db)],
)
async def find(db, req_query: dict, ref_id: Optional[str] = None) -> dict:
"""
Find index documents matching the `req_query`.
:param db: the application database client
:param req_query: the request query parameters
:param ref_id: the id of the reference
:return: the paginated index documents
"""
base_query = None
if ref_id:
base_query = {"reference.id": ref_id}
data = await paginate(
db.indexes,
{},
req_query,
base_query=base_query,
projection=PROJECTION,
reverse=True,
sort="version",
)
unbuilt_stats = await get_unbuilt_stats(db, ref_id)
return {
**data,
**unbuilt_stats,
"documents": await apply_transforms(
data["documents"], [AttachUserTransform(db), IndexCountsTransform(db)]
),
}
async def finalize(
db, pg: AsyncEngine, base_url: str, ref_id: str, index_id: str
) -> dict:
"""
Finalize an index document by setting `ready` to `True`.
:param db: the application database client
:param pg: the PostgreSQL AsyncEngine object
:param base_url: the application base URL configuration value
:param ref_id: the ID of the reference
:param index_id: the ID of the index to be finalized
:return: the index document after finalization
"""
await update_last_indexed_versions(db, ref_id)
document = await db.indexes.find_one_and_update(
{"_id": index_id}, {"$set": {"ready": True}}
)
return await attach_files(pg, base_url, document)
async def get_contributors(db, index_id: str) -> List[dict]:
"""
Return a list of contributors and their contribution counts for a specific index.
:param db: the application database client
:param index_id: the id of the index to get contributors for
:return: a list of contributors to the index
"""
return await virtool.history.db.get_contributors(db, {"index.id": index_id})
async def get_current_id_and_version(db, ref_id: str) -> Tuple[str, int]:
"""
Return the current index id and version number.
:param db: the application database client
:param ref_id: the id of the reference to get the current index for
:return: the id and version of the current index
"""
document = await db.indexes.find_one(
{"reference.id": ref_id, "ready": True},
sort=[("version", pymongo.DESCENDING)],
projection=["_id", "version"],
)
if document is None:
return None, -1
return document["_id"], document["version"]
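# --- Hedged usage sketch (illustration only; not part of the original module) ---
# `get_current_id_and_version` returns (None, -1) when no ready index exists,
# so callers should branch on the id. The reference id below is invented.
async def _example_describe_current_index(db):
    index_id, version = await get_current_id_and_version(db, "ref_example")
    if index_id is None:
        return "no index has been built for this reference yet"
    return f"current index {index_id} is at version {version}"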
async def get_otus(db, index_id: str) -> List[dict]:
"""
Return a list of otus and number of changes for a specific index.
:param db: the application database client
:param index_id: the id of the index to get otus for
:return: a list of otus modified in the index
"""
cursor = db.history.aggregate(
[
{"$match": {"index.id": index_id}},
{"$sort": {"otu.id": 1, "otu.version": -1}},
{
"$group": {
"_id": "$otu.id",
"name": {"$first": "$otu.name"},
"count": {"$sum": 1},
}
},
{"$match": {"name": {"$ne": None}}},
{"$sort": {"name": 1}},
]
)
return [
{"id": v["_id"], "name": v["name"], "change_count": v["count"]}
async for v in cursor
]
async def get_next_version(db, ref_id: str) -> int:
"""
Get the version number that should be used for the next index build.
:param db: the application database client
:param ref_id: the id of the reference to get the next version for
:return: the next version number
"""
return await db.indexes.count_documents({"reference.id": ref_id, "ready": True})
async def get_unbuilt_stats(db, ref_id: Optional[str] = None) -> dict:
"""
Get the number of unbuilt changes and number of OTUs affected by those changes.
Used to populate the metadata for an index find request. Can search against a
specific reference or all references.
:param db: the application database client
:param ref_id: the ref id to search unbuilt changes for
:return: the change count and modified OTU count
"""
ref_query = dict()
if ref_id:
ref_query["reference.id"] = ref_id
history_query = {**ref_query, "index.id": "unbuilt"}
return {
"total_otu_count": await db.otus.count_documents(ref_query),
"change_count": await db.history.count_documents(history_query),
"modified_otu_count": len(await db.history.distinct("otu.id", history_query)),
}
async def reset_history(db, index_id: str):
"""
Set the index.id and index.version fields of history documents associated with the given index id back to 'unbuilt'.
:param db: The virtool database
:param index_id: The ID of the index which failed to build
"""
query = {"_id": {"$in": await db.history.distinct("_id", {"index.id": index_id})}}
return await db.history.update_many(
query, {"$set": {"index": {"id": "unbuilt", "version": "unbuilt"}}}
)
async def get_patched_otus(db, config: Config, manifest: Dict[str, int]) -> List[dict]:
"""
Get joined OTUs patched to a specific version based on a manifest of OTU ids and
versions.
:param db: the job database client
:param config: the application configuration
:param manifest: the manifest of OTU ids and versions
:return: a list of joined OTU documents patched to the manifest versions
"""
app_dict = {"db": db, "config": config}
coros = list()
for patch_id, patch_version in manifest.items():
coros.append(
virtool.history.db.patch_to_version(app_dict, patch_id, patch_version)
)
return [j[1] for j in await asyncio.tasks.gather(*coros)]
async def update_last_indexed_versions(db, ref_id: str):
"""
Update the `last_indexed_version` field for OTUs associated with `ref_id`
:param db: Application database client
:param ref_id: An ID that corresponds to an entry in the `references` db
"""
# Find OTUs with changes.
pipeline = [
{
"$project": {
"reference": True,
"version": True,
"last_indexed_version": True,
"comp": {"$cmp": ["$version", "$last_indexed_version"]},
}
},
{"$match": {"reference.id": ref_id, "comp": {"$ne": 0}}},
{"$group": {"_id": "$version", "id_list": {"$addToSet": "$_id"}}},
]
id_version_key = {
agg["_id"]: agg["id_list"] async for agg in db.otus.aggregate(pipeline)
}
# For each version number
for version, id_list in id_version_key.items():
await db.otus.update_many(
{"_id": {"$in": id_list}}, {"$set": {"last_indexed_version": version}}
)
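# --- Hedged illustration (not part of the original module) -------------------
# A pure-Python equivalent of the grouping performed by the aggregation above:
# OTUs whose `version` differs from `last_indexed_version` are grouped by their
# current version so each group can be updated with a single update_many call.
# The documents a real caller works with come from MongoDB; this helper only
# illustrates the shape of the computation.
def _example_group_otus_by_version(otus: list) -> dict:
    grouped = {}
    for otu in otus:
        if otu["version"] != otu["last_indexed_version"]:
            grouped.setdefault(otu["version"], []).append(otu["_id"])
    return grouped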
async def attach_files(pg: AsyncEngine, base_url: str, document: dict) -> dict:
"""
Attach a list of index files under the `files` field.
:param pg: the application Postgres client
:param base_url: the application base URL
:param document: an index document
:return: Index document with updated `files` entry containing a list of index files.
"""
index_id = document["_id"]
rows = await virtool.pg.utils.get_rows(pg, IndexFile, "index", index_id)
files = []
for index_file in [row.to_dict() for row in rows]:
location = f"/indexes/{index_id}/files/{index_file['name']}"
files.append(
{
**index_file,
"download_url": str(base_url) + location,
}
)
return {**document, "files": files}
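# --- Hedged illustration (not part of the original module) -------------------
# Example of the `files` entry produced by `attach_files` for a single file,
# assuming base_url "https://virtool.example.com" and index id "idx_example";
# the file name and all values are invented for illustration.
_EXAMPLE_FILES_ENTRY = [
    {
        "name": "reference.1.bt2",
        "download_url": "https://virtool.example.com/indexes/idx_example/files/reference.1.bt2",
    }
]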
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from measurements import smoothness
from metrics import power
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import wpr_modes
from telemetry.page import page
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case
class FakeTracingController(object):
def __init__(self):
self.category_filter = None
def Start(self, _options, category_filter, _timeout):
self.category_filter = category_filter
def IsDisplayTracingSupported(self):
return False
class FakePlatform(object):
def __init__(self):
self.tracing_controller = FakeTracingController()
def CanMonitorPower(self):
return False
class FakeBrowser(object):
def __init__(self):
self.platform = FakePlatform()
class AnimatedPage(page.Page):
def __init__(self, page_set):
super(AnimatedPage, self).__init__(
url='file://animated_page.html',
page_set=page_set, base_dir=page_set.base_dir)
def RunPageInteractions(self, action_runner):
action_runner.Wait(.2)
class FakeTab(object):
def __init__(self):
self.browser = FakeBrowser()
def ExecuteJavaScript(self, js):
pass
class SmoothnessUnitTest(page_test_test_case.PageTestTestCase):
"""Smoke test for smoothness measurement
Runs smoothness measurement on a simple page and verifies
that all metrics were added to the results. The test is purely functional,
i.e. it only checks if the metrics are present and non-zero.
"""
def testSyntheticDelayConfiguration(self):
test_page = page.Page('http://dummy', None)
test_page.synthetic_delays = {
'cc.BeginMainFrame': { 'target_duration': 0.012 },
'cc.DrawAndSwap': { 'target_duration': 0.012, 'mode': 'alternating' },
'gpu.PresentingFrame': { 'target_duration': 0.012 }
}
tab = FakeTab()
measurement = smoothness.Smoothness()
measurement.WillStartBrowser(tab.browser.platform)
measurement.WillNavigateToPage(test_page, tab)
measurement.WillRunActions(test_page, tab)
expected_category_filter = set([
'DELAY(cc.BeginMainFrame;0.012000;static)',
'DELAY(cc.DrawAndSwap;0.012000;alternating)',
'DELAY(gpu.PresentingFrame;0.012000;static)',
'benchmark'
])
tracing_controller = tab.browser.platform.tracing_controller
actual_category_filter = (
tracing_controller.category_filter.included_categories)
# FIXME: Put blink.console into the expected set above and remove these two
# remove() calls when the blink.console change has rolled into chromium.
actual_category_filter.remove('webkit.console')
actual_category_filter.remove('blink.console')
if expected_category_filter != actual_category_filter:
sys.stderr.write("Expected category filter: %s\n" %
repr(expected_category_filter))
sys.stderr.write("Actual category filter filter: %s\n" %
repr(actual_category_filter))
self.assertEquals(expected_category_filter, actual_category_filter)
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
def testSmoothness(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
measurement = smoothness.Smoothness()
results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(results.failures))
frame_times = results.FindAllPageSpecificValuesNamed('frame_times')
self.assertEquals(len(frame_times), 1)
self.assertGreater(frame_times[0].GetRepresentativeNumber(), 0)
mean_frame_time = results.FindAllPageSpecificValuesNamed('mean_frame_time')
self.assertEquals(len(mean_frame_time), 1)
self.assertGreater(mean_frame_time[0].GetRepresentativeNumber(), 0)
frame_time_discrepancy = results.FindAllPageSpecificValuesNamed(
'frame_time_discrepancy')
self.assertEquals(len(frame_time_discrepancy), 1)
self.assertGreater(frame_time_discrepancy[0].GetRepresentativeNumber(), 0)
percentage_smooth = results.FindAllPageSpecificValuesNamed(
'percentage_smooth')
self.assertEquals(len(percentage_smooth), 1)
self.assertGreaterEqual(percentage_smooth[0].GetRepresentativeNumber(), 0)
mean_input_event_latency = results.FindAllPageSpecificValuesNamed(
'mean_input_event_latency')
if mean_input_event_latency:
self.assertEquals(len(mean_input_event_latency), 1)
self.assertGreater(
mean_input_event_latency[0].GetRepresentativeNumber(), 0)
@decorators.Enabled('android') # SurfaceFlinger is android-only
def testSmoothnessSurfaceFlingerMetricsCalculated(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
measurement = smoothness.Smoothness()
results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(results.failures))
avg_surface_fps = results.FindAllPageSpecificValuesNamed('avg_surface_fps')
self.assertEquals(1, len(avg_surface_fps))
self.assertGreater(avg_surface_fps[0].GetRepresentativeNumber(), 0)
jank_count = results.FindAllPageSpecificValuesNamed('jank_count')
self.assertEquals(1, len(jank_count))
self.assertGreater(jank_count[0].GetRepresentativeNumber(), -1)
max_frame_delay = results.FindAllPageSpecificValuesNamed('max_frame_delay')
self.assertEquals(1, len(max_frame_delay))
self.assertGreater(max_frame_delay[0].GetRepresentativeNumber(), 0)
frame_lengths = results.FindAllPageSpecificValuesNamed('frame_lengths')
self.assertEquals(1, len(frame_lengths))
self.assertGreater(frame_lengths[0].GetRepresentativeNumber(), 0)
@decorators.Disabled('mac', 'chromeos') # http://crbug.com/403903
def testSmoothnessForPageWithNoGesture(self):
ps = self.CreateEmptyPageSet()
ps.AddUserStory(AnimatedPage(ps))
measurement = smoothness.Smoothness()
results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(results.failures))
percentage_smooth = results.FindAllPageSpecificValuesNamed(
'percentage_smooth')
self.assertEquals(len(percentage_smooth), 1)
self.assertGreaterEqual(percentage_smooth[0].GetRepresentativeNumber(), 0)
def testCleanUpTrace(self):
self.TestTracingCleanedUp(smoothness.Smoothness, self._options)
def testCleanUpPowerMetric(self):
class FailPage(page.Page):
def __init__(self, page_set):
# pylint: disable=bad-super-call
super(FailPage, self).__init__(
url='file://blank.html',
page_set=page_set, base_dir=page_set.base_dir)
def RunPageInteractions(self, _):
raise exceptions.IntentionalException
class FakePowerMetric(power.PowerMetric):
start_called = False
stop_called = False
def Start(self, _1, _2):
self.start_called = True
def Stop(self, _1, _2):
self.stop_called = True
ps = self.CreateEmptyPageSet()
ps.AddUserStory(FailPage(ps))
class BuggyMeasurement(smoothness.Smoothness):
fake_power = None
# Inject fake power metric.
def WillStartBrowser(self, platform):
self.fake_power = self._power_metric = FakePowerMetric(platform)
measurement = BuggyMeasurement()
try:
self.RunMeasurement(measurement, ps)
except exceptions.IntentionalException:
pass
self.assertTrue(measurement.fake_power.start_called)
self.assertTrue(measurement.fake_power.stop_called)
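# --- Hedged illustration (not part of the original test) ---------------------
# Reconstructs the "DELAY(name;duration;mode)" category-filter strings that
# testSyntheticDelayConfiguration expects from a synthetic_delays dict. The
# helper name is made up; the formatting ("%f" -> six decimal places, mode
# defaulting to "static") matches the expected values in the test above.
def _format_synthetic_delay_categories(synthetic_delays):
    categories = set()
    for name, config in synthetic_delays.items():
        mode = config.get('mode', 'static')
        categories.add('DELAY(%s;%f;%s)' % (name, config['target_duration'], mode))
    return categories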
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
pairID: (Optional) string. Unique identifier for the pair of sentences.
"""
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
pairID: (Optional) Unique identifier for the pair of sentences.
"""
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
tokenizer.__class__.__name__,
str(max_seq_length),
task,
),
)
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
examples = (
processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
)
logger.info("Training examples: %s", len(examples))
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = 128,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
self.dataset = tf.data.Dataset.from_generator(
gen,
(
{
"example_id": tf.int32,
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
},
tf.int64,
),
(
{
"example_id": tf.TensorShape([]),
"input_ids": tf.TensorShape([None, None]),
"attention_mask": tf.TensorShape([None, None]),
"token_type_ids": tf.TensorShape([None, None]),
},
tf.TensorShape([]),
),
)
def get_dataset(self):
return self.dataset
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
class HansProcessor(DataProcessor):
"""Processor for the HANS data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
def get_labels(self):
"""See base class.
Note that we follow the standard three labels for MNLI
(see :class:`~transformers.data.processors.utils.MnliProcessor`)
but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while
`entailment` is label 1."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[5]
text_b = line[6]
pairID = line[7][2:] if line[7].startswith("ex") else line[7]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
return examples
def hans_convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` containing the examples.
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method.
max_length: Maximum example length.
tokenizer: Instance of a tokenizer that will tokenize the examples.
Returns:
A list of task-specific ``InputFeatures`` which can be fed to the model.
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
inputs = tokenizer(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
)
label = label_map[example.label] if example.label in label_map else 0
pairID = int(example.pairID)
features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5]):
logger.info("*** Example ***")
logger.info(f"guid: {example}")
logger.info(f"features: {features[i]}")
return features
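# --- Hedged usage sketch (illustration only; not part of the original file) ---
# Wires the processor and feature conversion above together outside of the
# dataset classes. The checkpoint name and data_dir are assumptions made for
# the example; any tokenizer compatible with PreTrainedTokenizer would do.
def _example_build_dev_features(data_dir: str):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    processor = HansProcessor()
    examples = processor.get_dev_examples(data_dir)
    return hans_convert_examples_to_features(
        examples, processor.get_labels(), max_length=128, tokenizer=tokenizer
    )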
hans_tasks_num_labels = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
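# --- Hedged illustration (not part of the original file) ---------------------
# HansProcessor.get_labels notes that HANS evaluation collapses "contradiction"
# and "neutral" into "non-entailment". A minimal sketch of that mapping over
# the MNLI-style labels returned above:
def _example_to_hans_label(mnli_label: str) -> str:
    return "entailment" if mnli_label == "entailment" else "non-entailment"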
|
|
import sys
import getopt
import re
import string
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, einar@jsbeautifier.org,
# MIT licence, enjoy.
#
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
self.max_preserve_newlines = 10.
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
self.unescape_strings = False
self.break_chained_methods = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
unescape_strings = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
self.unescape_strings,
)
class BeautifierFlags:
def __init__(self, mode):
self.previous_mode = 'BLOCK'
self.mode = mode
self.var_line = False
self.var_line_tainted = False
self.var_line_reindented = False
self.in_html_comment = False
self.if_line = False
self.chain_extra_indentation = 0
self.in_case = False
self.in_case_statement = False
self.case_body = False
self.eat_next_space = False
self.indentation_baseline = -1
self.indentation_level = 0
self.ternary_depth = 0
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-': # stdin
f = sys.stdin
else:
try:
f = open(file_name)
except Exception as ex:
return 'The file could not be opened'
b = Beautifier()
return b.beautify(''.join(f.readlines()), opts)
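# --- Hedged usage sketch (not part of the original module) --------------------
# Mirrors the module-level API described in the header comments: build options,
# tweak a couple of fields and beautify a string. The input string and option
# values are arbitrary and only for illustration.
def _example_beautify_string():
    opts = default_options()
    opts.indent_size = 2
    opts.brace_style = 'collapse'
    return beautify('var a=function(x){if(x){return x+1;}return 0;};', opts)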
def usage():
print("""Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
""")
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
def blank_state(self):
# internal flags
self.flags = BeautifierFlags('BLOCK')
self.flag_store = []
self.wanted_newline = False
self.just_added_newline = False
self.do_block_just_closed = False
if self.opts.indent_with_tabs:
self.indent_string = "\t"
else:
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_word = '' # last TK_WORD seen
self.last_type = 'TK_START_EXPR' # last token type
self.last_text = '' # last token text
self.last_last_text = '' # pre-last token text
self.input = None
self.output = [] # formatted javascript gets built here
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! !! , : ? ^ ^= |= ::'
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
# Words which always should start on a new line
self.line_starters = 'continue,try,throw,return,var,if,switch,case,default,for,while,break,function'.split(',')
self.set_mode('BLOCK')
global parser_pos
parser_pos = 0
def beautify(self, s, opts = None ):
if opts != None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
self.input = self.unpack(s, opts.eval_code)
parser_pos = 0
while True:
token_text, token_type = self.get_next_token()
#print (token_text, token_type, self.flags.mode)
if token_type == 'TK_EOF':
break
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_COMMA': self.handle_comma,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_DOT': self.handle_dot,
'TK_UNKNOWN': self.handle_unknown,
}
handlers[token_type](token_text)
self.last_last_text = self.last_text
self.last_type = token_type
self.last_text = token_text
sweet_code = self.preindent_string + re.sub('[\n ]+$', '', ''.join(self.output))
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
while len(self.output) \
and (
self.output[-1] == ' '\
or self.output[-1] == self.indent_string \
or self.output[-1] == self.preindent_string \
or (eat_newlines and self.output[-1] in ['\n', '\r'])):
self.output.pop()
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else'];
def is_array(self, mode):
return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']
def is_expression(self, mode):
return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]', '(EXPRESSION)', '(FOR-EXPRESSION)', '(COND-EXPRESSION)']
def append_newline_forced(self):
old_array_indentation = self.opts.keep_array_indentation
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = old_array_indentation
def append_newline(self, ignore_repeated = True, reset_statement_flags = True):
self.flags.eat_next_space = False
if self.opts.keep_array_indentation and self.is_array(self.flags.mode):
return
if reset_statement_flags:
self.flags.if_line = False
self.flags.chain_extra_indentation = 0
self.trim_output()
if len(self.output) == 0:
# no newline on start of file
return
if self.output[-1] != '\n' or not ignore_repeated:
self.just_added_newline = True
self.output.append('\n')
if self.preindent_string:
self.output.append(self.preindent_string)
for i in range(self.flags.indentation_level + self.flags.chain_extra_indentation):
self.output.append(self.indent_string)
if self.flags.var_line and self.flags.var_line_reindented:
self.output.append(self.indent_string)
def append(self, s):
if s == ' ':
# do not add just a single space after the // comment, ever
if self.last_type == 'TK_COMMENT':
return self.append_newline()
# make sure only single space gets drawn
if self.flags.eat_next_space:
self.flags.eat_next_space = False
elif len(self.output) and self.output[-1] not in [' ', '\n', self.indent_string]:
self.output.append(' ')
else:
self.just_added_newline = False
self.flags.eat_next_space = False
self.output.append(s)
def indent(self):
self.flags.indentation_level = self.flags.indentation_level + 1
def remove_indent(self):
if len(self.output) and self.output[-1] in [self.indent_string, self.preindent_string]:
self.output.pop()
def set_mode(self, mode):
prev = BeautifierFlags('BLOCK')
if self.flags:
self.flag_store.append(self.flags)
prev = self.flags
self.flags = BeautifierFlags(mode)
if len(self.flag_store) == 1:
self.flags.indentation_level = 0
else:
self.flags.indentation_level = prev.indentation_level
if prev.var_line and prev.var_line_reindented:
self.flags.indentation_level = self.flags.indentation_level + 1
self.flags.previous_mode = prev.mode
def restore_mode(self):
self.do_block_just_closed = self.flags.mode == 'DO_BLOCK'
if len(self.flag_store) > 0:
mode = self.flags.mode
self.flags = self.flag_store.pop()
self.flags.previous_mode = mode
def get_next_token(self):
global parser_pos
self.n_newlines = 0
if parser_pos >= len(self.input):
return '', 'TK_EOF'
self.wanted_newline = False
c = self.input[parser_pos]
parser_pos += 1
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
if keep_whitespace:
# slight mess to allow nice preservation of array indentation and reindent that correctly
# first time when we get to the arrays:
# var a = [
# ....'something'
# we make note of whitespace_count = 4 into flags.indentation_baseline
# so we know that 4 whitespaces in original source match indent_level of reindented source
#
# and afterwards, when we get to
# 'something,
# .......'something else'
# we know that this should be indented to indent_level + (7 - indentation_baseline) spaces
whitespace_count = 0
while c in self.whitespace:
if c == '\n':
self.trim_output()
self.output.append('\n')
self.just_added_newline = True
whitespace_count = 0
elif c == '\t':
whitespace_count += 4
elif c == '\r':
pass
else:
whitespace_count += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.flags.indentation_baseline == -1:
self.flags.indentation_baseline = whitespace_count
if self.just_added_newline:
for i in range(self.flags.indentation_level + 1):
self.output.append(self.indent_string)
if self.flags.indentation_baseline != -1:
for i in range(whitespace_count - self.flags.indentation_baseline):
self.output.append(' ')
else: # not keep_whitespace
while c in self.whitespace:
if c == '\n':
if self.opts.max_preserve_newlines == 0 or self.opts.max_preserve_newlines > self.n_newlines:
self.n_newlines += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i == 0)
self.just_added_newline = True
self.wanted_newline = self.n_newlines > 0
if c in self.wordchar:
if parser_pos < len(self.input):
while self.input[parser_pos] in self.wordchar:
c = c + self.input[parser_pos]
parser_pos += 1
if parser_pos == len(self.input):
break
# small and surprisingly unugly hack for 1E-10 representation
if parser_pos != len(self.input) and self.input[parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[parser_pos]
parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
return c, 'TK_WORD'
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
if self.wanted_newline and \
self.last_type != 'TK_OPERATOR' and\
self.last_type != 'TK_EQUALS' and\
not self.flags.if_line and \
(self.opts.preserve_newlines or self.last_text != 'var'):
self.append_newline()
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
comment_mode = 'TK_INLINE_COMMENT'
if self.input[parser_pos] == '*': # peek /* .. */ comment
parser_pos += 1
if parser_pos < len(self.input):
while not (self.input[parser_pos] == '*' and \
parser_pos + 1 < len(self.input) and \
self.input[parser_pos + 1] == '/')\
and parser_pos < len(self.input):
c = self.input[parser_pos]
comment += c
if c in '\r\n':
comment_mode = 'TK_BLOCK_COMMENT'
parser_pos += 1
if parser_pos >= len(self.input):
break
parser_pos += 2
return '/*' + comment + '*/', comment_mode
if self.input[parser_pos] == '/': # peek // comment
comment = c
while self.input[parser_pos] not in '\r\n':
comment += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
if self.wanted_newline:
self.append_newline()
return comment, 'TK_COMMENT'
if c == "'" or c == '"' or \
(c == '/' and ((self.last_type == 'TK_WORD' and self.is_special_word(self.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(FOR-EXPRESSION)', '(COND-EXPRESSION)']) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR',
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA']))):
sep = c
esc = False
esc1 = 0
esc2 = 0
resulting_string = c
in_char_class = False
if parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if not esc:
esc = self.input[parser_pos] == '\\'
if self.input[parser_pos] == '[':
in_char_class = True
elif self.input[parser_pos] == ']':
in_char_class = False
else:
esc = False
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
# bail out with what has been received so far
return resulting_string, 'TK_STRING'
else:
# handle string
while esc or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if esc1 and esc1 >= esc2:
try:
esc1 = int(resulting_string[-esc2:], 16)
except Exception:
esc1 = False
if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
esc1 = chr(esc1)
resulting_string = resulting_string[:-2 - esc2]
if esc1 == sep or esc1 == '\\':
resulting_string += '\\'
resulting_string += esc1
esc1 = 0
if esc1:
esc1 += 1
elif not esc:
esc = self.input[parser_pos] == '\\'
else:
esc = False
if self.opts.unescape_strings:
if self.input[parser_pos] == 'x':
esc1 += 1
esc2 = 2
elif self.input[parser_pos] == 'u':
esc1 += 1
esc2 = 4
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete string when end-of-file reached
# bail out with what has been received so far
return resulting_string, 'TK_STRING'
parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while parser_pos < len(self.input) and self.input[parser_pos] in self.wordchar:
resulting_string += self.input[parser_pos]
parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output) == 0 and len(self.input) > 1 and self.input[parser_pos] == '!':
resulting_string = c
while parser_pos < len(self.input) and c != '\n':
c = self.input[parser_pos]
resulting_string += c
parser_pos += 1
self.output.append(resulting_string.strip() + "\n")
self.append_newline()
return self.get_next_token()
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if parser_pos < len(self.input) and self.input[parser_pos] in self.digits:
while True:
c = self.input[parser_pos]
sharp += c
parser_pos += 1
if parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or parser_pos >= len(self.input):
pass
elif self.input[parser_pos] == '[' and self.input[parser_pos + 1] == ']':
sharp += '[]'
parser_pos += 2
elif self.input[parser_pos] == '{' and self.input[parser_pos + 1] == '}':
sharp += '{}'
parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[parser_pos - 1 : parser_pos + 3] == '<!--':
parser_pos += 3
c = '<!--'
while parser_pos < len(self.input) and self.input[parser_pos] != '\n':
c += self.input[parser_pos]
parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[parser_pos - 1 : parser_pos + 2] == '-->':
self.flags.in_html_comment = False
parser_pos += 2
if self.wanted_newline:
self.append_newline()
return '-->', 'TK_COMMENT'
if c == '.':
return c, 'TK_DOT'
if c in self.punct:
while parser_pos < len(self.input) and c + self.input[parser_pos] in self.punct:
c += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
if c == '=':
return c, 'TK_EQUALS'
if c == ',':
return c, 'TK_COMMA'
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def handle_start_expr(self, token_text):
if token_text == '[':
if self.last_type == 'TK_WORD' or self.last_text == ')':
if self.last_text in self.line_starters:
self.append(' ')
self.set_mode('(EXPRESSION)')
self.append(token_text)
return
if self.flags.mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']:
if self.last_last_text == ']' and self.last_text == ',':
# ], [ goes to a new line
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
elif self.last_text == '[':
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
else:
self.set_mode('[EXPRESSION]')
else:
self.set_mode('[EXPRESSION]')
else:
if self.last_text == 'for':
self.set_mode('(FOR-EXPRESSION)')
elif self.last_text in ['if', 'while']:
self.set_mode('(COND-EXPRESSION)')
else:
self.set_mode('(EXPRESSION)')
if self.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
if self.wanted_newline:
self.append_newline();
elif self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.append(' ')
elif self.last_word == 'function' or self.last_word == 'typeof':
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.append(' ')
elif self.last_text in self.line_starters or self.last_text == 'catch':
self.append(' ')
self.append(token_text)
def handle_end_expr(self, token_text):
if token_text == ']':
if self.opts.keep_array_indentation:
if self.last_text == '}':
self.remove_indent()
self.append(token_text)
self.restore_mode()
return
else:
if self.flags.mode == '[INDENTED-EXPRESSION]':
if self.last_text == ']':
self.restore_mode()
self.append_newline()
self.append(token_text)
return
self.restore_mode()
self.append(token_text)
def handle_start_block(self, token_text):
if self.last_word == 'do':
self.set_mode('DO_BLOCK')
else:
self.set_mode('BLOCK')
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR':
if self.last_text == '=' or (self.is_special_word(self.last_text) and self.last_text != 'else'):
self.append(' ')
else:
self.append_newline(True)
self.append(token_text)
self.indent()
else:
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.append(' ')
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.flags.previous_mode) and self.last_text == ',':
if self.last_last_text == '}':
self.append(' ')
else:
self.append_newline()
self.indent()
self.append(token_text)
def handle_end_block(self, token_text):
self.restore_mode()
if self.opts.brace_style == 'expand':
if self.last_text != '{':
self.append_newline()
else:
if self.last_type == 'TK_START_BLOCK':
if self.just_added_newline:
self.remove_indent()
else:
# {}
self.trim_output()
else:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.append(token_text)
def handle_word(self, token_text):
if self.do_block_just_closed:
self.append(' ')
self.append(token_text)
self.append(' ')
self.do_block_just_closed = False
return
if token_text == 'function':
if self.flags.var_line and self.last_text != '=':
self.flags.var_line_reindented = not self.opts.keep_function_indentation
if (self.just_added_newline or self.last_text == ';') and self.last_text != '{':
# make sure there is a nice clean space of at least one blank line
# before a new function definition
have_newlines = self.n_newlines
if not self.just_added_newline:
have_newlines = 0
if not self.opts.preserve_newlines:
have_newlines = 1
for i in range(2 - have_newlines):
self.append_newline(False)
if self.last_text in ['get', 'set', 'new'] or self.last_type == 'TK_WORD':
self.append(' ')
if self.last_type == 'TK_WORD':
if self.last_text in ['get', 'set', 'new', 'return']:
self.append(' ')
else:
self.append_newline()
elif self.last_type == 'TK_OPERATOR' or self.last_text == '=':
# foo = function
self.append(' ')
elif self.is_expression(self.flags.mode):
# (function
pass
else:
self.append_newline()
self.append('function')
self.last_word = 'function'
return
if token_text == 'case' or (token_text == 'default' and self.flags.in_case_statement):
self.append_newline()
if self.flags.case_body:
self.remove_indent();
self.flags.case_body = False
self.flags.indentation_level -= 1;
self.append(token_text)
self.flags.in_case = True
self.flags.in_case_statement = True
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if token_text not in ['else', 'catch', 'finally']:
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.append(' ')
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode in ['BLOCK', 'DO_BLOCK']:
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_WORD':
if self.last_text == 'else':
# eat newlines between ...else *** some_op...
# won't preserve extra newlines in this place (if any), but don't care that much
self.trim_output(True)
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.append(' ')
prefix = 'NEWLINE'
if self.flags.if_line and self.last_type == 'TK_END_EXPR':
self.flags.if_line = False
if token_text in self.line_starters:
if self.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
self.append(' ')
elif prefix == 'NEWLINE':
if self.is_special_word(self.last_text):
# no newline between return nnn
self.append(' ')
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or token_text != 'var') and self.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if token_text == 'if' and self.last_word == 'else' and self.last_text != '{':
self.append(' ')
else:
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif token_text in self.line_starters and self.last_text != ')':
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif self.is_array(self.flags.mode) and self.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.append(' ')
self.append(token_text)
self.last_word = token_text
if token_text == 'var':
self.flags.var_line = True
self.flags.var_line_reindented = False
self.flags.var_line_tainted = False
if token_text == 'if':
self.flags.if_line = True
if token_text == 'else':
self.flags.if_line = False
def handle_semicolon(self, token_text):
self.append(token_text)
self.flags.var_line = False
self.flags.var_line_reindented = False
if self.flags.mode == 'OBJECT':
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = 'BLOCK'
def handle_string(self, token_text):
if self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(COND-EXPRESSION)', '(FOR-EXPRESSION)']:
self.append(' ')
if self.last_type in ['TK_COMMENT', 'TK_STRING', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_SEMICOLON']:
self.append_newline()
elif self.last_type == 'TK_WORD':
self.append(' ')
self.append(token_text)
def handle_equals(self, token_text):
if self.flags.var_line:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.var_line_tainted = True
self.append(' ')
self.append(token_text)
self.append(' ')
def handle_comma(self, token_text):
if self.last_type == 'TK_COMMENT':
self.append_newline();
if self.flags.var_line:
if self.is_expression(self.flags.mode) or self.last_type == 'TK_END_BLOCK':
# do not break on comma, for ( var a = 1, b = 2
self.flags.var_line_tainted = False
if self.flags.var_line_tainted:
self.append(token_text)
self.flags.var_line_reindented = True
self.flags.var_line_tainted = False
self.append_newline()
return
else:
self.flags.var_line_tainted = False
self.append(token_text)
self.append(' ');
return
if self.last_type == 'TK_END_BLOCK' and self.flags.mode != '(EXPRESSION)':
self.append(token_text)
if self.flags.mode == 'OBJECT' and self.last_text == '}':
self.append_newline()
else:
self.append(' ')
else:
if self.flags.mode == 'OBJECT':
self.append(token_text)
self.append_newline()
else:
# EXPR or DO_BLOCK
self.append(token_text)
self.append(' ')
def handle_operator(self, token_text):
space_before = True
space_after = True
if self.is_special_word(self.last_text):
# return had a special handling in TK_WORD
self.append(' ')
self.append(token_text)
return
# hack for actionscript's import .*;
if token_text == '*' and self.last_type == 'TK_DOT' and not self.last_last_text.isdigit():
self.append(token_text)
return
if token_text == ':' and self.flags.in_case:
self.flags.case_body = True
self.indent();
self.append(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append(token_text)
return
if token_text in ['--', '++', '!'] \
or (token_text in ['+', '-'] \
and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
or self.last_text in self.line_starters)):
space_before = False
space_after = False
if self.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_WORD' and self.last_text in self.line_starters:
space_before = True
if self.flags.mode == 'BLOCK' and self.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == ':':
if self.flags.ternary_depth == 0:
if self.flags.mode == 'BLOCK':
self.flags.mode = 'OBJECT'
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
if space_before:
self.append(' ')
self.append(token_text)
if space_after:
self.append(' ')
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
# all lines start with an asterisk? that's a proper box comment
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
self.append_newline()
self.append(lines[0])
for line in lines[1:]:
self.append_newline()
self.append(' ' + line.strip())
else:
# simple block comment: leave intact
if len(lines) > 1:
# multiline comment starts on a new line
self.append_newline()
else:
# single line /* ... */ comment stays on the same line
self.append(' ')
for line in lines:
self.append(line)
self.append('\n')
self.append_newline()
def handle_inline_comment(self, token_text):
self.append(' ')
self.append(token_text)
if self.is_expression(self.flags.mode):
self.append(' ')
else:
self.append_newline_forced()
def handle_comment(self, token_text):
if self.last_text == ',' and not self.wanted_newline:
self.trim_output(True)
if self.last_type != 'TK_COMMENT':
if self.wanted_newline:
self.append_newline()
else:
self.append(' ')
self.append(token_text)
self.append_newline();
def handle_dot(self, token_text):
if self.is_special_word(self.last_text):
self.append(' ')
elif self.last_text == ')':
if self.opts.break_chained_methods or self.wanted_newline:
self.flags.chain_extra_indentation = 1;
self.append_newline(True, False)
self.append(token_text)
def handle_unknown(self, token_text):
self.append(token_text)
def main():
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "s:c:o:djbkil:xhtf", ['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
'jslint-happy', 'brace-style=',
'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help',
'usage', 'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation'])
except getopt.GetoptError:
return usage()
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
if opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--unescape-strings', '-x'):
js_options.unescape_strings = True
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
return usage()
else:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow import models
from airflow.api.common.experimental.mark_tasks import set_state, _create_dagruns
from airflow.settings import Session
from airflow.utils.dates import days_ago
from airflow.utils.state import State
DEV_NULL = "/dev/null"
class TestMarkTasks(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(include_examples=True)
self.dag1 = self.dagbag.dags['test_example_bash_operator']
self.dag2 = self.dagbag.dags['example_subdag_operator']
self.execution_dates = [days_ago(2), days_ago(1)]
drs = _create_dagruns(self.dag1, self.execution_dates,
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag1
dr.verify_integrity()
drs = _create_dagruns(self.dag2,
[self.dag2.default_args['start_date']],
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag2
dr.verify_integrity()
self.session = Session()
def snapshot_state(self, dag, execution_dates):
TI = models.TaskInstance
tis = self.session.query(TI).filter(
TI.dag_id==dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.session.expunge_all()
return tis
def verify_state(self, dag, task_ids, execution_dates, state, old_tis):
TI = models.TaskInstance
tis = self.session.query(TI).filter(
TI.dag_id==dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.assertTrue(len(tis) > 0)
for ti in tis:
if ti.task_id in task_ids and ti.execution_date in execution_dates:
self.assertEqual(ti.state, state)
else:
for old_ti in old_tis:
if (old_ti.task_id == ti.task_id
and old_ti.execution_date == ti.execution_date):
self.assertEqual(ti.state, old_ti.state)
def test_mark_tasks_now(self):
# set one task to success but do not commit
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=False)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
None, snapshot)
# set one and only one task to success
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set no tasks
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 0)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set task to other than success
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.FAILED, snapshot)
# don't alter other tasks
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_0")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_downstream(self):
# test downstream
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 3)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_upstream(self):
# test upstream
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("run_after_loop")
relatives = task.get_flat_relatives(upstream=True)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=True, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 4)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_tasks_future(self):
# set one task to success towards end of scheduled dag runs
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=True,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates,
State.SUCCESS, snapshot)
def test_mark_tasks_past(self):
# set one task to success towards end of scheduled dag runs
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[1],
upstream=False, downstream=False, future=False,
past=True, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates,
State.SUCCESS, snapshot)
def test_mark_tasks_subdag(self):
        # set one task and its downstream tasks in the subdag to success
task = self.dag2.get_task("section-1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 14)
        # cannot use snapshot here as that would require drilling down the
        # sub dag tree, essentially recreating the same code as in the
        # tested logic.
self.verify_state(self.dag2, task_ids, [self.execution_dates[0]],
State.SUCCESS, [])
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
# just to make sure we are fully cleaned up
self.session.query(models.DagRun).delete()
self.session.query(models.TaskInstance).delete()
self.session.commit()
self.session.close()
if __name__ == '__main__':
unittest.main()
|
|
import datetime
try:
import json
except ImportError:
from django.utils import simplejson as json
from djutils.dashboard.models import Panel, PanelData, PanelDataSet
from djutils.dashboard.provider import PanelProvider
from djutils.dashboard.registry import registry
from djutils.dashboard.views import dashboard_data_endpoint, dashboard as dashboard_view
from djutils.test import RequestFactoryTestCase
class TestPanelA(PanelProvider):
_i = 0
def get_title(self):
return 'a'
def get_data(self):
TestPanelA._i += 1
return {
'a': TestPanelA._i,
'x': 1,
}
class TestPanelB(PanelProvider):
def get_title(self):
return 'b'
def get_data(self):
return {'b': 1}
class DashboardTestCase(RequestFactoryTestCase):
urls = 'djutils.dashboard.urls'
def setUp(self):
super(DashboardTestCase, self).setUp()
TestPanelA._i = 0
registry.register(TestPanelA)
registry.register(TestPanelB)
self.panel_a = Panel.objects.create(title='a', slug='a')
self.panel_b = Panel.objects.create(title='b', slug='b')
self.seed = datetime.datetime(2011, 1, 1)
def tearDown(self):
registry._registry = {}
def create_data(self, seed=None, how_much=60):
seed = seed or self.seed
cur_time = seed
for i in range(1, how_much + 1):
for provider in registry.get_provider_instances():
# pull the data off the panel and store
panel_obj = provider.get_panel_instance()
panel_data_obj = PanelData.objects.create(
panel=panel_obj,
created_date=cur_time,
)
raw_panel_data = provider.get_data()
for key, value in raw_panel_data.items():
data_set_obj = PanelDataSet.objects.create(
panel_data=panel_data_obj,
key=key,
value=value,
)
if i % 60 == 0:
Panel.objects.generate_hourly_aggregates(cur_time)
if i % 1440 == 0:
Panel.objects.generate_daily_aggregates(cur_time)
cur_time += datetime.timedelta(seconds=60)
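    # Illustrative note (not part of the original tests): with the loop above,
    # create_data(how_much=120) yields 120 minute-level PanelData rows per
    # panel plus two hourly aggregates (at i == 60 and i == 120) and no daily
    # aggregate, which is what test_dashboard_data_endpoints below relies on.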
def clear_data(self):
Panel.objects.all().delete()
def test_panel_registry_to_model(self):
self.assertEqual(len(registry._registry), 2)
self.assertEqual(Panel.objects.count(), 2)
provider_a = registry._registry[TestPanelA]
provider_b = registry._registry[TestPanelB]
# behind-the-scenes does a get-or-create
panel_model_a = provider_a.get_panel_instance()
self.assertEqual(panel_model_a, self.panel_a)
panel_model_b = provider_b.get_panel_instance()
self.assertEqual(panel_model_b, self.panel_b)
# ensure that no new instances were created
self.assertEqual(Panel.objects.count(), 2)
# blow away all the panels
Panel.objects.all().delete()
panel_model_a = provider_a.get_panel_instance()
panel_model_b = provider_b.get_panel_instance()
self.assertEqual(Panel.objects.count(), 2)
def test_basic_data_generation(self):
self.create_data(self.seed, 2880)
for panel in (self.panel_a, self.panel_b):
# check to see that 2880 minutes of data was generated
self.assertEqual(panel.data.minute_data().count(), 2880)
# check to see that 48 hours of aggregate data was generated
self.assertEqual(panel.data.hour_data().count(), 48)
# two days of data generated
self.assertEqual(panel.data.day_data().count(), 2)
# grab the first and last minutes of generated data
minute_list = list(self.panel_a.data.minute_data())
first, last = minute_list[-1], minute_list[0]
# check that the datetimes are what we expect
self.assertEqual(first.created_date, datetime.datetime(2011, 1, 1, 0, 0))
self.assertEqual(last.created_date, datetime.datetime(2011, 1, 2, 23, 59))
# grab the hourly aggregate data
hour_list = list(self.panel_a.data.hour_data())
first, last = hour_list[-1], hour_list[0]
# check that the datetimes are what we expect
self.assertEqual(first.created_date, datetime.datetime(2011, 1, 1, 0, 59))
self.assertEqual(last.created_date, datetime.datetime(2011, 1, 2, 23, 59))
# grab the daily aggregate data
day_list = list(self.panel_a.data.day_data())
first, last = day_list[-1], day_list[0]
# check that the datetimes are what we expect
self.assertEqual(first.created_date, datetime.datetime(2011, 1, 1, 23, 59))
self.assertEqual(last.created_date, datetime.datetime(2011, 1, 2, 23, 59))
# check that the data being generated is correct
self.assertEqual(minute_list[-1].get_data(), {
'a': 1.0,
'x': 1.0,
})
self.assertEqual(minute_list[0].get_data(), {
'a': 2880.0,
'x': 1.0,
})
# check first hour of data
self.assertEqual(hour_list[-1].get_data(), {
'a': 30.5,
'x': 1.0,
})
# check last hour of data
self.assertEqual(hour_list[0].get_data(), {
'a': 2850.0,
'x': 1.0,
})
# check first day of data
self.assertEqual(day_list[-1].get_data(), {
'a': 720.5,
'x': 1.0,
})
# check last day of data
self.assertEqual(day_list[0].get_data(), {
'a': 2160.0,
'x': 1.0,
})
def test_dashboard_data_view(self):
# check that the dashboard view responds
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# make sure our two panels are present
panel_list = response.context['panel_list']
self.assertEqual(len(panel_list), 2)
self.assertQuerysetEqual(panel_list, [self.panel_a, self.panel_b])
# ensure that only registered panels are displayed
registry.unregister(TestPanelA)
response = self.client.get('/')
panel_list = response.context['panel_list']
self.assertQuerysetEqual(panel_list, [self.panel_b])
# ensure that even if a panel is newly created, it won't display immediately
Panel.objects.all().delete()
response = self.client.get('/')
panel_list = response.context['panel_list']
self.assertEqual(len(panel_list), 0)
# create some data and it will be shown
Panel.objects.update_panels()
response = self.client.get('/')
panel_list = response.context['panel_list']
self.assertEqual(len(panel_list), 1)
panel = panel_list[0]
self.assertEqual(panel.title, 'b')
def test_dashboard_data_endpoints(self):
self.create_data(how_much=120)
request = self.request_factory.request()
response = dashboard_data_endpoint(request, 0)
data = json.loads(response.content)
        # data covers both panels a and b; the endpoint only returns the
        # last 60 minutes per panel, so 2 panels x 60 points = 120 rows
self.assertEqual(len(data), 120)
def transform_data(d):
# data looks like a list of {u'point_id': 2, u'data': {u'b': 1.0}, u'panel_id': 2}
a_data = [(item['point_id'], item['data']) for item in d if item['panel_id'] == self.panel_a.pk]
return [a[1] for a in sorted(a_data)]
just_data = transform_data(data)
self.assertEqual(just_data[0], {'a': 61.0, 'x': 1.0})
self.assertEqual(just_data[-1], {'a': 120.0, 'x': 1.0})
# test the hour endpoint
response = dashboard_data_endpoint(request, 1)
data = json.loads(response.content)
# data is for both panels a and b
self.assertEqual(len(data), 4)
def transform_data(d):
# data looks like a list of {u'point_id': 2, u'data': {u'b': 1.0}, u'panel_id': 2}
a_data = [(item['point_id'], item['data']) for item in d if item['panel_id'] == self.panel_a.pk]
return [a[1] for a in sorted(a_data)]
just_data = transform_data(data)
self.assertEqual(just_data[0], {'a': 30.5, 'x': 1.0})
self.assertEqual(just_data[-1], {'a': 90.0, 'x': 1.0})
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume backups are full copies of persistent volumes stored in a backup
store, e.g. an object store, or in any other backup store if and when
support is added. They are usable without the original object being
available. A
volume backup can be restored to the original volume it was created from or
any other available volume with a minimum size of the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_topic: What :mod:`rpc` topic to listen to (default:
`cinder-backup`).
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.backup.manager.Manager`).
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from cinder.backup import driver
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
from cinder import objects
from cinder import quota
from cinder import rpc
from cinder import utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
backup_manager_opts = [
cfg.StrOpt('backup_driver',
default='cinder.backup.drivers.swift',
help='Driver to use for backups.',)
]
# This map doesn't need to be extended in the future since it's only
# for old backup services
mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift',
'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'}
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
QUOTAS = quota.QUOTAS
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = '1.2'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, service_name=None, *args, **kwargs):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_managers = {}
self._setup_volume_drivers()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
@property
def driver_name(self):
"""This function maps old backup services to backup drivers."""
return self._map_service_to_driver(CONF.backup_driver)
def _map_service_to_driver(self, service):
"""Maps services to drivers."""
if service in mapper:
return mapper[service]
return service
@property
def driver(self):
return self._get_driver()
def _get_volume_backend(self, host=None, allow_null_host=False):
if host is None:
if not allow_null_host:
msg = _("NULL host not allowed for volume backend lookup.")
raise exception.BackupFailedToGetVolumeBackend(msg)
else:
LOG.debug("Checking hostname '%s' for backend info.", host)
part = host.partition('@')
if (part[1] == '@') and (part[2] != ''):
backend = part[2]
LOG.debug("Got backend '%s'.", backend)
return backend
LOG.info(_LI("Backend not found in hostname (%s) so using default."),
host)
if 'default' not in self.volume_managers:
# For multi-backend we just pick the top of the list.
            return list(self.volume_managers.keys())[0]
return 'default'
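    # Illustrative examples (not part of the original code) of the lookup
    # above: a host string of 'node1@lvm1' resolves to the backend 'lvm1',
    # while a bare 'node1' (no '@backend' suffix) falls through to the
    # default backend handling.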
def _get_manager(self, backend):
LOG.debug("Manager requested for volume_backend '%s'.",
backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
if backend not in self.volume_managers:
msg = (_("Volume manager for backend '%s' does not exist.") %
(backend))
raise exception.BackupFailedToGetVolumeBackend(msg)
return self.volume_managers[backend]
def _get_driver(self, backend=None):
LOG.debug("Driver requested for volume_backend '%s'.",
backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
mgr = self._get_manager(backend)
mgr.driver.db = self.db
return mgr.driver
def _setup_volume_drivers(self):
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
host = "%s@%s" % (CONF.host, backend)
mgr = importutils.import_object(CONF.volume_manager,
host=host,
service_name=backend)
config = mgr.configuration
backend_name = config.safe_get('volume_backend_name')
LOG.debug("Registering backend %(backend)s (host=%(host)s "
"backend_name=%(backend_name)s).",
{'backend': backend, 'host': host,
'backend_name': backend_name})
self.volume_managers[backend] = mgr
else:
default = importutils.import_object(CONF.volume_manager)
LOG.debug("Registering default backend %s.", default)
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
driver.do_setup(ctxt)
driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Error encountered during initialization of "
"driver: %(name)s."),
{'name': driver.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
driver.set_initialized()
def _update_backup_error(self, backup, context, err):
backup.status = 'error'
backup.fail_reason = err
backup.save()
def init_host(self):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
for mgr in self.volume_managers.values():
self._init_volume_driver(ctxt, mgr.driver)
LOG.info(_LI("Cleaning up incomplete backup operations."))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
for volume in volumes:
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
mgr = self._get_manager(backend)
if volume['status'] == 'backing-up':
self._detach_all_attachments(ctxt, mgr, volume)
LOG.info(_LI('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).'),
{'vol_id': volume['id'],
'status': volume['previous_status']})
self.db.volume_update(ctxt, volume['id'],
{'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
self._detach_all_attachments(ctxt, mgr, volume)
LOG.info(_LI('setting volume %s to error_restoring '
'(was restoring-backup).'), volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
LOG.info(_LI('Resetting backup %s to error (was creating).'),
backup['id'])
err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, ctxt, err)
if backup['status'] == 'restoring':
LOG.info(_LI('Resetting backup %s to '
'available (was restoring).'),
backup['id'])
backup.status = 'available'
backup.save()
if backup['status'] == 'deleting':
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
self.delete_backup(ctxt, backup)
self._cleanup_temp_volumes_snapshots(backups)
def _detach_all_attachments(self, ctxt, mgr, volume):
attachments = volume['volume_attachment'] or []
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
mgr.detach_volume(ctxt, volume['id'],
attachment['id'])
def _cleanup_temp_volumes_snapshots(self, backups):
# NOTE(xyang): If the service crashes or gets restarted during the
# backup operation, there could be temporary volumes or snapshots
# that are not deleted. Make sure any temporary volumes or snapshots
        # created by the backup job are deleted when the service is started.
ctxt = context.get_admin_context()
for backup in backups:
try:
volume = self.db.volume_get(ctxt, backup.volume_id)
volume_host = volume_utils.extract_host(volume['host'],
'backend')
backend = self._get_volume_backend(host=volume_host)
mgr = self._get_manager(backend)
except (KeyError, exception.VolumeNotFound):
LOG.debug("Could not find a volume to clean up for "
"backup %s.", backup.id)
continue
if backup.temp_volume_id and backup.status == 'error':
try:
temp_volume = self.db.volume_get(ctxt,
backup.temp_volume_id)
                    # The temp volume should be deleted directly thru the
                    # volume driver, not thru the volume manager.
mgr.driver.delete_volume(temp_volume)
self.db.volume_destroy(ctxt, temp_volume['id'])
except exception.VolumeNotFound:
LOG.debug("Could not find temp volume %(vol)s to clean up "
"for backup %(backup)s.",
{'vol': backup.temp_volume_id,
'backup': backup.id})
backup.temp_volume_id = None
backup.save()
if backup.temp_snapshot_id and backup.status == 'error':
try:
temp_snapshot = objects.Snapshot.get_by_id(
ctxt, backup.temp_snapshot_id)
# The temp snapshot should be deleted directly thru the
# volume driver, not thru the volume manager.
mgr.driver.delete_snapshot(temp_snapshot)
with temp_snapshot.obj_as_admin():
self.db.volume_glance_metadata_delete_by_snapshot(
ctxt, temp_snapshot.id)
temp_snapshot.destroy()
except exception.SnapshotNotFound:
LOG.debug("Could not find temp snapshot %(snap)s to clean "
"up for backup %(backup)s.",
{'snap': backup.temp_snapshot_id,
'backup': backup.id})
backup.temp_snapshot_id = None
backup.save()
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
volume_id = backup.volume_id
volume = self.db.volume_get(context, volume_id)
previous_status = volume.get('previous_status', None)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
backup.host = self.host
backup.service = self.driver_name
backup.save()
expected_status = 'backing-up'
actual_status = volume['status']
if actual_status != expected_status:
err = _('Create backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
raise exception.InvalidVolume(reason=err)
expected_status = 'creating'
actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
backup.save()
raise exception.InvalidBackup(reason=err)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught,
# the volume status will be set back to available and
# the backup status to 'error'
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
self._get_driver(backend).backup_volume(context, backup,
backup_service)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
self._update_backup_error(backup, context, six.text_type(err))
# Restore the original status.
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'backing-up'})
backup.status = 'available'
backup.size = volume['size']
backup.availability_zone = self.az
backup.save()
LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
volume = self.db.volume_get(context, volume_id)
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
self._notify_about_backup_usage(context, backup, "restore.start")
backup.host = self.host
backup.save()
expected_status = 'restoring-backup'
actual_status = volume['status']
if actual_status != expected_status:
err = (_('Restore backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
backup.status = 'available'
backup.save()
raise exception.InvalidVolume(reason=err)
expected_status = 'restoring'
actual_status = backup['status']
if actual_status != expected_status:
err = (_('Restore backup aborted: expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self._update_backup_error(backup, context, err)
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
'size: %(backup_size)d, continuing with restore.'),
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
'backup_size': backup['size']})
backup_service = self._map_service_to_driver(backup['service'])
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Restore backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
backup.status = 'available'
backup.save()
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught,
# the volume status will be set back to available and
# the backup status to 'error'
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
self._get_driver(backend).restore_backup(context, backup,
volume,
backup_service)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
backup.status = 'available'
backup.save()
self.db.volume_update(context, volume_id, {'status': 'available'})
backup.status = 'available'
backup.save()
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the backup status updated. Fail early since there
# are no other status to change but backup's
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context, six.text_type(err))
self._notify_about_backup_usage(context, backup, "delete.start")
backup.host = self.host
backup.save()
expected_status = 'deleting'
actual_status = backup.status
if actual_status != expected_status:
            err = _('Delete backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
backup.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup.project_id)
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
context,
backup,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_backup_usage(
context, backup, event_suffix,
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
        Export backup metadata so it can be re-imported into the database
        without any prerequisite in the backup database.
        :param context: running context
        :param backup: backup object to export
        :returns: backup_record - a description of how to import the backup;
                  contains 'backup_url' (how to import the backup) and
                  'backup_service' (describing the needed driver)
:raises: InvalidBackup
"""
LOG.info(_LI('Export record started, backup: %s.'), backup.id)
expected_status = 'available'
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
raise exception.InvalidBackup(reason=err)
backup_record = {}
backup_record['backup_service'] = backup.service
backup_service = self._map_service_to_driver(backup.service)
configured_service = self.driver_name
if backup_service != configured_service:
err = (_('Export record aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') %
{'configured_service': configured_service,
'backup_service': backup_service})
raise exception.InvalidBackup(reason=err)
# Call driver to create backup description string
try:
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
driver_info = backup_service.export_record(backup)
backup_url = backup.encode_record(driver_info=driver_info)
backup_record['backup_url'] = backup_url
except Exception as err:
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
return backup_record
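    # Illustrative sketch (not part of the original code): a successful
    # export_record() call returns a dict shaped roughly like the following,
    # where both values are placeholders:
    #
    #     {'backup_service': 'cinder.backup.drivers.swift',
    #      'backup_url': '<record encoded by backup.encode_record()>'}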
def import_record(self,
context,
backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
:param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import
:raises: InvalidBackup
:raises: ServiceNotFound
"""
LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
# No, are there additional potential backup hosts in the list?
if len(backup_hosts) > 0:
                # try the next host on the list, maybe it can import
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup,
backup_service,
backup_url,
backup_hosts)
else:
# empty list - we are the last host on the list, fail
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s') % {'service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
try:
# Deserialize backup record information
backup_options = backup.decode_record(backup_url)
# Extract driver specific info and pass it to the driver
driver_options = backup_options.pop('driver_info', {})
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
backup_service.import_record(backup, driver_options)
except Exception as err:
msg = six.text_type(err)
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = ['display_name',
'display_description',
'container',
'size',
'service_metadata',
'service',
'object_count']
backup_update = {}
backup_update['status'] = 'available'
backup_update['service'] = self.driver_name
backup_update['availability_zone'] = self.az
backup_update['host'] = self.host
for entry in required_import_options:
if entry not in backup_options:
                    msg = (_('Backup metadata received from driver for '
                             'import is missing %s.') % entry)
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
backup_update[entry] = backup_options[entry]
# Update the database
backup.update(backup_update)
backup.save()
# Verify backup
try:
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
else:
LOG.warning(_LW('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
'not verified. Skipping verify.'),
{'service': self.driver_name,
'id': backup.id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
LOG.info(_LI('Import record id %s metadata from driver '
'finished.'), backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
LOG.info(_LI('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup.id,
'status': status})
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the backup status updated. Fail early since there
# are no other status to change but backup's
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Backup driver has not been initialized"))
backup_service = self._map_service_to_driver(backup.service)
LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': configured_service,
'backup_service': backup_service}
raise exception.InvalidBackup(reason=err)
# Verify backup
try:
# check whether the backup is ok or not
if status == 'available' and backup['status'] != 'restoring':
# check whether we could verify the backup is ok or not
if isinstance(backup_service,
driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
backup.status = status
backup.save()
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '
'does not support verify. Backup id'
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup.id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == 'error' or
(status == 'available' and
backup.status == 'restoring')):
backup.status = status
backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
"Skipping reset."), backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Backup service %(configured_service)s '
'does not support verify. Backup id '
'%(id)s is not verified. '
'Skipping verify.'),
{'configured_service': self.driver_name,
'id': backup.id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
'id': backup.id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# send notification to ceilometer
notifier_info = {'id': backup.id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
:param context: running context
"""
backup_service = self.service.get_backup_driver(context)
return backup_service.support_force_delete
|
|
#!/usr/bin/env python
__version__="0.1.0"
# make the other metrics work
# generate the txt files, then work on the pdf output
import pandas as pd
import sys
import os
import re
import shelve
import numpy as np
import pprint as pp
import argparse, traceback
import core.graph_sampler as gs
import core.probabilistic_cfg as pcfg
import networkx as nx
import core.PHRG as phrg
from glob import glob
from core.utils import listify_rhs
from core.load_edgelist_from_dataframe import Pandas_DataFrame_From_Edgelist
from collections import Sequence
from explodingTree import graph_name
DBG = False
def genflat(l, ltypes=Sequence):
# by https://stackoverflow.com/users/95810/alex-martelli
l = list(l)
while l:
while l and isinstance(l[0], ltypes):
l[0:1] = l[0]
if l: yield l.pop(0)
def summarize_listify_rule(rhs_rule):
if DBG: print type(rhs_rule), len(rhs_rule)
rhs_clean= [f[1:-1] for f in re.findall("'.+?'", rhs_rule)]
return [len(x.split(",")) for x in rhs_clean if "N" in x]
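# Illustrative example (not part of the original script): for an RHS string
# like "['0,1:N', '0:T']" the regex above pulls out '0,1:N' and '0:T'; only
# the nonterminal entry (the one containing "N") is kept, so the result is [2].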
def willFire_check(dat_frm):
""" Checks if the subset of prod rules will fire
:param dat_frm:
:return bool
"""
ret_val = False
if not len(dat_frm):
return ret_val
#print [nt for nt in dat_frm[1] if "S" in nt]#.apply(lambda x: [nt for nt in x if "S" in nt])
nt_symbs_s = [nt for nt in dat_frm[1] if "S" in nt]
if not len(nt_symbs_s):
print nt_symbs_s
print "_S:" # list(dat_frm[1].values)
return ret_val
else:
# print dat_frm.iloc[1][1], dat_frm.iloc[1][2]
rhs_els = dat_frm[2].apply(summarize_listify_rule)
lhs_els = dat_frm[1].apply(lambda x: len(x.split(",")))
df = pd.concat([lhs_els, rhs_els], axis=1)
# Test if for each rhs NT we have an equal sized LHS
# d = defaultdict(list)
# print df.head()
# print df.groupby([1]).groups.keys()
lhs_keys = df.groupby([1]).groups.keys()
key_seen = {}
for k in lhs_keys:
if k == 1: continue
if k in list(genflat(df[2].values)):
key_seen[k] = True
else:
key_seen[k] = False
# print key_seen
# print not any(key_seen.values())
ret_val = not any(x is False for x in key_seen.values())
return ret_val
def tst_prod_rules_isom_intrxn(fname,origfname):
"""
Test the isomorphic subset of rules
:param fname: isom intersection rules file
:param origfname: reference input network (dataset) edgelist file
:return:
"""
# Get the original file
fdf = Pandas_DataFrame_From_Edgelist([origfname])
origG = nx.from_pandas_dataframe(fdf[0], 'src', 'trg')
origG.name = graph_name(origfname)
print origG.name, "+"*80
# Read the subset of prod rules
df = pd.read_csv(fname, header=None, sep="\t", dtype={0: str, 1: list, 2: list, 3: float})
g = pcfg.Grammar('S')
if not willFire_check(df):
print "-"*10, fname, "contains production rules that WillNotFire"
return None
else:
print "+"*40
# Process dataframe
from td_isom_jaccard_sim import listify_rhs
for (id, lhs, rhs, prob) in df.values:
rhs = listify_rhs(rhs)
g.add_rule(pcfg.Rule(id, lhs, rhs, float(prob)))
print "\n","."*40 #print 'Added the rules to the datastructure'
num_nodes = origG.number_of_nodes()
# print "Starting max size", 'n=', num_nodes
g.set_max_size(num_nodes)
# print "Done with max size"
Hstars = []
ofname = "FakeGraphs/"+ origG.name+ "_isom_ntrxn.shl"
database = shelve.open(ofname)
num_samples = 20 #
print '~' * 40
for i in range(0, num_samples):
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
Hstars.append(hstar)
print hstar.number_of_nodes(), hstar.number_of_edges()
print '-' * 40
database['hstars'] = Hstars
database.close()
def tst_prod_rules_level1_individual(in_path):
    files = glob(in_path)  # e.g. "ProdRules/moreno_lesmis_lesmis.*_iprules.tsv"
    mdf = pd.DataFrame()
    for f in sorted(files, reverse=True):
        df = pd.read_csv(f, header=None, sep="\t")
        mdf = pd.concat([mdf, df])
        # print f, mdf.shape
        # print mdf.head()
        g = pcfg.Grammar('S')
        from td_isom_jaccard_sim import listify_rhs
        for (id, lhs, rhs, prob) in df.values:
            rhs = listify_rhs(rhs)
            # print (id), (lhs), (rhs), (prob)
            g.add_rule(pcfg.Rule(id, lhs, rhs, float(prob)))
        num_nodes = 16  # G.number_of_nodes()
        print "Starting max size", 'n=', num_nodes
        g.set_max_size(num_nodes)
        print "Done with max size"
        Hstars = []
        print '-' * 40
        try:
            rule_list = g.sample(num_nodes)
        except Exception, e:
            print str(e)
            continue
        hstar = phrg.grow(rule_list, g)[0]
        Hstars.append(hstar)
        print '+' * 40
        # break
# def tst_prod_rules_canfire(infname):
# print infname
# print df[2].apply(lambda x: listify_rhs(x.split()[0]))
# tst_prod_rules_isom_intrxn("Results/moreno_vdb_vdb_isom_itrxn.tsv")
# tst_prod_rules_level1_individual("ProdRules/moreno_lesmis_lesmis.*_iprules.tsv")
# tst_prod_rules_isom_intrxn(fname)
#_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~#
def get_parser ():
parser = argparse.ArgumentParser(description="Test the reduced set of prod rules")
parser.add_argument('--prs',nargs=1, required=True, help="Filename to prod rules file")
parser.add_argument('--orig',nargs=1, required=True, help="Filename to original dataset file")
parser.add_argument('--version', action='version', version=__version__)
return parser
def main (argsd):
if argsd['prs']:
tst_prod_rules_isom_intrxn(argsd['prs'][0], argsd['orig'][0])
else:
print "I am not sure what you are trying to do"
sys.exit(1)
#test_isom_subset_of_prod_rules("Results/moreno_lesmis_lesmis_isom_itrxn.tsv")
#print
#test_isom_subset_of_prod_rules("Results/moreno_vdb_vdb_isom_itrxn.tsv")
#print
#test_isom_subset_of_prod_rules("Results/ucidata-gama_isom_itrxn.tsv")
#print
#test_isom_subset_of_prod_rules("Results/ucidata-zachary_isom_itrxn.tsv")
#
###
### tst_prod_rules_canfire("Results/ucidata-gama_stcked_prs_isom_itrxn.tsv")
###
#infname ="Results/ucidata-gama_isom_itrxn.tsv"
#df = pd.read_csv(infname, header=None, sep="\t", dtype={0: str, 1: str, 2: list, 3: float})
#
#df['lhs_n']=[len(x.split(',')) for x in df[1].values]
#df['els_n']=[len(listify_rhs(x)) for x in df[2].values]
#df['nt_els']=[len([k for k in listify_rhs(x) if ":N" in k]) for x in df[2].values]
#print '#'*10
#rhs_nt_els_nbr = {}
#for y,x in df[[1,2]].values:
# # print x
# rhs_nts =[]
# for k in listify_rhs(x):
# if ":N" in k:
# # print ' ', len(k.split(','))
# rhs_nts.append(len(k.split(',')))
# rhs_nt_els_nbr[len(y.split(','))] = rhs_nts
#for k,v in rhs_nt_els_nbr.items():
# print k,'\t',v
#
#print '^'*20
#print 'rhs',rhs_nt_els_nbr
#print 'lhs',[len(x.split(',')) for x in df[1].values]
## print df[[1,2,'lhs_n','els_n','nt_els']].head()
#print set(df['lhs_n']) & set(rhs_nt_els_nbr)
#print 'Test .... if each rhs is not in lhs ... we cannot fire (?)'
if __name__ == '__main__':
'''ToDo:
    [] clean the edgelists, write them back to disk and then run inddgo on 1-component graphs
[] Add WillFire
'''
parser = get_parser()
args = vars(parser.parse_args())
try:
main(args)
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
|
|
#!/usr/bin/env python
"""
Builds a Windows Resource Control Script
"""
import datetime
import getpass
import os
__version__ = '0.0.0'
#=============================================================================
default_template = """
/****************************************************************************
Windows Resource Control Script for "{project}"
List of available fields in templates:
{fields}
****************************************************************************/
//Set the program's application icon.
// e.g. a ICON "{project}.ico"
{icon_resource}
//Declare embedded executable information.
1 VERSIONINFO
FILEVERSION 0,0,0,0
PRODUCTVERSION {version_commas},0
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904E4"
BEGIN
VALUE "CompanyName", "{author}"
VALUE "FileDescription", "{title}"
VALUE "FileVersion", "0.0.0"
VALUE "InternalName", "{name}"
VALUE "LegalCopyright", "{year} {author}"
VALUE "OriginalFilename", "{project}.exe"
VALUE "ProductName", "{project}"
VALUE "ProductVersion", "{version}"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
"""
#=============================================================================
def makerc( target = None, template = None, fields = None ):
"""
Creates a new Windows resource control script.
"""
# set a default project name
project = 'project'
# check for an unspecified target file
if target is None:
target = project + '.rc'
# get today's date
today = datetime.date.today()
# set default field values
_fields = {
'project' : project,
'title' : project,
'name' : project,
'author' : getpass.getuser(),
'version' : '0.0.0',
'date' : today.strftime( '%Y-%m-%d' ),
'year' : today.strftime( '%Y' ),
'icon' : project + '.ico'
}
# override defaults with user-supplied information
if fields is not None:
for k, v in fields.items():
_fields[ k ] = v
# version string with commas
_fields[ 'version_commas' ] = _fields[ 'version' ].replace( '.', ',' )
# check for an icon
    if os.path.isfile( _fields[ 'icon' ] ):
# set the appropriate icon resource target
_fields[ 'icon_resource' ] = 'a ICON "{}"'.format( _fields[ 'icon' ] )
# no icon easily found
else:
# set a comment in the script
_fields[ 'icon_resource' ] = '// ### no icon found at {} ###'.format(
_fields[ 'icon' ]
)
# provide the list of fields for would-be template writers
max_key = max( len( k ) for k in _fields.keys() )
field_format = '{{0:<{}}} : {{1}}'.format( max_key )
_fields[ 'fields' ] = '\n'.join(
field_format.format( k, v ) for k,v in _fields.items()
)
# see if the user supplied a script template
if template is not None:
        if hasattr( template, 'read' ):
_template = template.read()
else:
_template = template
else:
_template = default_template
# format and write the template to the target file
with open( target, 'w' ) as tfh:
tfh.write( _template.format( **_fields ) )
# return success
return True
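# Hypothetical usage sketch (not part of the original script): the project
# name "myapp" and the version string are illustrative only; the call just
# exercises the makerc() signature defined above.
#
#     makerc( target = 'myapp.rc',
#             fields = { 'project' : 'myapp', 'version' : '1.2.3' } )
#
# This renders default_template with the merged fields and writes the result
# to "myapp.rc".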
#=============================================================================
def main( argv ):
"""
Script execution entry point
@param argv Arguments passed to the script
@return Exit code (0 = success)
"""
# imports when using this as a script
import argparse
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'Builds a Windows Resource Control Script',
add_help = False
)
parser.add_argument(
'-a',
'--author',
default = 'Zac Hester',
help = 'Specify program author name.'
)
parser.add_argument(
'-h',
'--help',
default = False,
help = 'Display this help message and exit.',
action = 'help'
)
parser.add_argument(
'-p',
'--project',
default = None,
help = 'Specify project name.'
)
parser.add_argument(
'-r',
'--revision',
default = '0.0.0',
help = 'Specify program revision (version).'
)
parser.add_argument(
'-v',
'--version',
default = False,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
parser.add_argument(
'target',
nargs = '?',
default = None,
help = 'Path to output file.'
)
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# read fields from the command line
fields = { 'author' : args.author, 'version' : args.revision }
# look for a project name (overrides a few things)
if args.project is not None:
project = args.project
# otherwise, make a sane default
else:
project = os.path.basename( os.getcwd() )
# set the fields that use the project name
fields[ 'project' ] = project
fields[ 'title' ] = project
fields[ 'name' ] = project
fields[ 'icon' ] = project + '.ico'
# build the resource control script
result = makerc( target = args.target, fields = fields )
# return result of function
return os.EX_OK if result == True else os.EX_SOFTWARE
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Force-Directed Graph Layout
===========================
This module contains implementations for a force-directed layout, where the
graph is modelled like a collection of springs or as a collection of
particles attracting and repelling each other. The whole graph tries to
reach a state which requires the minimum energy.
"""
import numpy as np
try:
from scipy.sparse import issparse
except ImportError:
def issparse(*args, **kwargs):
return False
from ..util import _straight_line_vertices, _rescale_layout
class fruchterman_reingold(object):
"""
Fruchterman-Reingold implementation adapted from NetworkX.
In the Fruchterman-Reingold algorithm, the whole graph is modelled as a
collection of particles, it runs a simplified particle simulation to
find a nice layout for the graph.
    Parameters
    ----------
optimal : number
Optimal distance between nodes. Defaults to :math:`1/\sqrt{N}` where
N is the number of nodes.
iterations : int
Number of iterations to perform for layout calculation.
pos : array
Initial positions of the nodes
Notes
-----
The algorithm is explained in more detail in the original paper [1]_.
.. [1] Fruchterman, Thomas MJ, and Edward M. Reingold. "Graph drawing by
force-directed placement." Softw., Pract. Exper. 21.11 (1991),
1129-1164.
"""
def __init__(self, optimal=None, iterations=50, pos=None):
self.dim = 2
self.optimal = optimal
self.iterations = iterations
self.num_nodes = None
self.pos = pos
def __call__(self, adjacency_mat, directed=False):
"""
Starts the calculation of the graph layout.
This is a generator, and after each iteration it yields the new
positions for the nodes, together with the vertices for the edges
and the arrows.
There are two solvers here: one specially adapted for SciPy sparse
matrices, and the other for larger networks.
Parameters
----------
adjacency_mat : array
The graph adjacency matrix.
directed : bool
            Whether the graph is directed or not. If this is True,
it will draw arrows for directed edges.
Yields
------
layout : tuple
For each iteration of the layout calculation it yields a tuple
containing (node_vertices, line_vertices, arrow_vertices). These
vertices can be passed to the `MarkersVisual` and `ArrowVisual`.
"""
if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
raise ValueError("Adjacency matrix should be square.")
self.num_nodes = adjacency_mat.shape[0]
if issparse(adjacency_mat):
# Use the sparse solver
solver = self._sparse_fruchterman_reingold
else:
solver = self._fruchterman_reingold
for result in solver(adjacency_mat, directed):
yield result
def _fruchterman_reingold(self, adjacency_mat, directed=False):
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_mat, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
# The inscrutable (but fast) version
# This is still O(V^2)
# Could use multilevel methods to speed this up significantly
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_mat, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# cool temperature
t -= dt
# Calculate edge vertices and arrows
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
pos, directed)
yield pos, line_vertices, arrows
def _sparse_fruchterman_reingold(self, adjacency_mat, directed=False):
# Optimal distance between nodes
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
# Change to list of list format
# Also construct the matrix in COO format for easy edge construction
adjacency_arr = adjacency_mat.toarray()
adjacency_coo = adjacency_mat.tocoo()
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# This is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_arr, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# Cool temperature
t -= dt
# Calculate line vertices
line_vertices, arrows = _straight_line_vertices(adjacency_coo,
pos, directed)
yield pos, line_vertices, arrows
def _calculate_delta_pos(adjacency_arr, pos, t, optimal):
"""Helper to calculate the delta position"""
# XXX eventually this should be refactored for the sparse case to only
# do the necessary pairwise distances
delta = pos[:, np.newaxis, :] - pos
# Distance between points
distance2 = (delta*delta).sum(axis=-1)
# Enforce minimum distance of 0.01
distance2 = np.where(distance2 < 0.0001, 0.0001, distance2)
distance = np.sqrt(distance2)
# Displacement "force"
displacement = np.zeros((len(delta), 2))
for ii in range(2):
displacement[:, ii] = (
delta[:, :, ii] *
((optimal * optimal) / (distance*distance) -
(adjacency_arr * distance) / optimal)).sum(axis=1)
length = np.sqrt((displacement**2).sum(axis=1))
length = np.where(length < 0.01, 0.1, length)
delta_pos = displacement * t / length[:, np.newaxis]
return delta_pos
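# Hypothetical usage sketch (not part of the original module), defined but
# never called here: it shows how the fruchterman_reingold generator above
# might be consumed. The 3-node adjacency matrix is illustrative only.
def _example_fruchterman_reingold_usage():
    adjacency_mat = np.array([[0., 1., 1.],
                              [1., 0., 0.],
                              [1., 0., 0.]], dtype=np.float32)
    layout = fruchterman_reingold(iterations=10)
    node_positions = None
    for node_positions, line_vertices, arrows in layout(adjacency_mat):
        # Each iteration yields updated node positions plus the edge and
        # arrow vertices that can be passed to the corresponding visuals.
        pass
    return node_positions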
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pprint
from bs4 import BeautifulSoup
from six.moves.urllib import parse as urlparse
import six
from astropy import units as u
from . import conf
from ..query import BaseQuery
from ..utils import prepend_docstr_nosections, commons, async_to_sync
__doctest_skip__ = [
'SkyViewClass.get_images',
'SkyViewClass.get_images_async',
'SkyViewClass.get_image_list']
@async_to_sync
class SkyViewClass(BaseQuery):
URL = conf.url
def __init__(self):
super(SkyViewClass, self).__init__()
self._default_form_values = None
def _get_default_form_values(self, form):
"""
Return the already selected values of a given form (a BeautifulSoup
form node) as a dict.
"""
res = []
for elem in form.find_all(['input', 'select']):
# ignore the submit and reset buttons
if elem.get('type') in ['submit', 'reset']:
continue
# check boxes: enabled boxes have the value "on" if not specified
# otherwise. Found out by debugging, perhaps not documented.
if (elem.get('type') == 'checkbox' and
elem.get('checked') in ["", "checked"]):
value = elem.get('value', 'on')
res.append((elem.get('name'), value))
# radio buttons and simple input fields
if elem.get('type') == 'radio' and\
elem.get('checked') in ["", "checked"] or\
elem.get('type') in [None, 'text']:
res.append((elem.get('name'), elem.get('value')))
            # dropdown menu, multi-selection possible
if elem.name == 'select':
for option in elem.find_all('option'):
if option.get('selected') == '':
value = option.get('value', option.text.strip())
res.append((elem.get('name'), value))
return {k: v
for (k, v) in res
if v not in [None, u'None', u'null'] and v
}
def _generate_payload(self, input=None):
"""
Fill out the form of the SkyView site and submit it with the
values given in ``input`` (a dictionary where the keys are the form
element's names and the values are their respective values).
"""
if input is None:
input = {}
form_response = self._request('GET', self.URL)
bs = BeautifulSoup(form_response.content, "html.parser")
form = bs.find('form')
# cache the default values to save HTTP traffic
if self._default_form_values is None:
self._default_form_values = self._get_default_form_values(form)
# only overwrite payload's values if the `input` value is not None
# to avoid overwriting of the form's default values
payload = self._default_form_values.copy()
for k, v in six.iteritems(input):
if v is not None:
payload[k] = v
url = urlparse.urljoin(self.URL, form.get('action'))
return url, payload
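    # Illustrative note (not part of the original code): callers pass form
    # field names mapped to values, e.g. {'Position': 'Eta Carinae',
    # 'survey': 'DSS', 'pixels': '300'}; None values are skipped so the
    # form's default values are preserved.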
def _submit_form(self, input=None, cache=True):
url, payload = self._generate_payload(input=input)
response = self._request('GET', url, params=payload, cache=cache)
return response
def get_images(self, position, survey, coordinates=None, projection=None,
pixels=None, scaling=None, sampler=None, resolver=None,
deedger=None, lut=None, grid=None, gridlabels=None,
radius=None, height=None, width=None, cache=True,
show_progress=True):
"""
        Query the SkyView service, download the FITS files that are found
        and return a generator over the local paths to the downloaded FITS
        files.
        Note that the files will be downloaded when the generator is
        exhausted, i.e. just calling this method alone without iterating
        over the result won't issue a connection to the SkyView server.
Parameters
----------
position : str
Determines the center of the field to be retrieved. Both
coordinates (also equatorial ones) and object names are
supported. Object names are converted to coordinates via the
SIMBAD or NED name resolver. See the reference for more info
on the supported syntax for coordinates.
survey : str or list of str
Select data from one or more surveys. The number of surveys
determines the number of resulting file downloads. Passing a
list with just one string has the same effect as passing this
string directly.
coordinates : str
Choose among common equatorial, galactic and ecliptic
coordinate systems (``"J2000"``, ``"B1950"``, ``"Galactic"``,
``"E2000"``, ``"ICRS"``) or pass a custom string.
projection : str
Choose among the map projections (the value in parentheses
denotes the string to be passed):
Gnomonic (Tan), default value
good for small regions
Rectangular (Car)
simplest projection
Aitoff (Ait)
Hammer-Aitoff, equal area projection good for all sky maps
Orthographic (Sin)
Projection often used in interferometry
Zenith Equal Area (Zea)
equal area, azimuthal projection
COBE Spherical Cube (Csc)
Used in COBE data
Arc (Arc)
Similar to Zea but not equal-area
pixels : str
Selects the pixel dimensions of the image to be produced. A
scalar value or a pair of values separated by comma may be
given. If the value is a scalar the number of width and height
of the image will be the same. By default a 300x300 image is
produced.
scaling : str
Selects the transformation between pixel intensity and
intensity on the displayed image. The supported values are:
``"Log"``, ``"Sqrt"``, ``"Linear"``, ``"HistEq"``,
``"LogLog"``.
sampler : str
The sampling algorithm determines how the data requested will
be resampled so that it can be displayed.
resolver : str
The name resolver allows to choose a name resolver to use when
looking up a name which was passed in the ``position`` parameter
(as opposed to a numeric coordinate value). The default choice
is to call the SIMBAD name resolver first and then the NED
name resolver if the SIMBAD search fails.
deedger : str
When multiple input images with different backgrounds are
resampled the edges between the images may be apparent because
of the background shift. This parameter makes it possible to
attempt to minimize these edges by applying a de-edging
algorithm. The user can elect to choose the default given for
that survey, to turn de-edging off, or to use the default
de-edging algorithm. The supported values are: ``"_skip_"`` to
use the survey default, ``"skyview.process.Deedger"`` (for
enabling de-edging), and ``"null"`` to disable.
lut : str
Choose from the color table selections to display the data in
false color.
grid : bool
overlay a coordinate grid on the image if True
gridlabels : bool
annotate the grid with coordinates positions if True
radius : `~astropy.units.Quantity` or None
The radius of the specified field. Overrides width and height.
width : `~astropy.units.Quantity` or None
The width of the specified field. Must be specified
with ``height``.
height : `~astropy.units.Quantity` or None
The height of the specified field. Must be specified
with ``width``.
References
----------
.. [1] http://skyview.gsfc.nasa.gov/current/help/fields.html
Examples
--------
>>> sv = SkyView()
>>> paths = sv.get_images(position='Eta Carinae',
... survey=['Fermi 5', 'HRI', 'DSS'])
>>> for path in paths:
... print('\tnew file:', path)
Returns
-------
A list of `~astropy.io.fits.HDUList` objects.
"""
readable_objects = self.get_images_async(position, survey, coordinates,
projection, pixels, scaling,
sampler, resolver, deedger,
lut, grid, gridlabels,
radius=radius, height=height,
width=width,
cache=cache,
show_progress=show_progress)
return [obj.get_fits() for obj in readable_objects]
@prepend_docstr_nosections(get_images.__doc__)
def get_images_async(self, position, survey, coordinates=None,
projection=None, pixels=None, scaling=None,
sampler=None, resolver=None, deedger=None, lut=None,
grid=None, gridlabels=None, radius=None, height=None,
width=None, cache=True, show_progress=True):
"""
Returns
-------
A list of context-managers that yield readable file-like objects
"""
image_urls = self.get_image_list(position, survey, coordinates,
projection, pixels, scaling, sampler,
resolver, deedger, lut, grid,
gridlabels, radius=radius,
height=height, width=width,
cache=cache)
return [commons.FileContainer(url, encoding='binary',
show_progress=show_progress)
for url in image_urls]
@prepend_docstr_nosections(get_images.__doc__, sections=['Returns', 'Examples'])
def get_image_list(self, position, survey, coordinates=None,
projection=None, pixels=None, scaling=None,
sampler=None, resolver=None, deedger=None, lut=None,
grid=None, gridlabels=None, radius=None, width=None,
height=None, cache=True):
"""
Returns
-------
list of image urls
Examples
--------
>>> SkyView().get_image_list(position='Eta Carinae',
... survey=['Fermi 5', 'HRI', 'DSS'])
[u'http://skyview.gsfc.nasa.gov/tempspace/fits/skv6183161285798_1.fits',
u'http://skyview.gsfc.nasa.gov/tempspace/fits/skv6183161285798_2.fits',
u'http://skyview.gsfc.nasa.gov/tempspace/fits/skv6183161285798_3.fits']
"""
self._validate_surveys(survey)
if radius is not None:
size_deg = str(radius.to(u.deg).value)
elif width and height:
size_deg = "{0},{1}".format(width.to(u.deg).value,
height.to(u.deg).value)
        elif width or height:
            raise ValueError("Must specify width and height if you "
                             "specify either.")
else:
size_deg = None
input = {
'Position': parse_coordinates(position),
'survey': survey,
'Deedger': deedger,
'lut': lut,
'projection': projection,
'gridlabels': '1' if gridlabels else '0',
'coordinates': coordinates,
'scaling': scaling,
'grid': grid,
'resolver': resolver,
'Sampler': sampler,
'imscale': size_deg,
'size': size_deg,
'pixels': pixels}
response = self._submit_form(input, cache=cache)
urls = self._parse_response(response)
return urls
def _parse_response(self, response):
bs = BeautifulSoup(response.content, "html.parser")
urls = []
for a in bs.find_all('a'):
if a.text == 'FITS':
href = a.get('href')
urls.append(urlparse.urljoin(response.url, href))
return urls
@property
def survey_dict(self):
if not hasattr(self, '_survey_dict'):
response = self._request('GET', self.URL, cache=False)
page = BeautifulSoup(response.content, "html.parser")
surveys = page.findAll('select', {'name': 'survey'})
self._survey_dict = {
sel['id']: [x.text for x in sel.findAll('option')]
for sel in surveys
if 'overlay' not in sel['id']
}
return self._survey_dict
@property
def _valid_surveys(self):
# Return a flat list of all valid surveys
return [x for v in self.survey_dict.values() for x in v]
def _validate_surveys(self, surveys):
if not isinstance(surveys, list):
surveys = [surveys]
for sv in surveys:
if sv not in self._valid_surveys:
raise ValueError("Survey is not among the surveys hosted "
"at skyview. See list_surveys or "
"survey_dict for valid surveys.")
def list_surveys(self):
"""
Print out a formatted version of the survey dict
"""
pprint.pprint(self.survey_dict)
def parse_coordinates(position):
coord = commons.parse_coordinates(position)
return coord.fk5.to_string()
SkyView = SkyViewClass()
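# Illustrative usage sketch (added for clarity; not part of the original
# module). It needs network access and assumes 'DSS' is among the surveys
# currently hosted at SkyView, so it is wrapped in a function rather than
# run at import time.
def _example_skyview():
    # Print the available surveys, then fetch the FITS URLs for one survey
    # around a named position (resolved via SIMBAD/NED).
    SkyView.list_surveys()
    return SkyView.get_image_list(position='Eta Carinae', survey=['DSS'])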
|
|
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
# ----------------------------------------------------------------------------
"""Date and time handling utilities."""
import datetime as dt
import numpy as np
import pandas as pds
import re
def getyrdoy(date):
"""Return a tuple of year, day of year for a supplied datetime object.
Parameters
----------
date : datetime.datetime
Datetime object
Returns
-------
year : int
Integer year
doy : int
Integer day of year
Raises
------
AttributeError
If input date does not have `toordinal` method
"""
try:
doy = date.toordinal() - dt.datetime(date.year, 1, 1).toordinal() + 1
except AttributeError:
raise AttributeError(''.join(("Must supply a datetime object or an ",
"equivalent class object with the ",
"`toordinal` method")))
else:
return date.year, doy
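# Illustrative usage sketch (added for clarity; not part of the original
# module): 1 Feb 2020 is the 32nd day of the year.
def _example_getyrdoy():
    return getyrdoy(dt.datetime(2020, 2, 1))  # -> (2020, 32)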
def datetime_to_dec_year(dtime):
"""Convert datetime timestamp to a decimal year.
Parameters
----------
dtime : dt.datetime
Datetime timestamp
Returns
-------
year : float
Year with decimal containing time increments of less than a year
"""
year = float(dtime.year)
day = float(dtime.strftime("%j")) - 1.0
days_of_year = float(dt.datetime(dtime.year, 12, 31).strftime("%j"))
# Add fraction of day to the day
day += (dtime.hour + (dtime.minute
+ (dtime.second + dtime.microsecond * 1.0e-6) / 60.0)
/ 60.0) / 24.0
# Determine the fraction of days in this year and add to year
year += (day / days_of_year)
return year
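# Illustrative usage sketch (added for clarity; not part of the original
# module): noon on 2 Jan of a non-leap year is 1.5 days into a 365-day
# year, so the decimal year is 2021 + 1.5 / 365.
def _example_datetime_to_dec_year():
    return datetime_to_dec_year(dt.datetime(2021, 1, 2, 12))  # ~2021.00411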
def parse_date(str_yr, str_mo, str_day, str_hr='0', str_min='0', str_sec='0',
century=2000):
"""Convert string dates to dt.datetime.
Parameters
----------
str_yr : str
String containing the year (2 or 4 digits)
str_mo : str
String containing month digits
str_day : str
String containing day of month digits
str_hr : str
String containing the hour of day (default='0')
str_min : str
String containing the minutes of hour (default='0')
str_sec : str
String containing the seconds of minute (default='0')
century : int
Century, only used if str_yr is a 2-digit year (default=2000)
Returns
-------
out_date : dt.datetime
datetime object
Raises
------
ValueError
If any input results in an unrealistic datetime object value
"""
yr = int(str_yr) + century if len(str_yr) == 2 else int(str_yr)
out_date = dt.datetime(yr, int(str_mo), int(str_day), int(str_hr),
int(str_min), np.int64(str_sec))
return out_date
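# Illustrative usage sketch (added for clarity; not part of the original
# module): a 2-digit year string is combined with the default century of
# 2000.
def _example_parse_date():
    return parse_date('14', '10', '31', str_hr='11')  # -> dt.datetime(2014, 10, 31, 11, 0)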
def calc_res(index, use_mean=False):
"""Determine the resolution for a time index.
Parameters
----------
index : array-like
Datetime list, array, or Index
use_mean : bool
Use the minimum time difference if False, use the mean time difference
if True (default=False)
Returns
-------
res_sec : float
Resolution value in seconds
Raises
------
ValueError
If `index` is too short to calculate a time resolution
"""
# Test the length of the input
if len(index) < 2:
raise ValueError("insufficient data to calculate resolution")
# Calculate the minimum temporal difference
del_time = (np.array(index[1:]) - np.array(index[:-1]))
if use_mean:
del_time = del_time.mean()
else:
del_time = del_time.min()
# Convert time difference to seconds, based on possible data types
try:
# First try as timedelta
res_sec = del_time.total_seconds()
except AttributeError as aerr:
# Now try as numpy.timedelta64
if isinstance(del_time, np.timedelta64):
res_sec = np.float64(del_time) * 1.0e-9
else:
raise AttributeError("Input should be times: {:}".format(aerr))
return res_sec
def calc_freq(index):
"""Determine the frequency for a time index.
Parameters
----------
index : array-like
Datetime list, array, or Index
Returns
-------
freq : str
Frequency string as described in Pandas Offset Aliases
Note
----
Calculates the minimum time difference and sets that as the frequency.
To reduce the amount of calculations done, the returned frequency is
either in seconds (if no sub-second resolution is found) or nanoseconds.
See Also
--------
pds.offsets.DateOffset
"""
# Get the frequency of the index in seconds
freq_sec = calc_res(index, use_mean=False)
# Format output frequency
if np.floor(freq_sec) == freq_sec:
# The frequency is on the order of seconds or greater
freq = "{:.0f}S".format(freq_sec)
else:
        # There are sub-seconds. Go straight to nanoseconds for best resolution
freq = "{:.0f}N".format(freq_sec * 1.0e9)
return freq
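# Illustrative usage sketch (added for clarity; not part of the original
# module): for a uniform 10-second cadence the resolution is 10.0 seconds
# and the corresponding pandas frequency string is '10S'.
def _example_calc_res_and_freq():
    index = pds.date_range('2021-01-01', periods=4, freq='10S')
    return calc_res(index), calc_freq(index)  # -> (10.0, '10S')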
def freq_to_res(freq):
"""Convert a frequency string to a resolution value in seconds.
Parameters
----------
freq : str
Frequency string as described in Pandas Offset Aliases
Returns
-------
res_sec : np.float64
Resolution value in seconds
See Also
--------
pds.offsets.DateOffset
References
----------
Separating alpha and numeric portions of strings, as described in:
https://stackoverflow.com/a/12409995
"""
# Separate the alpha and numeric portions of the string
regex = re.compile(r'(\d+|\s+)')
out_str = [sval for sval in regex.split(freq) if len(sval) > 0]
if len(out_str) > 2:
raise ValueError('unexpected frequency format: {:s}'.format(freq))
# Cast the alpha and numeric portions
freq_str = out_str[-1]
freq_num = 1.0 if len(out_str) == 1 else np.float64(out_str[0])
# Calculate the resolution in seconds
res_sec = pds.Timedelta(freq_num, unit=freq_str).total_seconds()
return res_sec
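# Illustrative usage sketch (added for clarity; not part of the original
# module): the inverse direction, converting a pandas frequency string back
# to a resolution in seconds.
def _example_freq_to_res():
    return freq_to_res('10S')  # -> 10.0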
def create_date_range(start, stop, freq='D'):
"""Create array of datetime objects using input freq from start to stop.
Parameters
----------
start : dt.datetime or list-like of dt.datetime
The beginning of the date range. Supports list, tuple, or ndarray of
start dates.
stop : dt.datetime or list-like of dt.datetime
The end of the date range. Supports list, tuple, or ndarray of
stop dates.
freq : str
The frequency of the desired output. Codes correspond to pandas
date_range codes: 'D' daily, 'M' monthly, 'S' secondly
Returns
-------
season : pds.date_range
Range of dates over desired time with desired frequency.
"""
if hasattr(start, '__iter__'):
# missing check for datetime
season = pds.date_range(start[0], stop[0], freq=freq)
for (sta, stp) in zip(start[1:], stop[1:]):
season = season.append(pds.date_range(sta, stp, freq=freq))
else:
season = pds.date_range(start, stop, freq=freq)
return season
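# Illustrative usage sketch (added for clarity; not part of the original
# module): two disjoint start/stop pairs are combined into a single daily
# index covering 3 + 2 dates.
def _example_create_date_range():
    starts = [dt.datetime(2021, 1, 1), dt.datetime(2021, 2, 1)]
    stops = [dt.datetime(2021, 1, 3), dt.datetime(2021, 2, 2)]
    return create_date_range(starts, stops, freq='D')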
def create_datetime_index(year=None, month=None, day=None, uts=None):
"""Create a timeseries index using supplied date and time.
Parameters
----------
year : array_like or NoneType
Array of year values as np.int (default=None)
month : array_like or NoneType
Array of month values as np.int. Leave None if using day for
day of year. (default=None)
day : array_like or NoneType
Array of number of days as np.int. If month=None then value
interpreted as day of year, otherwise, day of month. (default=None)
uts : array-like or NoneType
Array of UT seconds as np.float64 values (default=None)
Returns
-------
Pandas timeseries index.
Note
----
Leap seconds have no meaning here.
"""
# Get list of unique year, and month
if not hasattr(year, '__iter__'):
raise ValueError('Must provide an iterable for all inputs.')
if len(year) == 0:
raise ValueError('Length of array must be larger than 0.')
# Establish default month
if month is None:
# If no month, assume January. All days will be treated as day of year.
month = np.ones(shape=len(year))
# Initial day is first of given month.
day0 = np.ones(shape=len(year))
if day is None:
# If no day, assume first of month.
day = day0
if uts is None:
# If no seconds, assume start of day.
uts = np.zeros(shape=len(year))
# Initialize all dates as first of month and convert to index.
# This method allows month-day and day of year to be used.
df = pds.DataFrame({'year': year, 'month': month, 'day': day0})
index = pds.DatetimeIndex(pds.to_datetime(df))
# Add days (offset by 1) to each index.
# Day is added here in case input is in day of year format.
index += (day - 1).astype('timedelta64[D]')
# Add seconds to each index. Need to convert to nanoseconds first.
index += (1e9 * uts).astype('timedelta64[ns]')
return index
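# Illustrative usage sketch (added for clarity; not part of the original
# module): with `month` left as None the `day` input is interpreted as day
# of year, so day 32 of 2021 maps to 1 Feb 2021.
def _example_create_datetime_index():
    return create_datetime_index(year=np.array([2021, 2021]),
                                 day=np.array([1, 32]),
                                 uts=np.array([0.0, 3600.0]))
    # -> DatetimeIndex(['2021-01-01 00:00:00', '2021-02-01 01:00:00'])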
def filter_datetime_input(date):
"""Create a datetime object that only includes year, month, and day.
Parameters
----------
date : NoneType, array-like, or datetime
Single or sequence of datetime inputs
Returns
-------
    out_date: NoneType, datetime, or array-like
        NoneType input yields NoneType output, array-like input yields a
        list of datetimes, and a datetime input yields a datetime. All
        datetime output excludes the sub-daily temporal increments (keeps
        only date information).
Note
----
Checks for timezone information not in UTC
"""
if date is None:
out_date = None
else:
# Check for timezone information and remove time of day for
# single datetimes and iterable containers of datetime objects
if hasattr(date, '__iter__'):
out_date = []
for in_date in date:
if(in_date.tzinfo is not None
and in_date.utcoffset() is not None):
in_date = in_date.astimezone(tz=dt.timezone.utc)
out_date.append(dt.datetime(in_date.year, in_date.month,
in_date.day))
else:
if date.tzinfo is not None and date.utcoffset() is not None:
date = date.astimezone(tz=dt.timezone.utc)
out_date = dt.datetime(date.year, date.month, date.day)
return out_date
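# Illustrative usage sketch (added for clarity; not part of the original
# module): a timezone-aware timestamp is shifted to UTC before the time of
# day is stripped.
def _example_filter_datetime_input():
    aware = dt.datetime(2021, 6, 1, 23, 30,
                        tzinfo=dt.timezone(dt.timedelta(hours=-5)))
    return filter_datetime_input(aware)  # -> dt.datetime(2021, 6, 2, 0, 0)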
def today():
"""Obtain today's date (UTC), with no hour, minute, second, etc.
Returns
-------
today_utc: datetime
Today's date in UTC
"""
today_utc = filter_datetime_input(dt.datetime.utcnow())
return today_utc
|
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Get the GitHub repos from PNE course students
#
# Copyright (C) Alvaro del Castillo
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import argparse
import json
import operator
import os
import subprocess
import sys
from collections import OrderedDict
from shutil import copyfile
import requests
GITHUB_URL = 'https://github.com'
GITHUB_REPOS_API = 'https://api.github.com/repos'
GITHUB_USERS_API = 'https://api.github.com/users'
OPENFDA_REPO = "openfda" # Name of the repository with the practices
GENIUS_REPO = "genius"
PROJECT_DIR = "openfda-project" # Directory with the final project
PRACTICES_DIRS = ['openfda-1', 'openfda-2', 'openfda-3', 'openfda-4']
TEST_SCRIPT = 'test_openfda.py'
PROJECT_DIR = 'openfda-project'
STUDENT_RESULTS_FILE = 'report.json'
STUDENT_SCORES_FILE = 'scores.json'
PYTHON_CMD = os.path.abspath(sys.executable)
def send_github(url, headers=None):
    # Build the default token header only when the caller did not supply one
    if headers is None:
        headers = {'Authorization': 'token ' + args.token}
    res = requests.get(url, headers=headers)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
print("Failed to get results from", url)
print("Can not get repository data (is empty?) from", url)
return res
class Evaluator():
""" Evaluator get all practices from GitHub and evaluate them """
@staticmethod
def get_classes_score(project_dir):
        # Refactoring score: 1 if all three expected classes are found, else 0
score_classes = 0
cnames = ['OpenFDAClient', 'OpenFDAHTML', 'OpenFDAParser']
server_file = os.path.join(project_dir, "server.py")
classes = []
with open(server_file) as sfile:
for line in sfile.readlines():
for cname in cnames:
if 'class ' + cname in line:
classes.append(cname)
if len(classes) == 3:
score_classes = 1
print("Total score for classes: %0.2f" % score_classes)
return score_classes
@staticmethod
def get_score(test_results):
""" Get the score from the results of the tests:
scores: 5 basic, 4 additional
"""
        # Sample test_results format
# Ran 11 tests in 7.064s
#
# FAILED (failures=3, errors=2)
score = 0
nerrors = 0
nfailures = 0
basic_score = 5
# Pending checking score for refactoring
additional_score = 3 # Max 8 if all is working
additional_tests = 5
total_tests = test_results.split("Ran ")[1].split(" ")[0]
        # errors are counted the same as failures
error_tests = test_results.split("errors=")
failed_tests = test_results.split("failures=")
errors = 0
failed = 0
if len(failed_tests) > 1 or len(error_tests) > 1:
if len(error_tests) == 1:
# There are no errors
failed = int(failed_tests[1].split(")")[0])
# There are failures
elif len(failed_tests) == 1:
errors = int(error_tests[1].split(")")[0])
else:
failed = int(failed_tests[1].split(",")[0])
errors = int(error_tests[1].split(")")[0])
total_ko = failed + errors
if total_ko > 5:
print("Basic tests failed %i" % total_ko)
basic_score = 0
additional_score = 0
else:
additional_score = additional_score * (1 - (total_ko / additional_tests))
print("Additional tests failed %i, score %f" % (total_ko, basic_score + additional_score))
else:
print("No fails in tests, score %f" % (basic_score + additional_score))
return (basic_score, additional_score)
@staticmethod
def test_repo(repo_dir):
"""
Execute the tests for a repository
:param repo_dir: directory with the repository to be tested
:return:
"""
project_dir = repo_dir + "/" + PROJECT_DIR
# First step is to copy the test script inside the repository
copyfile("../openfda-project/" + TEST_SCRIPT, project_dir + "/" + TEST_SCRIPT)
# Time to get the score related to classes refactoring
classes_score = Evaluator.get_classes_score(project_dir)
# And now we need to execute the tests
cmd = [PYTHON_CMD, './' + TEST_SCRIPT]
errs_str, outs_str = Evaluator.execute_cmd(cmd, project_dir)
(basic_score, additional_score) = Evaluator.get_score(errs_str)
scores = {"basic": basic_score,
"additional": additional_score,
"classes": classes_score,
"total": basic_score + additional_score + classes_score}
print("Final score for %s: %f" % (project_dir, scores['total']))
return scores
@staticmethod
def execute_cmd(cmd, cwd):
""" Execute a shell command analyzing the output and errors """
print("Executing the command", cmd, os.getcwd(), cwd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
outs, errs = proc.communicate()
outs_str = outs.decode("utf8")
errs_str = errs.decode("utf8")
return errs_str, outs_str
@staticmethod
def evaluate_students(students_data, no_update=False):
"""
Evaluate the practices for the github logins included in students_data
:param students_data: github logins to evaluate
:param no_update: don't update the practices
:return: a dict with gh_login as name and score as value
"""
gh_login_scores = {}
# To specify logins to avoid or to analyze: not supported in command line yet
blacklist_gh_login = []
whitelist_gh_login = []
# blacklist_gh_login = ["kcoutinho"]
# whitelist_gh_login = ["epolancosaiz", "mariaolleros"]
for name, gh_login in OrderedDict(sorted(students_data.items())).items():
scores = {"total": 0}
if whitelist_gh_login and gh_login not in whitelist_gh_login:
continue
if gh_login in blacklist_gh_login:
print("%s is in blacklist" % gh_login)
                gh_login_scores[gh_login] = scores
continue
# Check that the repository exists
check_url = "https://api.github.com/repos/%s/%s" % (gh_login, OPENFDA_REPO)
res = send_github(check_url)
# res = requests.get(check_url)
if res.status_code == 200:
if not os.path.isdir('repos/openfda-' + gh_login):
# If the repository is not cloned yet, do it!
print("Cloning for %s the repository %s" % (gh_login, OPENFDA_REPO))
clone_url = GITHUB_URL + "/" + gh_login + "/" + OPENFDA_REPO
cmd = ['git', 'clone', clone_url, 'repos/openfda-' + gh_login]
                    print(Evaluator.execute_cmd(cmd, "."))
else:
if not no_update:
# If it already exists, update it
print("Repository already cloned: %s. Updating it." % ('repos/openfda-' + gh_login))
cmd = ['git', 'pull']
print(Evaluator.execute_cmd(cmd, 'repos/openfda-' + gh_login))
else:
print("Not updating repository already cloned: %s." % ('repos/openfda-' + gh_login))
# Check that the final project dir exists
final_project_dir = 'repos/openfda-' + gh_login + "/" + PROJECT_DIR
if not os.path.isdir(final_project_dir):
print("Final project does not exists", final_project_dir)
else:
# Time to execute the tests
scores = Evaluator.test_repo("repos/openfda-" + gh_login)
elif res.status_code == 403:
print("Review the API token, access is forbidden")
print(res.text)
sys.exit(1)
res.raise_for_status()
else:
print("Repository not found for", gh_login, GITHUB_URL + "/" + gh_login + "/" + OPENFDA_REPO)
gh_login_scores[gh_login] = scores
return OrderedDict(sorted(gh_login_scores.items()))
@staticmethod
def clone_genius(students_data, no_update=False):
"""
Try to clone the genius practice
:param students_data: github logins to evaluate
:param no_update: don't update the practices
:return: a list with gh_logins with genius practice
"""
genius_logins = []
for name, gh_login in OrderedDict(sorted(students_data.items())).items():
# Check that the repository exists
check_url = "https://api.github.com/repos/%s/%s" % (gh_login, GENIUS_REPO)
res = send_github(check_url)
# res = requests.get(check_url)
if res.status_code == 200:
if not os.path.isdir('repos/genius-' + gh_login):
# If the repository is not cloned yet, do it!
print("Cloning for %s the repository %s" % (gh_login, GENIUS_REPO))
clone_url = GITHUB_URL + "/" + gh_login + "/" + GENIUS_REPO
cmd = ['git', 'clone', clone_url, 'repos/genius-' + gh_login]
print(Evaluator.execute_cmd(cmd, '.'))
else:
if not no_update:
# If it already exists, update it
print("Repository already cloned: %s. Updating it." % ('repos/genius-' + gh_login))
cmd = ['git', 'pull']
print(Evaluator.execute_cmd(cmd, 'repos/genius-' + gh_login))
else:
print("Not updating repository already cloned: %s." % ('repos/genius-' + gh_login))
genius_logins.append(gh_login)
elif res.status_code == 403:
print("Review the API token, access is forbidden")
print(res.text)
sys.exit(1)
res.raise_for_status()
else:
print("Repository not found for", gh_login, GITHUB_URL + "/" + gh_login + "/" + GENIUS_REPO)
return sorted(genius_logins)
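# Illustrative sketch (added for clarity; not part of the original script):
# Evaluator.get_score only parses the unittest summary text, so it can be
# exercised without cloning any repository. The summary string below is an
# assumption that follows the format shown in the method's comments.
def _example_get_score():
    summary = "Ran 11 tests in 7.064s\n\nFAILED (failures=3, errors=2)"
    return Evaluator.get_score(summary)  # -> (5, 0.0): five failed tests wipe out the additional score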
class Report():
""" This Report is useful to track the activity of students in GitHub """
students = 0
repos_found = 0
repos_not_found = 0
repos_not_found_students = []
repo_ok = 0
repo_ok_students = []
repo_ko = 0
repo_ko_students = []
repo_main_not_found = 0
repo_main_not_found_students = []
students_res = {}
@staticmethod
def check_repo(gh_login):
"""
        Check if a github user has the openfda repo and its contents
:param gh_login: github login for the user to test
:return:
"""
def do_checks(gh_repo):
"""
:rtype: Boolean with the result of the checks
"""
check = False
print("Checking", gh_repo)
# Checks that practices exists
practices_pending = list(PRACTICES_DIRS)
res = send_github(gh_repo + "/contents")
res.raise_for_status()
for content in res.json():
if content['type'] == "dir":
try:
practices_pending.remove(content['name'])
except ValueError:
pass
if not practices_pending:
print("All practices found")
check = True
else:
print("Practices not found for %s: %s" % (gh_repo, practices_pending))
check = False
# Add the practices pending data
Report.students_res[gh_login]['practices_pending'] = practices_pending
Report.students_res[gh_login]['number_practices_found'] = len(PRACTICES_DIRS) - len(practices_pending)
# Check last commit date: get last commit and get the date
commits_url = GITHUB_REPOS_API + "/" + gh_login + "/" + OPENFDA_REPO + "/commits/master"
res = send_github(commits_url)
last_commit_date = res.json()['commit']['committer']['date']
Report.students_res[gh_login]['last_commit_date'] = last_commit_date
print("Last commit date", last_commit_date)
# Check number of commits from the list of contributors
contribs_url = GITHUB_REPOS_API + "/" + gh_login + "/" + OPENFDA_REPO + "/contributors"
res = send_github(contribs_url)
if len(res.json()) > 1:
print("A student with contributors!", contribs_url)
commits = 0
for contributor in res.json():
commits += int(contributor['contributions'])
Report.students_res[gh_login]['number_commits'] = commits
print("Number of commits", commits)
return check
repos_url = GITHUB_USERS_API + "/" + gh_login + "/repos"
page = 1
check_repos = False # Check if there are repositories
check_main_repo = False # Checks for the main repository
check_main_repo_not_found = True # Check if the main repository exists
while True:
try:
res = send_github(repos_url + "?page=%i" % page)
page += 1 # Prepare for next page request
res.raise_for_status()
Report.repos_found += 1
check_repos = True
except Exception as ex:
print(ex)
print("Can not find", repos_url)
Report.repos_not_found += 1
Report.repos_not_found_students.append(gh_login)
break
repos_dict = res.json()
if not repos_dict:
break
for repo in repos_dict:
if repo['name'] != OPENFDA_REPO:
continue
else:
print("Found repo %s for %s" % (repo['name'], gh_login))
Report.students_res[gh_login]['url'] = repo['html_url']
check_main_repo_not_found = False
check_main_repo = do_checks(repo['url'])
break
if check_main_repo_not_found:
print("Not found %s for %s" % (OPENFDA_REPO, gh_login))
Report.repo_main_not_found += 1
Report.repo_main_not_found_students.append(gh_login)
return check_repos and check_main_repo
@staticmethod
def fetch_report_data(students_data):
"""
Fetch the data from GitHub to generate the report
:param students_data: dict with the logins in github of the students
:return: None, it fills the report data
"""
for name, gh_login in students_data.items():
Report.students += 1
Report.students_res[gh_login] = {
'last_commit_date': None,
'number_commits': 0,
'number_practices_found': 0,
'practices_pending': [],
'project_found': False,
"url": None
}
print("Checking repo for", name, gh_login)
try:
if not Report.check_repo(gh_login):
print("%s (%s) repo KO" % (name, gh_login))
Report.repo_ko += 1
Report.repo_ko_students.append(name)
else:
print("%s (%s) repo OK" % (name, gh_login))
Report.repo_ok += 1
Report.repo_ok_students.append(name)
except Exception as ex:
print("Can't not get data from %s (%s)" % (name, gh_login))
print(ex)
@staticmethod
def show():
""" Generate the report from a report dict"""
# List ordered by last modification
login_date = {}
for login, value in Report.students_res.items():
login_date[login] = value['last_commit_date'] if value['last_commit_date'] else ''
login_date_list = sorted(login_date.items(), key=operator.itemgetter(1), reverse=True)
print("\nTop Dates\n--------")
for entry in login_date_list:
print("{0:25} {1}".format(entry[0], entry[1]))
# List ordered by number of commits
top_commits = {login: value['number_commits'] for (login, value) in Report.students_res.items()}
top_commits_list = sorted(top_commits.items(), key=operator.itemgetter(1), reverse=True)
print("\nTop Commits\n-----------")
for entry in top_commits_list:
print("{0:25} {1}".format(entry[0], entry[1]))
# List ordered by number of practices
top_practices = {login: value['number_practices_found'] for (login, value) in Report.students_res.items()}
top_practices_list = sorted(top_practices.items(), key=operator.itemgetter(1), reverse=True)
print("\nTop Practices\n-----------")
for entry in top_practices_list:
print("{0:25} {1}".format(entry[0], entry[1]))
@staticmethod
def do_report(students_data=None, students_res=None):
"""
Generate the report for the activity of the students in GitHub
:param students_data: dict with the logins of the students
:param students_res: an already generated report with students results
:return:
"""
if not students_res:
# If the data is not collected yet, collect it
Report.fetch_report_data(students_data)
print("Total students", Report.students)
print("Total repos not found", Report.repos_not_found, Report.repos_not_found_students)
print("Total %s no found: %i %s" % (OPENFDA_REPO, Report.repo_main_not_found, Report.repo_main_not_found_students))
print(json.dumps(Report.students_res, indent=True, sort_keys=True))
freport = open(STUDENT_RESULTS_FILE, "w")
json.dump(Report.students_res, freport, indent=True, sort_keys=True)
freport.close()
else:
Report.students_res = students_res
Report.show()
@staticmethod
def do_scores_report(scores, students_data=None):
"""
Show a report with the scores
:param scores: a dict with students logins as keys and the scores as values
:param students_data: an optional dict with the mapping from login to name of the students
:return:
"""
not_approved = []
login_names = {}
scores_names = {}
if students_data:
with open(students_data) as file_student_data:
names_logins = json.load(file_student_data)
# We need the reverse dict from names_logins
login_names = {login:name for name, login in names_logins.items()}
# Let's build a new dict with scores using names
for login in scores:
print("Score for %s (%s): %f" % (login_names[login], login, scores[login]['total']))
scores_names[login_names[login]] = scores[login]['total']
if scores[login]['total'] < 5:
not_approved.append(login)
else:
for login in OrderedDict(sorted(scores.items())):
if not isinstance(scores[login], dict):
print("Format error for %s: %s" % (login, scores[login]))
not_approved.append(login)
continue
print("Score for %s: %f" % (login, scores[login]['total']))
if scores[login]['total'] < 5:
not_approved.append(login)
print("Total number of scores: %i" % len(scores.items()))
print("Total approved/not_approved: %i/%i" % (len(scores.items()) - len(not_approved), len(not_approved)))
def get_params():
    parser = argparse.ArgumentParser(usage="usage: check_repos.py [options]",
description="Check the repos contents from the students in PNE course")
parser.add_argument("-t", "--token", required=True, help="GitHub API token")
parser.add_argument("-s", "--students-data", required=True, help="JSON file with students data")
parser.add_argument("-r", "--report", action='store_true', default=True, help="Generate the activity report")
parser.add_argument("-e", "--evaluate", action='store_true', help="Generate the scores report")
parser.add_argument("--no-update", action='store_true', help="Generate the scores report")
parser.add_argument("--genius", action='store_true', help="Clone the genius projects")
return parser.parse_args()
if __name__ == '__main__':
args = get_params()
if args.report and not args.evaluate:
print("Generating the activity report")
if os.path.isfile(STUDENT_RESULTS_FILE):
print("Using the already generated students results", STUDENT_RESULTS_FILE)
with open(STUDENT_RESULTS_FILE) as file_results_data:
Report.do_report(students_res=json.load(file_results_data))
else:
with open(args.students_data) as file_student_data:
Report.do_report(students_data=json.load(file_student_data))
elif args.genius:
print("Getting the genius projects")
with open(args.students_data) as file_student_data:
genius_logins = Evaluator.clone_genius(json.load(file_student_data), args.no_update)
print("Students with genius: ", genius_logins)
else:
print("Evaluating the practices")
if os.path.isfile(STUDENT_SCORES_FILE):
print("Using the already generated students results", STUDENT_SCORES_FILE)
else:
with open(args.students_data) as file_student_data:
logins_scores = Evaluator.evaluate_students(json.load(file_student_data), args.no_update)
with open(STUDENT_SCORES_FILE, "w") as file_scores_data:
json.dump(logins_scores, file_scores_data, indent=True, sort_keys=True)
# Show the report in both cases
with open(STUDENT_SCORES_FILE) as file_scores_data:
Report.do_scores_report(scores=json.load(file_scores_data), students_data=args.students_data)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Used render templates for datastore admin."""
import base64
import datetime
import logging
import os
import random
from google.appengine.datastore import entity_pb
from google.appengine.api import datastore
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import config
from google.appengine.ext.db import stats
from google.appengine.ext.webapp import _template
try:
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import control
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import operation as mr_operation
from google.appengine.ext.mapreduce import util
except ImportError:
from google.appengine._internal.mapreduce import context
from google.appengine._internal.mapreduce import control
from google.appengine._internal.mapreduce import model
from google.appengine._internal.mapreduce import operation as mr_operation
from google.appengine._internal.mapreduce import util
MEMCACHE_NAMESPACE = '_ah-datastore_admin'
XSRF_VALIDITY_TIME = 600
KINDS_AND_SIZES_VAR = 'kinds_and_sizes'
MAPREDUCE_MIN_SHARDS = 8
MAPREDUCE_DEFAULT_SHARDS = 32
MAPREDUCE_MAX_SHARDS = 256
RESERVE_KEY_POOL_MAX_SIZE = 1000
DATASTORE_ADMIN_OPERATION_KIND = '_AE_DatastoreAdmin_Operation'
BACKUP_INFORMATION_KIND = '_AE_Backup_Information'
BACKUP_INFORMATION_FILES_KIND = '_AE_Backup_Information_Kind_Files'
BACKUP_INFORMATION_KIND_TYPE_INFO = '_AE_Backup_Information_Kind_Type_Info'
DATASTORE_ADMIN_KINDS = (DATASTORE_ADMIN_OPERATION_KIND,
BACKUP_INFORMATION_KIND,
BACKUP_INFORMATION_FILES_KIND,
BACKUP_INFORMATION_KIND_TYPE_INFO)
def IsKindNameVisible(kind_name):
return not (kind_name.startswith('__') or
kind_name in DATASTORE_ADMIN_KINDS or
kind_name in model._MAP_REDUCE_KINDS)
def RenderToResponse(handler, template_file, template_params):
"""Render the given template_file using template_vals and write to response.
Args:
handler: the handler whose response we should render to
template_file: the file name only of the template file we are using
template_params: the parameters used to render the given template
"""
template_params = _GetDefaultParams(template_params)
handler.response.headers['X-FRAME-OPTIONS'] = ('ALLOW-FROM %s' %
config.ADMIN_CONSOLE_URL)
template_params['admin_console_url'] = config.ADMIN_CONSOLE_URL
rendered = _template.render(_GetTemplatePath(template_file), template_params)
handler.response.out.write(rendered)
def _GetTemplatePath(template_file):
"""Return the expected path for the template to render.
Args:
template_file: simple file name of template to render.
Returns:
path of template to render.
"""
return os.path.join(
os.path.dirname(__file__), 'templates', template_file)
def _GetDefaultParams(template_params):
"""Update template_params to always contain necessary paths and never None."""
if not template_params:
template_params = {}
template_params.update({
'base_path': config.BASE_PATH,
'mapreduce_path': config.MAPREDUCE_PATH,
})
return template_params
def CreateXsrfToken(action):
"""Generate a token to be passed with a form for XSRF protection.
Args:
action: action to restrict token to
Returns:
suitably random token which is only valid for ten minutes and, if the user
is authenticated, is only valid for the user that generated it.
"""
user_str = _MakeUserStr()
token = base64.b64encode(
''.join(chr(int(random.random()*255)) for _ in range(0, 64)))
memcache.set(token,
(user_str, action),
time=XSRF_VALIDITY_TIME,
namespace=MEMCACHE_NAMESPACE)
return token
def ValidateXsrfToken(token, action):
"""Validate a given XSRF token by retrieving it from memcache.
If the token has not been evicted from memcache (past ten minutes) and the
user strings are equal, then this is a valid token.
Args:
token: token to validate from memcache.
action: action that token should correspond to
Returns:
True if the token exists in memcache and the user strings are equal,
False otherwise.
"""
user_str = _MakeUserStr()
token_obj = memcache.get(token, namespace=MEMCACHE_NAMESPACE)
if not token_obj:
return False
token_str, token_action = token_obj
if user_str != token_str or action != token_action:
return False
return True
def CacheStats(formatted_results):
"""Cache last retrieved kind size values in memcache.
Args:
    formatted_results: list of dictionaries of the form returned by
main._PresentableKindStats.
"""
kinds_and_sizes = dict((kind['kind_name'], kind['total_bytes'])
for kind in formatted_results)
memcache.set(KINDS_AND_SIZES_VAR,
kinds_and_sizes,
namespace=MEMCACHE_NAMESPACE)
def RetrieveCachedStats():
"""Retrieve cached kind sizes from last datastore stats call.
Returns:
Dictionary mapping kind names to total bytes.
"""
return memcache.get(KINDS_AND_SIZES_VAR, namespace=MEMCACHE_NAMESPACE)
def _MakeUserStr():
"""Make a user string to use to represent the user. 'noauth' by default."""
user = users.get_current_user()
return user.nickname() if user else 'noauth'
def GetPrettyBytes(bytes_num, significant_digits=0):
"""Get a pretty print view of the given number of bytes.
This will give a string like 'X MBytes'.
Args:
bytes_num: the original number of bytes to pretty print.
significant_digits: number of digits to display after the decimal point.
Returns:
A string that has the pretty print version of the given bytes.
    If bytes_num is too big, the string 'Alot' will be returned.
"""
byte_prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E']
for i in range(0, 7):
exp = i * 10
if bytes_num < 1<<(exp + 10):
if i == 0:
formatted_bytes = str(bytes_num)
else:
formatted_bytes = '%.*f' % (significant_digits,
(bytes_num * 1.0 / (1<<exp)))
if formatted_bytes != '1':
plural = 's'
else:
plural = ''
return '%s %sByte%s' % (formatted_bytes, byte_prefixes[i], plural)
logging.error('Number too high to convert: %d', bytes_num)
return 'Alot'
def FormatThousands(value):
"""Format a numerical value, inserting commas as thousands separators.
Args:
value: An integer, float, or string representation thereof.
If the argument is a float, it is converted to a string using '%.2f'.
Returns:
A string with groups of 3 digits before the decimal point (if any)
separated by commas.
NOTE: We don't deal with whitespace, and we don't insert
commas into long strings of digits after the decimal point.
"""
if isinstance(value, float):
value = '%.2f' % value
else:
value = str(value)
if '.' in value:
head, tail = value.split('.', 1)
tail = '.' + tail
elif 'e' in value:
head, tail = value.split('e', 1)
tail = 'e' + tail
else:
head = value
tail = ''
sign = ''
if head.startswith('-'):
sign = '-'
head = head[1:]
while len(head) > 3:
tail = ',' + head[-3:] + tail
head = head[:-3]
return sign + head + tail
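# Illustrative sketch (added for clarity; not part of the original module):
# GetPrettyBytes and FormatThousands are pure string-formatting helpers, so
# they can be exercised without an App Engine runtime. The values below are
# arbitrary examples.
def _ExampleFormatting():
  """Return sample formatted strings from the helpers defined above."""
  return (GetPrettyBytes(2048),          # '2 KBytes'
          GetPrettyBytes(1536, 1),       # '1.5 KBytes'
          FormatThousands(1234567.891))  # '1,234,567.89'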
def TruncDelta(delta):
"""Strips microseconds from a timedelta."""
return datetime.timedelta(days=delta.days, seconds=delta.seconds)
def GetPrintableStrs(namespace, kinds):
"""Returns tuples describing affected kinds and namespace.
Args:
namespace: namespace being targeted.
kinds: list of kinds being targeted.
Returns:
(namespace_str, kind_str) tuple used for display to user.
"""
namespace_str = namespace or ''
if kinds:
kind_str = 'all %s entities' % ', '.join(kinds)
else:
kind_str = ''
return (namespace_str, kind_str)
def ParseKindsAndSizes(kinds):
"""Parses kind|size list and returns template parameters.
Args:
kinds: list of kinds to process.
Returns:
sizes_known: whether or not all kind objects have known sizes.
size_total: total size of objects with known sizes.
len(kinds) - 2: for template rendering of greater than 3 kinds.
"""
sizes_known = True
size_total = 0
kinds_and_sizes = RetrieveCachedStats()
if kinds_and_sizes:
for kind in kinds:
if kind in kinds_and_sizes:
size_total += kinds_and_sizes[kind]
else:
sizes_known = False
else:
sizes_known = False
if size_total:
size_total = GetPrettyBytes(size_total)
return sizes_known, size_total, len(kinds) - 2
def _CreateDatastoreConfig():
"""Create datastore config for use during datastore operations."""
return datastore_rpc.Configuration(force_writes=True, deadline=60)
def GenerateHomeUrl(request):
"""Generates a link to the Datastore Admin main page.
Primarily intended to be used for cancel buttons or links on error pages. To
avoid any XSS security vulnerabilities the URL should not use any
user-defined strings (unless proper precautions are taken).
Args:
request: the webapp.Request object (to determine if certain query
parameters need to be used).
Returns:
domain-relative URL for the main Datastore Admin page.
"""
datastore_admin_home = config.BASE_PATH
if request and request.get('run_as_a_service'):
datastore_admin_home += '?run_as_a_service=True'
return datastore_admin_home
class MapreduceDoneHandler(webapp.RequestHandler):
"""Handler to delete data associated with successful MapReduce jobs."""
SUFFIX = 'mapreduce_done'
def post(self):
"""Mapreduce done callback to delete job data if it was successful."""
if 'Mapreduce-Id' in self.request.headers:
mapreduce_id = self.request.headers['Mapreduce-Id']
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
mapreduce_params = mapreduce_state.mapreduce_spec.params
db_config = _CreateDatastoreConfig()
if mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS:
operation_key = mapreduce_params.get(
DatastoreAdminOperation.PARAM_DATASTORE_ADMIN_OPERATION)
if operation_key is None:
logging.error('Done callback for job %s without operation key.',
mapreduce_id)
else:
def tx():
operation = DatastoreAdminOperation.get(operation_key)
if mapreduce_id in operation.active_job_ids:
operation.active_jobs -= 1
operation.completed_jobs += 1
operation.active_job_ids.remove(mapreduce_id)
if not operation.active_jobs:
if operation.status == DatastoreAdminOperation.STATUS_ACTIVE:
operation.status = DatastoreAdminOperation.STATUS_COMPLETED
db.delete(DatastoreAdminOperationJob.all().ancestor(operation),
config=db_config)
operation.put(config=db_config)
if 'done_callback_handler' in mapreduce_params:
done_callback_handler = util.for_name(
mapreduce_params['done_callback_handler'])
if done_callback_handler:
done_callback_handler(operation, mapreduce_id, mapreduce_state)
else:
logging.error('done_callbackup_handler %s was not found',
mapreduce_params['done_callback_handler'])
db.run_in_transaction(tx)
if config.CLEANUP_MAPREDUCE_STATE:
keys = []
keys = model.ShardState.calculate_keys_by_mapreduce_state(
mapreduce_state)
keys.append(model.MapreduceControl.get_key_by_job_id(mapreduce_id))
db.delete(keys, config=db_config)
db.delete(mapreduce_state, config=db_config)
logging.info('State for successful job %s was deleted.', mapreduce_id)
else:
logging.info('Job %s was not successful so no state was deleted.',
mapreduce_id)
else:
logging.error('Done callback called without Mapreduce Id.')
class Error(Exception):
"""Base DatastoreAdmin error type."""
class DatastoreAdminOperation(db.Model):
"""An entity to keep progress and status of datastore admin operation."""
STATUS_CREATED = 'Created'
STATUS_ACTIVE = 'Active'
STATUS_COMPLETED = 'Completed'
STATUS_FAILED = 'Failed'
STATUS_ABORTED = 'Aborted'
PARAM_DATASTORE_ADMIN_OPERATION = 'datastore_admin_operation'
DEFAULT_LAST_UPDATED_VALUE = datetime.datetime(1970, 1, 1)
description = db.TextProperty()
status = db.StringProperty(default=STATUS_CREATED)
active_jobs = db.IntegerProperty(default=0)
active_job_ids = db.StringListProperty()
completed_jobs = db.IntegerProperty(default=0)
last_updated = db.DateTimeProperty(default=DEFAULT_LAST_UPDATED_VALUE,
auto_now=True)
status_info = db.StringProperty(default='', indexed=False)
service_job_id = db.StringProperty()
@classmethod
def kind(cls):
return DATASTORE_ADMIN_OPERATION_KIND
class DatastoreAdminOperationJob(db.Model):
"""An entity to keep track of started jobs to ensure idempotency.
This entity can be used during spawning additional jobs. It is
always stored as a child entity of DatastoreAdminOperation.
Entity key name is job unique id.
"""
pass
def StartOperation(description):
"""Start datastore admin operation.
Args:
description: operation description to be displayed to user.
Returns:
an instance of DatastoreAdminOperation.
"""
operation = DatastoreAdminOperation(
description=description,
id=db.allocate_ids(
db.Key.from_path(DatastoreAdminOperation.kind(), 1), 1)[0])
operation.put(config=_CreateDatastoreConfig())
return operation
@db.non_transactional(allow_existing=False)
def StartMap(operation_key,
job_name,
handler_spec,
reader_spec,
writer_spec,
mapper_params,
mapreduce_params=None,
queue_name=None,
shard_count=MAPREDUCE_DEFAULT_SHARDS):
"""Start map as part of datastore admin operation.
Will increase number of active jobs inside the operation and start new map.
Args:
operation_key: Key of the DatastoreAdminOperation for current operation.
job_name: Map job name.
handler_spec: Map handler specification.
reader_spec: Input reader specification.
writer_spec: Output writer specification.
mapper_params: Custom mapper parameters.
mapreduce_params: Custom mapreduce parameters.
queue_name: the name of the queue that will be used by the M/R.
shard_count: the number of shards the M/R will try to use.
Returns:
resulting map job id as string.
"""
if not mapreduce_params:
mapreduce_params = {}
mapreduce_params[DatastoreAdminOperation.PARAM_DATASTORE_ADMIN_OPERATION] = (
str(operation_key))
mapreduce_params['done_callback'] = '%s/%s' % (config.BASE_PATH,
MapreduceDoneHandler.SUFFIX)
if queue_name is not None:
mapreduce_params['done_callback_queue'] = queue_name
mapreduce_params['force_writes'] = 'True'
def tx(is_xg_transaction):
"""Start MapReduce job and update datastore admin state.
Args:
      is_xg_transaction: True if we are running inside an xg-enabled
        transaction, else False if we are running inside a non-xg-enabled
        transaction (which means the datastore admin state is updated in one
        transaction and the MapReduce job in an independent transaction).
Returns:
result MapReduce job id as a string.
"""
job_id = control.start_map(
job_name, handler_spec, reader_spec,
mapper_params,
output_writer_spec=writer_spec,
mapreduce_parameters=mapreduce_params,
base_path=config.MAPREDUCE_PATH,
shard_count=shard_count,
in_xg_transaction=is_xg_transaction,
queue_name=queue_name)
operation = DatastoreAdminOperation.get(operation_key)
operation.status = DatastoreAdminOperation.STATUS_ACTIVE
operation.active_jobs += 1
operation.active_job_ids = list(set(operation.active_job_ids + [job_id]))
operation.put(config=_CreateDatastoreConfig())
return job_id
datastore_type = datastore_rpc._GetDatastoreType()
if datastore_type != datastore_rpc.BaseConnection.MASTER_SLAVE_DATASTORE:
return db.run_in_transaction_options(
db.create_transaction_options(xg=True), tx, True)
else:
return db.run_in_transaction(tx, False)
def RunMapForKinds(operation_key,
kinds,
job_name_template,
handler_spec,
reader_spec,
writer_spec,
mapper_params,
mapreduce_params=None,
queue_name=None,
max_shard_count=None):
"""Run mapper job for all entities in specified kinds.
Args:
operation_key: The key of the DatastoreAdminOperation to record all jobs.
kinds: list of entity kinds as strings.
job_name_template: template for naming individual mapper jobs. Can
reference %(kind)s and %(namespace)s formatting variables.
handler_spec: mapper handler specification.
reader_spec: reader specification.
writer_spec: writer specification.
mapper_params: custom parameters to pass to mapper.
mapreduce_params: dictionary parameters relevant to the whole job.
queue_name: the name of the queue that will be used by the M/R.
max_shard_count: maximum value for shards count.
Returns:
Ids of all started mapper jobs as list of strings.
"""
jobs = []
try:
for kind in kinds:
mapper_params['entity_kind'] = kind
job_name = job_name_template % {'kind': kind, 'namespace':
mapper_params.get('namespace', '')}
shard_count = GetShardCount(kind, max_shard_count)
jobs.append(StartMap(operation_key, job_name, handler_spec, reader_spec,
writer_spec, mapper_params, mapreduce_params,
queue_name=queue_name, shard_count=shard_count))
return jobs
  except BaseException as ex:
AbortAdminOperation(operation_key,
_status=DatastoreAdminOperation.STATUS_FAILED,
_status_info='%s: %s' % (ex.__class__.__name__, ex))
raise
def GetShardCount(kind, max_shard_count=None):
stat = stats.KindStat.all().filter('kind_name =', kind).get()
if stat:
shard_count = min(max(MAPREDUCE_MIN_SHARDS,
stat.bytes // (32 * 1024 * 1024)),
MAPREDUCE_MAX_SHARDS)
if max_shard_count and max_shard_count < shard_count:
shard_count = max_shard_count
return shard_count
return MAPREDUCE_DEFAULT_SHARDS
def AbortAdminOperation(operation_key,
_status=DatastoreAdminOperation.STATUS_ABORTED,
_status_info=''):
"""Aborts active jobs."""
operation = DatastoreAdminOperation.get(operation_key)
operation.status = _status
operation.status_info = _status_info
operation.put(config=_CreateDatastoreConfig())
for job in operation.active_job_ids:
logging.info('Aborting Job %s', job)
model.MapreduceControl.abort(job, config=_CreateDatastoreConfig())
def get_kind_from_entity_pb(entity):
element_list = entity.key().path().element_list()
return element_list[-1].type() if element_list else None
def FixKeys(entity_proto, app_id):
"""Go over keys in the given entity and update the application id.
Args:
entity_proto: An EntityProto to be fixed up. All identifiable keys in the
proto will have the 'app' field reset to match app_id.
app_id: The desired application id, typically os.getenv('APPLICATION_ID').
"""
def FixKey(mutable_key):
mutable_key.set_app(app_id)
def FixPropertyList(property_list):
for prop in property_list:
prop_value = prop.mutable_value()
if prop_value.has_referencevalue():
FixKey(prop_value.mutable_referencevalue())
elif prop.meaning() == entity_pb.Property.ENTITY_PROTO:
embedded_entity_proto = entity_pb.EntityProto()
try:
embedded_entity_proto.ParsePartialFromString(prop_value.stringvalue())
except Exception:
logging.exception('Failed to fix-keys for property %s of %s',
prop.name(),
entity_proto.key())
else:
FixKeys(embedded_entity_proto, app_id)
prop_value.set_stringvalue(
embedded_entity_proto.SerializePartialToString())
if entity_proto.has_key() and entity_proto.key().path().element_size():
FixKey(entity_proto.mutable_key())
FixPropertyList(entity_proto.property_list())
FixPropertyList(entity_proto.raw_property_list())
class ReserveKeyPool(object):
"""Mapper pool which buffers keys with ids to reserve.
Runs v4 AllocateIds rpc(s) when flushed.
"""
def __init__(self):
self.keys = []
def reserve_key(self, key):
for id_or_name in key.to_path()[1::2]:
if isinstance(id_or_name, (int, long)):
self.keys.append(key)
if len(self.keys) >= RESERVE_KEY_POOL_MAX_SIZE:
self.flush()
return
def flush(self):
datastore._GetConnection()._reserve_keys(self.keys)
self.keys = []
class ReserveKey(mr_operation.Operation):
"""Mapper operation to reserve key ids."""
def __init__(self, key):
self.key = key
self.app_id = key.app()
self.pool_id = 'reserve_key_%s_pool' % self.app_id
def __call__(self, ctx):
pool = ctx.get_pool(self.pool_id)
if not pool:
pool = ReserveKeyPool()
ctx.register_pool(self.pool_id, pool)
pool.reserve_key(self.key)
class PutPool(context.Pool):
"""A trimmed copy of the MutationPool class.
Properties:
puts: a list of entities to put to datastore.
max_entity_count: maximum number of entities before flushing it to db.
"""
POOL_NAME = 'put_pool'
def __init__(self, max_entity_count=context.MAX_ENTITY_COUNT):
"""Constructor.
Args:
max_entity_count: maximum number of entities before flushing it to db.
"""
self.max_entity_count = max_entity_count
self.puts = []
def Put(self, entity):
"""Registers entity to put to datastore.
Args:
entity: The EntityProto for the entity to be put.
"""
if len(self.puts) >= self.max_entity_count:
self.flush()
self.puts.append(entity)
def flush(self):
"""Flush all puts to datastore."""
if self.puts:
datastore_rpc.Connection(config=_CreateDatastoreConfig()).put(self.puts)
self.puts = []
class Put(mr_operation.Operation):
"""Mapper operation to batch puts."""
def __init__(self, entity):
"""Constructor.
Args:
entity: The EntityProto of the entity to put.
"""
self.entity = entity
def __call__(self, ctx):
pool = ctx.get_pool(PutPool.POOL_NAME)
if not pool:
pool = PutPool(
max_entity_count=(context.MAX_ENTITY_COUNT/(2**ctx.task_retry_count)))
ctx.register_pool(PutPool.POOL_NAME, pool)
pool.Put(self.entity)
|
|
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from rdmo.conditions.models import Condition
from rdmo.core.models import TranslationMixin
from rdmo.core.plugins import get_plugin
from rdmo.core.utils import copy_model, join_url
class OptionSet(models.Model):
uri = models.URLField(
max_length=640, blank=True,
verbose_name=_('URI'),
help_text=_('The Uniform Resource Identifier of this option set (auto-generated).')
)
uri_prefix = models.URLField(
max_length=256,
verbose_name=_('URI Prefix'),
help_text=_('The prefix for the URI of this option set.')
)
key = models.SlugField(
max_length=128, blank=True,
verbose_name=_('Key'),
help_text=_('The internal identifier of this option set.')
)
comment = models.TextField(
blank=True,
verbose_name=_('Comment'),
help_text=_('Additional internal information about this option set.')
)
locked = models.BooleanField(
default=False,
verbose_name=_('Locked'),
        help_text=_('Designates whether this option set (and its options) can be changed.')
)
order = models.IntegerField(
default=0,
verbose_name=_('Order'),
help_text=_('The position of this option set in lists.')
)
provider_key = models.SlugField(
max_length=128, blank=True,
verbose_name=_('Provider'),
help_text=_('The provider for this optionset. If set, it will create dynamic options for this optionset.')
)
conditions = models.ManyToManyField(
Condition, blank=True, related_name='optionsets',
verbose_name=_('Conditions'),
help_text=_('The list of conditions evaluated for this option set.')
)
class Meta:
ordering = ('uri', )
verbose_name = _('Option set')
verbose_name_plural = _('Option sets')
def __str__(self):
return self.key
def save(self, *args, **kwargs):
self.uri = self.build_uri(self.uri_prefix, self.key)
super().save(*args, **kwargs)
for option in self.options.all():
option.save()
def copy(self, uri_prefix, key):
optionset = copy_model(self, uri_prefix=uri_prefix, key=key)
# copy m2m fields
optionset.conditions.set(self.conditions.all())
# copy children
for option in self.options.all():
option.copy(uri_prefix, option.key, optionset=optionset)
return optionset
@property
def label(self):
return self.uri
@property
def provider(self):
return get_plugin('OPTIONSET_PROVIDERS', self.provider_key)
@property
def has_provider(self):
return self.provider is not None
@property
def has_search(self):
return self.has_provider and self.provider.search
@property
def has_conditions(self):
return self.conditions.exists()
@property
def is_locked(self):
return self.locked
@classmethod
def build_uri(cls, uri_prefix, key):
assert key
return join_url(uri_prefix or settings.DEFAULT_URI_PREFIX, '/options/', key)
class Option(models.Model, TranslationMixin):
uri = models.URLField(
max_length=640, blank=True,
verbose_name=_('URI'),
help_text=_('The Uniform Resource Identifier of this option (auto-generated).')
)
uri_prefix = models.URLField(
max_length=256,
verbose_name=_('URI Prefix'),
help_text=_('The prefix for the URI of this option.')
)
key = models.SlugField(
max_length=128, blank=True,
verbose_name=_('Key'),
help_text=_('The internal identifier of this option.')
)
path = models.SlugField(
max_length=512, blank=True,
verbose_name=_('Path'),
help_text=_('The path part of the URI for this option (auto-generated).')
)
comment = models.TextField(
blank=True,
verbose_name=_('Comment'),
help_text=_('Additional internal information about this option.')
)
locked = models.BooleanField(
default=False,
verbose_name=_('Locked'),
help_text=_('Designates whether this option can be changed.')
)
optionset = models.ForeignKey(
'OptionSet', on_delete=models.CASCADE, related_name='options',
verbose_name=_('Option set'),
help_text=_('The option set this option belongs to.')
)
order = models.IntegerField(
default=0,
verbose_name=_('Order'),
help_text=_('Position in lists.')
)
text_lang1 = models.CharField(
max_length=256, blank=True,
verbose_name=_('Text (primary)'),
help_text=_('The text for this option in the primary language.')
)
text_lang2 = models.CharField(
max_length=256, blank=True,
verbose_name=_('Text (secondary)'),
help_text=_('The text for this option in the secondary language.')
)
text_lang3 = models.CharField(
max_length=256, blank=True,
verbose_name=_('Text (tertiary)'),
help_text=_('The text for this option in the tertiary language.')
)
text_lang4 = models.CharField(
max_length=256, blank=True,
verbose_name=_('Text (quaternary)'),
help_text=_('The text for this option in the quaternary language.')
)
text_lang5 = models.CharField(
max_length=256, blank=True,
verbose_name=_('Text (quinary)'),
help_text=_('The text for this option in the quinary language.')
)
additional_input = models.BooleanField(
default=False,
verbose_name=_('Additional input'),
help_text=_('Designates whether an additional input is possible for this option.')
)
class Meta:
ordering = ('optionset__order', 'optionset__key', 'order', 'key')
verbose_name = _('Option')
verbose_name_plural = _('Options')
def __str__(self):
return self.path
def save(self, *args, **kwargs):
self.path = self.build_path(self.key, self.optionset)
self.uri = self.build_uri(self.uri_prefix, self.path)
super().save(*args, **kwargs)
def copy(self, uri_prefix, key, optionset=None):
return copy_model(self, uri_prefix=uri_prefix, key=key, optionset=optionset or self.optionset)
@property
def parent_fields(self):
return ('optionset', )
@property
def text(self):
return self.trans('text')
@property
def label(self):
return '%s ("%s")' % (self.uri, self.text)
@property
def is_locked(self):
return self.locked or self.optionset.locked
@classmethod
def build_path(cls, key, optionset):
assert key
assert optionset
return '%s/%s' % (optionset.key, key) if (optionset and key) else None
@classmethod
def build_uri(cls, uri_prefix, path):
assert path
return join_url(uri_prefix or settings.DEFAULT_URI_PREFIX, '/options/', path)
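# Usage sketch (not part of the original module; an illustration only, assuming
# join_url joins its arguments with single slashes):
#
#     OptionSet.build_uri('https://example.com/terms', 'colors')
#     # -> 'https://example.com/terms/options/colors'
#
#     Option.build_path('blue', colors_optionset)
#     # -> 'colors/blue'   (colors_optionset is a hypothetical saved OptionSet
#     #                      whose key is 'colors')
#
#     Option.build_uri('https://example.com/terms', 'colors/blue')
#     # -> 'https://example.com/terms/options/colors/blue'
#
# copy() duplicates an option set under a new prefix/key, re-linking its
# conditions and re-creating its child options:
#
#     new_set = optionset.copy('https://example.com/terms', 'colors-copy')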
|
|
from ipykernel.kernelbase import Kernel
from IPython.utils.path import locate_profile
from jupyter_client import MultiKernelManager
from pexpect import replwrap,EOF,spawn
import signal
import re
import os
from distutils.spawn import find_executable
import sys
from IPython.display import Image
from glob import glob
import tempfile
import random
from shutil import rmtree
from base64 import b64encode
from itertools import cycle
__version__ = '1.0.1'
km=None
class JythonKernel(Kernel):
implementation = 'IDV Kernel'
implementation_version = __version__
language = 'jython'
language_version = '2.7.0'
language_info = {
'mimetype': 'text/x-python',
'name': 'jython',
'file_extension': '.py',
'codemirror_mode': {'version': 2, 'name': 'text/x-python'},
'pygments_lexer': 'python',
'help_links': [
{'text': 'Jython', 'url': 'www.jython.org'},
{'text': 'Jython Kernel Help', 'url': 'https://github.com/suvarchal/IJython'},
],
}
banner = "IDV Kernel"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self._start_jython()
try:
self.hist_file = os.path.join(locate_profile(),'jython_kernel.hist')
except Exception:
self.hist_file = None
self.log.warn('No default profile found, history unavailable')
self.max_hist_cache = 500
self.hist_cache = []
def _start_jython(self):
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
#for some reason kernel needs two excepts with jython executable so using only jython.jar
try:
if "IDV_HOME" in os.environ:
#self._executable=os.environ['IDV_HOME']+"/jre.bundle/Contents/Home/jre/bin/java"
if find_executable(os.environ['IDV_HOME']+"/jre/bin/java"):
self._executable=os.environ['IDV_HOME']+"/jre/bin/java"
elif find_executable(os.environ['IDV_HOME']+"/jre.bundle/Contents/Home/jre/bin/java"):
self._executable=os.environ['IDV_HOME']+"/jre.bundle/Contents/Home/jre/bin/java"
else:
self._executable="java"
liblist=["idv.jar","ncIdv.jar","external.jar","visad.jar","jython.jar"]
libs = ":".join([os.environ['IDV_HOME']+"/"+lib for lib in liblist])
opts=" -Xmx2048m -XX:+DisableExplicitGC -Didv.enableStereo=false -cp "+libs+" org.python.util.jython -i "+os.environ['IDV_HOME']+"/.jythonrc.py"
self._executable=self._executable+opts
else:
raise Exception("IDV_HOME not found")
self._child = spawn(self._executable,timeout = None)
self._child.waitnoecho(True)
self._child.expect(u">>> ")
self._child.expect(u">>> ")
self._child.setwinsize(600,400)
finally:
signal.signal(signal.SIGINT, sig)
def do_execute(self, code, silent, store_history=False, user_expressions=None,
allow_stdin=False):
code = code.strip()
abort_msg = {'status': 'abort',
'execution_count': self.execution_count}
interrupt = False
doDisplay = False
try:
if code.strip().startswith("%%python"):
code=code.lstrip("%%python").strip()
output=None
display_data=self.do_ipython(code)
if len(display_data)>0:
doDisplay=True
elif code.strip().startswith("%%isl"):
code=code.lstrip("%%isl").strip()
cmd="""runIsl("%s")"""%code
output=self.jyrepl(cmd,timeout=None)
elif code.strip().startswith("%%HTML"):
code=code.lstrip("%%HTML").strip()
cmd="""%s"""%code
display_data=[]
doDisplay=True
display_data.append({'text/html':code})
elif code.strip().startswith("%%Latex"):
code=code.lstrip("%%Latex").strip()
cmd="""$$%s$$"""%code
display_data=[]
doDisplay=True
display_data.append({'text/latex':cmd})
elif code.strip().startswith("%Image"):
code=code.lstrip("%Image").strip()
if glob("%s" % code):
display_data=[]
doDisplay=True
file_enc=b64encode(open(code, "rb").read())
#html_tag = '<img alt="Embedded Image" src="data:video/x-m4v;base64,{0}">'.format(file_enc)
html_tag = '<img alt="Embedded Image" src="data:image/png;base64,{0}">'.format(file_enc)
display_data.append({'text/html':html_tag})
else:
output=None
#display_data.append({'image/png':})
elif code.strip().startswith("%showMovie"):
plot_dir = tempfile.mkdtemp(dir=os.path.expanduser("~"))
plot_file="plot_"+str(random.randint(1000, 9999))+".gif"
plot_file=os.path.join(plot_dir,plot_file)
cmd='idv.waitUntilDisplaysAreDone();writeMovie('+repr(plot_file)+')'
self.jyrepl(cmd)
display_data = []
if not len(glob("%s/*.gif" % plot_dir))==0:
gifimages = [open(imgfile, 'rb').read() for imgfile in glob("%s/*.gif" % plot_dir)]
for image in gifimages:
display_data.append({'image/png': b64encode(image).decode('ascii')})
doDisplay=True
rmtree(plot_dir)
else:
output=None
#### below works when showMovie imagefile
#plot_file=code.strip("showMovie").strip()
#display_data = []
#if os.path.isfile(plot_file):
# gifimage = open(plot_file, 'rb').read()
# display_data.append({'image/png': b64encode(gifimage).decode('ascii')})
else:
output = self.jyrepl(code, timeout=None)
if output.lstrip().startswith("{"):
out=eval(output.strip())
display_data=[]
try:
display_data.append({'image/png': out['data']})
doDisplay=True
except KeyError:
output = '\n'.join([line for line in output.splitlines()])+'\n'
else:
output = '\n'.join([line for line in output.splitlines()])+'\n'
except KeyboardInterrupt:
self._child.sendintr()
output = self._child.before + "\n Got interrupt: Current Jython doesn't support interrupting... so restarting..."
interrupt = True
#self.jyrepl("exit()")
#self._start_jython()
except EOF:
output = self._child.before + 'Reached EOF Restarting Jython'
self._start_jython()
if not silent and not doDisplay:
stream_content = {'name': 'stdout', 'text': output}
self.send_response(self.iopub_socket, 'stream', stream_content)
if not silent and doDisplay:
for data in display_data:
self.send_response(self.iopub_socket, 'display_data',{'data':data,'metadata':{}})
#doDisplay=True
#if doDisplay:
# print("i am in Display")
# plot_dir = "/home/suvarchal" #tempfile.mkdtemp(dir=os.path.expanduser("~"))
# plot_file=plot_dir+"/"+"plot_"+str(random.randint(1000, 9999))+".png"
#plot_opts=display_code.strip('()')
#output = self.jywrapper.run_command("getImage();writeImage('"+plot_file+"')", timeout=None)
#if not len(glob("%s/plot_jumbo.png" % plot_dir))==0:
#print("found plot")
#images = [open(imgfile, 'rb').read() for imgfile in glob("%s/plot_jumbo.png" % plot_dir)]
#display_data = []
#for image in images:
# print(image)
# display_data.append({'image/png': b64encode(image).decode('ascii')})
#for data in display_data:
# self.send_response(self.iopub_socket, 'display_data',{'data':data,'metadata':{}})
if code.strip() and store_history:
self.hist_cache.append(code.strip())
#rmtree(plot_dir)
if interrupt:
return {'status': 'abort', 'execution_count': self.execution_count}
return {'status': 'ok','execution_count': self.execution_count,'payload': [],'user_expressions': {}}
def do_complete(self, code, cursor_pos):
code = code[:cursor_pos]
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
if not code or code[-1] == ' ':
return default
#tokens = code.split()
tokens = re.split(r"[^\w\.]",code)
if not tokens:
return default
token = tokens[-1]
start = cursor_pos - len(token)
# if len(re.split(r"[^\w]",token)) > 1:
# cmd="dir("+re.split(r"[^\w]",token)[-2]+")"
# output=self.jyrepl(cmd,timeout=None)
# matches.extend([e for e in re.split(r"[^\w]",output)[:] if not e.strip()=="" and not e.strip().startswith("__")])
# token=re.split(r"[^\w]",token)[-1]
# start = cursor_pos - len(token)
# else:
# cmd=("import sys;sys.builtins.keys()")
# output=self.jyrepl(cmd,timeout=None)
# matches.extend([e for e in re.split(r"[^\w]",output)[:] if not e.strip()=="" and not e.strip().startswith("__")])
#self._child.send(code.strip()+'\t')
#self._child.expect(u">>> ",timeout=None)
#self._child.expect(u">>> ",timeout=None)
#output=self._child.before
matches=[]
matches=["%%isl","%%python","%showMovie","%%Latex","%%HTML","%Image"]
code='do_complete('+repr(token)+')'
output=self.jyrepl(code)
if len(output)>1: matches.extend(eval(output))
#matches.extend([e for e in re.split(r"[^\w]",output)[:] if not e.strip()=="" and not e.strip().startswith("__")])
if not matches:
return default
matches = [m for m in matches if m.startswith(token)]
return {'matches': sorted(matches), 'cursor_start': start,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
def do_history(self, hist_access_type, output, raw, session=None, start=None, stop=None, n=None, pattern=None, unique=None):
if not self.hist_file:
return {'history':[]}
if not os.path.exists(self.hist_file):
with open(self.hist_file, 'wb') as f:
f.write('')
with open(self.hist_file, 'rb') as f:
history = f.readlines()
history = history[:self.max_hist_cache]
self.hist_cache = history
self.log.debug('**HISTORY:')
self.log.debug(history)
history = [(None, None, h) for h in history]
return {'history': history}
def do_inspect(self,code,cursor_pos,detail_level=1):
found=False
default={'status':'ok', 'found': False,
'data': dict(), 'metadata': dict()}
if not code or code[-1] == ' ':
return default
#if len(re.split(r"[^\w]",token)) > 1:
# cmd="dir("+re.split(r"[^\w]",token)[-2]+")"
# output=self.jyrepl(cmd,timeout=None)
# matches.extend([e for e in re.split(r"[^\w]",output)[:] if not e.strip()=="" and not e.strip().startswith("__")])
# token=re.split(r"[^\w]",token)[-1]
# start = cursor_pos - len(token)
#else:
# cmd=("import sys;sys.builtins.keys()")
# output=self.jyrepl(cmd,timeout=None)
# matches.extend([e for e in re.split(r"[^\w]",output)[:] if not e.strip()=="" and not e.strip().startswith("__")])
code='do_inspect('+repr(code)+')'
data=self.jyrepl(code)
try:
data=eval(data)
found=True
except:
found=False
return {'status':'ok', 'found': found,
'data': {'text/plain':data}, 'metadata': dict()}
def do_shutdown(self,restart):
#self.send("exit()")
self._child.kill(signal.SIGKILL)
return {'status':'ok', 'restart':restart}
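# jyrepl() below relays code to the spawned Jython/IDV REPL. A bare single-token
# expression (no '=', no spaces, not a print statement) is wrapped in eval() so
# its value is echoed back; everything else is wrapped in exec() and sent line by
# line. Each sendline waits for the prompt twice, matching the double-prompt
# behaviour of the IDV Jython console noted in _start_jython().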
def jyrepl(self,code,timeout=None):
out=""
if (len(re.split(r"\=",code.strip()))==1) and (len(re.split(r"[\ ]",code.strip()))==1) and not code.strip().startswith("print"):
code='eval('+repr(code.strip())+')'
self._child.sendline(code)
now_prompt=self._child.expect_exact([u">>> ",u"... "])
if len(self._child.before.splitlines())>1: out+='\n'.join(self._child.before.splitlines()[1:])+'\n'
now_prompt=self._child.expect_exact([u">>> ",u"... "])
else:
code='exec('+repr(code)+')'
for line in code.splitlines():
self._child.sendline(line)
now_prompt=self._child.expect_exact([u">>> ",u"... "])
if len(self._child.before.splitlines())>1: out+='\n'.join(self._child.before.splitlines()[1:])+'\n'
now_prompt=self._child.expect_exact([u">>> ",u"... "])
return out
def do_ipython(self,code):
global km
global remote_id
global remote
global kernelmanager
# python=True
if km==None:
kernelmanager = MultiKernelManager()
remote_id = kernelmanager.start_kernel('python2')
remote_kernel = kernelmanager.get_kernel(remote_id)
remote = remote_kernel.client()
km=remote.blocking_client()
km.start_channels()
if km.shell_channel.msg_ready():
km.shell_channel.get_msg()
km.iopub_channel.get_msg()
#if km.shell_channel.msg_ready():
# km.shell_channel.get_msg()
#if km.iopub_channel.msg_ready():
# km.iopub_channel.get_msg()
km.execute(code)
display_data=[]
msgS=km.shell_channel.get_msg(block=True,timeout=-1)
msg=km.iopub_channel.get_msg(block=True,timeout=-1)
msgs=km.iopub_channel.get_msgs()
for m in msgs:
if m['msg_type']=='error':
output=m['content']['text'] #.__repr__()#+msg+id
display_data.append({'text/plain':output})
break
if m['msg_type']=='stream':
output=m['content']['text'] #.__repr__()#+msg+id
display_data.append({'text/plain':output})
if m['msg_type']=='display_data':
display_data.append(m['content']['data'])
return display_data
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=JythonKernel)
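# Notebook usage sketch (illustrative only; mirrors the prefixes parsed in
# JythonKernel.do_execute above, with hypothetical cell contents):
#
#     %%python                      # run the cell in a separate IPython kernel
#     print("hello from CPython")
#
#     %%isl                         # pass the cell to IDV's runIsl()
#     <isl>...</isl>
#
#     %%HTML                        # render the cell as raw HTML
#     <b>bold</b>
#
#     %%Latex                       # render the cell as display math
#     e^{i\pi} + 1 = 0
#
#     %Image /path/to/figure.png    # embed an existing image file
#     %showMovie                    # capture IDV displays and embed the GIF
#
# Anything else is sent to the Jython REPL via jyrepl().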
|
|
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
_cleanup = True
def __init__(self, tests=()):
self._tests = []
self._removed_tests = 0
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = self._removed_tests
for test in self:
if test:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not callable(test):
raise TypeError("{} is not callable".format(repr(test)))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, str):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for index, test in enumerate(self):
if result.shouldStop:
break
test(result)
if self._cleanup:
self._removeTestAtIndex(index)
return result
def _removeTestAtIndex(self, index):
"""Stop holding a reference to the TestCase at index."""
try:
test = self._tests[index]
except TypeError:
# support for suite implementations that have overridden self._tests
pass
else:
# Some unittest tests add non TestCase/TestSuite objects to
# the suite.
if hasattr(test, 'countTestCases'):
self._removed_tests += test.countTestCases()
self._tests[index] = None
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for index, test in enumerate(self):
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if not debug:
test(result)
else:
test.debug()
if self._cleanup:
self._removeTestAtIndex(index)
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
################################
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
_call_if_exists(result, '_setupStdout')
try:
tearDownClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
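# Usage sketch (not part of this module; a minimal illustration of the composite
# pattern implemented above). "_ExampleTest" is a hypothetical TestCase used only
# for demonstration:
#
#     import unittest
#
#     class _ExampleTest(unittest.TestCase):
#         def test_truth(self):
#             self.assertTrue(True)
#
#     suite = unittest.TestSuite()
#     suite.addTest(_ExampleTest('test_truth'))
#     unittest.TextTestRunner(verbosity=2).run(suite)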
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ExperimentRelatedLink.created'
db.add_column(u'calc_experimentrelatedlink', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'ExperimentRelatedLink.modified'
db.add_column(u'calc_experimentrelatedlink', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'WorkflowIndexPage.created'
db.add_column(u'calc_workflowindexpage', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'WorkflowIndexPage.modified'
db.add_column(u'calc_workflowindexpage', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'WorkflowPage.created'
db.add_column(u'calc_workflowpage', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'WorkflowPage.modified'
db.add_column(u'calc_workflowpage', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'WorkflowSourceDataFile.created'
db.add_column(u'calc_workflowsourcedatafile', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'WorkflowSourceDataFile.modified'
db.add_column(u'calc_workflowsourcedatafile', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'BaseWorkflowStepPage.created'
db.add_column(u'calc_baseworkflowsteppage', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'BaseWorkflowStepPage.modified'
db.add_column(u'calc_baseworkflowsteppage', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ExperimentRelatedLink.created'
db.delete_column(u'calc_experimentrelatedlink', 'created')
# Deleting field 'ExperimentRelatedLink.modified'
db.delete_column(u'calc_experimentrelatedlink', 'modified')
# Deleting field 'WorkflowIndexPage.created'
db.delete_column(u'calc_workflowindexpage', 'created')
# Deleting field 'WorkflowIndexPage.modified'
db.delete_column(u'calc_workflowindexpage', 'modified')
# Deleting field 'WorkflowPage.created'
db.delete_column(u'calc_workflowpage', 'created')
# Deleting field 'WorkflowPage.modified'
db.delete_column(u'calc_workflowpage', 'modified')
# Deleting field 'WorkflowSourceDataFile.created'
db.delete_column(u'calc_workflowsourcedatafile', 'created')
# Deleting field 'WorkflowSourceDataFile.modified'
db.delete_column(u'calc_workflowsourcedatafile', 'modified')
# Deleting field 'BaseWorkflowStepPage.created'
db.delete_column(u'calc_baseworkflowsteppage', 'created')
# Deleting field 'BaseWorkflowStepPage.modified'
db.delete_column(u'calc_baseworkflowsteppage', 'modified')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'calc.baseworkflowsteppage': {
'Meta': {'object_name': 'BaseWorkflowStepPage', '_ormbases': [u'wagtailcore.Page']},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.createcolumnbyconcatenate': {
'Meta': {'object_name': 'CreateColumnByConcatenate', '_ormbases': [u'calc.BaseWorkflowStepPage']},
u'baseworkflowsteppage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['calc.BaseWorkflowStepPage']", 'unique': 'True', 'primary_key': 'True'}),
'column_1': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'column_2': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'column_3': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'column_4': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'new_column_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'tag': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['calc.Tag']", 'symmetrical': 'False'})
},
u'calc.createcolumnbylookup': {
'Meta': {'object_name': 'CreateColumnByLookup', '_ormbases': [u'calc.BaseWorkflowStepPage']},
u'baseworkflowsteppage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['calc.BaseWorkflowStepPage']", 'unique': 'True', 'primary_key': 'True'}),
'create_column_using': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'field_in_lookup_table': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'field_in_workflow_data': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'new_column_name': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'calc.createcustomformulacolumn': {
'Meta': {'object_name': 'CreateCustomFormulaColumn', '_ormbases': [u'calc.BaseWorkflowStepPage']},
u'baseworkflowsteppage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['calc.BaseWorkflowStepPage']", 'unique': 'True', 'primary_key': 'True'}),
'formula': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'new_column_name': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'calc.experimentindexpage': {
'Meta': {'object_name': 'ExperimentIndexPage', '_ormbases': [u'wagtailcore.Page']},
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.experimentpage': {
'Meta': {'object_name': 'ExperimentPage', '_ormbases': [u'wagtailcore.Page']},
'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.experimentrelatedlink': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'ExperimentRelatedLink'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['calc.ExperimentPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'calc.filterorlabel': {
'Meta': {'object_name': 'FilterOrLabel', '_ormbases': [u'calc.BaseWorkflowStepPage']},
u'baseworkflowsteppage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['calc.BaseWorkflowStepPage']", 'unique': 'True', 'primary_key': 'True'}),
'filter_options': ('django.db.models.fields.CharField', [], {'default': "'kp'", 'max_length': '2'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'label_field_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'calc.standardstats': {
'Meta': {'object_name': 'StandardStats', '_ormbases': [u'calc.BaseWorkflowStepPage']},
u'baseworkflowsteppage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['calc.BaseWorkflowStepPage']", 'unique': 'True', 'primary_key': 'True'}),
'group_by_field': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'include_max': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'include_mean': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'include_min': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'include_standard_deviation': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'include_standard_error': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
},
u'calc.standardstatspagetag': {
'Meta': {'object_name': 'StandardStatsPageTag'},
'content_object': ('modelcluster.fields.ParentalKey', [], {'related_name': "'group_by_field_names_items'", 'to': u"orm['calc.StandardStats']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'calc_standardstatspagetag_items'", 'to': u"orm['taggit.Tag']"})
},
u'calc.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
u'calc.workflowindexpage': {
'Meta': {'object_name': 'WorkflowIndexPage', '_ormbases': [u'wagtailcore.Page']},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.workflowpage': {
'Meta': {'object_name': 'WorkflowPage', '_ormbases': [u'wagtailcore.Page']},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'calc.workflowsourcedatafile': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'WorkflowSourceDataFile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'source_data_files'", 'to': u"orm['calc.WorkflowPage']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_data_file': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"})
},
u'calc.workflowstep': {
'Meta': {'ordering': "['sort_order']", 'object_name': 'WorkFlowStep'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'wagtailcore.page': {
'Meta': {'object_name': 'Page'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['contenttypes.ContentType']"}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'expire_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'go_live_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'has_unpublished_changes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_pages'", 'null': 'True', 'to': u"orm['auth.User']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'search_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'seo_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'show_in_menus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'wagtaildocs.document': {
'Meta': {'object_name': 'Document'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['calc']
|
|
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib import dates
from mpl_toolkits.basemap import Basemap
import calendar
from scipy.optimize import curve_fit
flight = 'science 10'
flight_times = {
#'science 1' : [datetime(2015,4,5,9,0), datetime(2015,4,5,14,0) ,''],
#'ferry 1' : [datetime(2015,4,6,9,0), datetime(2015,4,6,11,0) ,'UHSAS_Polar6_20150406_R0_V1.ict'],
#'ferry 2' : [datetime(2015,4,6,15,0), datetime(2015,4,6,18,0) ,'UHSAS_Polar6_20150406_R0_V2.ict'],
#'science 2' : [datetime(2015,4,7,16,0), datetime(2015,4,7,21,0) ,'UHSAS_Polar6_20150407_R0_V1.ict'],
#'science 3' : [datetime(2015,4,8,13,0), datetime(2015,4,8,17,0) ,'UHSAS_Polar6_20150408_R0_V1.ict'],
#'science 4' : [datetime(2015,4,8,17,30),datetime(2015,4,8,22,0) ,'UHSAS_Polar6_20150408_R0_V2.ict'],
#'science 5' : [datetime(2015,4,9,13,30),datetime(2015,4,9,18,0) ,'UHSAS_Polar6_20150409_R0_V1.ict'],
#'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),'UHSAS_Polar6_20150410_R0_V1.ict'],
#'science 6' : [datetime(2015,4,11,15,0),datetime(2015,4,11,22,0),'UHSAS_Polar6_20150411_R0_V1.ict'],
#'science 7' : [datetime(2015,4,13,15,0),datetime(2015,4,13,21,0),'UHSAS_Polar6_20150413_R0_V1.ict'],
#'science 8' : [datetime(2015,4,20,15,0),datetime(2015,4,20,20,0),'UHSAS_Polar6_20150420_R0_V1.ict'],
#'science 9' : [datetime(2015,4,20,21,0),datetime(2015,4,21,2,0) ,'UHSAS_Polar6_20150420_R0_V2.ict'],
'science 10' : [datetime(2015,4,21,16,8),datetime(2015,4,21,16,18),'UHSAS_Polar6_20150421_R0_V1.ict'], ###
}
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
print start_time, UNIX_start_time
print end_time, UNIX_end_time
incand_calib_intercept = 0.19238 #alert = 0.19238
incand_calib_slope = 0.00310 #alert = 0.00310
R = 8.3144621 # in m3*Pa/(K*mol)
sample_flow_lower_limit = 100
min_BC_VED = 70
max_BC_VED = 220
min_rBC_mass = ((min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
max_rBC_mass = ((max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
incand_min = (min_rBC_mass-incand_calib_intercept)/incand_calib_slope
incand_max = (max_rBC_mass-incand_calib_intercept)/incand_calib_slope
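#The two conversions above, spelled out:
#  rBC mass [fg] = (pi/6) * (VED[nm]/10^7 [nm per cm])^3 * 1.8 [g/cm^3] * 10^15 [fg/g],
#  i.e. the mass of a volume-equivalent-diameter sphere at an assumed rBC density of 1.8 g/cm^3.
#  incandescence amplitude = (rBC mass - calib intercept)/calib slope, which inverts the
#  calibration line (mass = slope*amplitude + intercept) to get the SP2 amplitude window.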
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
printcounter = 0 #progress-report counter; must be initialized before it is first checked in the loop below
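#NOTE: add_data is used further down to INSERT the binned results, but it is not
#defined anywhere in this excerpt. A minimal sketch is given here; the table name
#and column list are assumptions for illustration, not taken from the original script.
add_data = ('INSERT INTO polar6_binned_UHSAS_SP2_results '
            '(UNIX_UTC_ts, binLL, binUL, property, prop_value) '
            'VALUES (%(UNIX_UTC_ts)s, %(binLL)s, %(binUL)s, %(property)s, %(prop_value)s)')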
for UHSAS_file in ['UHSAS_Polar6_20150410_R0_V1.ict','UHSAS_Polar6_20150411_R0_V1.ict','UHSAS_Polar6_20150413_R0_V1.ict','UHSAS_Polar6_20150420_R0_V1.ict','UHSAS_Polar6_20150420_R0_V2.ict']:
with open(UHSAS_file, 'r') as f:
print UHSAS_file
file_date = UHSAS_file[13:21]
date = datetime.strptime(file_date, '%Y%m%d')
##get bin limits
i=0
while i < 9: #indep_var_number is always on line 10
f.readline()
i+=1
indep_var_number = float(f.readline())
i=0
while i < (indep_var_number + 11): #check that 11 is right for each set of files
f.readline()
i+=1
bin_LL_line = (f.readline()).split()
f.readline() #skip this line
bin_UL_line = (f.readline()).split()
##create bins dict
bin_dict = {}
i=0
for LL_limit in bin_LL_line:
bin_dict[i] = [float(LL_limit),float(bin_UL_line[i])]
i+=1
#start analysis
data_start = False
for line in f:
no_prev_particle = False
if line.startswith('time,flow,pressure,total_number_conc'):
data_start = True
continue
if data_start == True:
newline = line.split()
time_stamp = date + timedelta(seconds = float(newline[0].rstrip(',')))
UNIX_time_stamp = calendar.timegm(time_stamp.utctimetuple())
time_min = UNIX_time_stamp - 1
time_max = UNIX_time_stamp
#print progress reports
if printcounter == 100:
print time_stamp
printcounter = 0
printcounter += 1
####
#get the sample flow from the hk data to calc sampled volume
cursor.execute(('SELECT sample_flow from polar6_hk_data_2015 where UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s'),(time_min,time_max))
hk_data = cursor.fetchall()
#if no hk data collected we can't get a number conc, but we can still get a mean coating and core size, so continue, but sample flow is nan
if hk_data == []:
sample_flow = np.nan
else:
sample_flow = hk_data[0][0] #in vccm
#drops in the sample flow are an issue so don't calc a conc for these periods
if sample_flow <= sample_flow_lower_limit:
sample_flow = np.nan
#get the timestamp from the last valid particle in the interval
cursor.execute(('SELECT UNIX_UTC_ts FROM polar6_coating_2015 WHERE UNIX_UTC_ts < %s AND particle_type = %s and instrument = %s and incand_amp >=%s and incand_amp <=%s order by UNIX_UTC_ts desc limit 1'),(time_max, 'incand', 'UBCSP2',incand_min,incand_max))
last_particle_data = cursor.fetchall()
last_particle_ts = last_particle_data[0][0]
				#get timestamp from last valid particle before this interval so we can calculate the volume sampled
cursor.execute(('SELECT UNIX_UTC_ts FROM polar6_coating_2015 WHERE UNIX_UTC_ts < %s AND particle_type = %s and instrument = %s and incand_amp >=%s and incand_amp <=%s order by UNIX_UTC_ts desc limit 1'),(time_min, 'incand', 'UBCSP2',incand_min,incand_max))
prev_particle_data = cursor.fetchall()
#take care of the edge-case where we're looking at the first particle of the run, in this case we'll ignore the first particle in the interval since we don't know when we started waiting for it to be detected
if prev_particle_data == []:
#in this case get the timestamp from the first valid particle in the interval
cursor.execute(('SELECT UNIX_UTC_ts FROM polar6_coating_2015 WHERE UNIX_UTC_ts >= %s AND particle_type = %s and instrument = %s and incand_amp >=%s and incand_amp <=%s order by UNIX_UTC_ts limit 1'),(time_min, 'incand', 'UBCSP2',incand_min,incand_max))
substitute_prev_particle_data = cursor.fetchall()
prev_particle_ts = substitute_prev_particle_data[0][0]
no_prev_particle = True
else:
prev_particle_ts = prev_particle_data[0][0]
#calc total interval sampling time and sampled volume
interval_sampling_time = last_particle_ts - prev_particle_ts
if interval_sampling_time <= 0:
#print 'interval_sampling_time bad', interval_sampling_time
interval_sampled_volume = np.nan
else:
interval_sampled_volume = sample_flow*interval_sampling_time/60 #factor of 60 to convert minutes to secs, result is in cc
#get T and P for correction to STP/SCCM
cursor.execute(('SELECT temperature_C,BP_Pa from polar6_flight_track_details where UNIX_UTC_ts > %s and UNIX_UTC_ts <= %s'),(time_min,time_max))
TandP_data = cursor.fetchall()
#now get the particle data per bin
for bin_number in range(0,len(bin_dict)):
bin_LL = bin_dict[bin_number][0]
bin_UL = bin_dict[bin_number][1]
##### SP2 data
#get core + coating count
cursor.execute(('SELECT count(*) from polar6_coating_2015 where UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and particle_type = %s and instrument = %s and (POW(rBC_mass_fg,(1/3.0))*101.994391398+2*coat_thickness_nm) >=%s and (POW(rBC_mass_fg,(1/3.0))*101.994391398+2*coat_thickness_nm) <=%s'),
(time_min,time_max, 'incand', 'UBCSP2',bin_LL,bin_UL))
core_plus_coating_count = cursor.fetchall()[0][0]
#get core only data
cursor.execute(('SELECT rBC_mass_fg, coat_thickness_nm from polar6_coating_2015 where UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and particle_type = %s and instrument = %s and (POW(rBC_mass_fg,(1/3.0))*101.994391398) >=%s and (POW(rBC_mass_fg,(1/3.0))*101.994391398) <=%s'),
(time_min,time_max, 'incand', 'UBCSP2',bin_LL,bin_UL))
core_only_data = cursor.fetchall()
#### UHSAS data
UHSAS_norm_number = float(newline[bin_number+4].rstrip(',')) #this is dN/dLogD per sccm
#check that we have both valid UHSAS and SP2 data, we can only make a meaningful UHSAS/SP2 conc ratio if we have valid measurements for both
if UHSAS_norm_number < 0: #-9999 is missing data and any other negative is a data problem
#print UNIX_time_stamp, 'no UHSAS data'
UHSAS_number = None
core_plus_coating_number_conc = None
core_only_number_conc = None
elif TandP_data == []:
#print UNIX_time_stamp, 'no SP2 data: T and P missing'
UHSAS_number = None
core_plus_coating_number_conc = None
core_only_number_conc = None
elif np.isnan(interval_sampled_volume) == True:
#print UNIX_time_stamp, 'no SP2 data: no, or bad, sample flow data'
UHSAS_number = None
core_plus_coating_number_conc = None
core_only_number_conc = None
else:
temperature = TandP_data[0][0] + 273.15 #convert to Kelvin
pressure = TandP_data[0][1]
correction_factor_for_STP = (101325/pressure)*(temperature/273.15)
UHSAS_number = UHSAS_norm_number*(math.log(bin_UL)-math.log(bin_LL)) #this is dN per sccm
core_plus_coating_number_conc = core_plus_coating_count*correction_factor_for_STP/interval_sampled_volume #dN/sccm
core_only_number_conc = len(core_only_data)*correction_factor_for_STP/interval_sampled_volume
if no_prev_particle == True: #in this case need to ignore first particle (but don't want negative if the count is zero)
							if len(core_only_data) > 0: #core_only_count was never defined; use the rows fetched above
core_only_number_conc = (len(core_only_data)-1)*correction_factor_for_STP/interval_sampled_volume
else:
core_only_number_conc = 0
if core_plus_coating_count > 0:
core_plus_coating_number_conc = (core_plus_coating_count-1)*correction_factor_for_STP/interval_sampled_volume #dN/sccm
else:
core_plus_coating_number_conc = 0
#calcualte and write mean core and coating sizes (need a core and a coating value)
new_list = []
for row in core_only_data:
mass = row[0]
coat = row[1]
if mass != None and coat != None:
new_list.append([mass, coat])
if new_list != []:
mean_rBC_mass = np.mean([row[0] for row in new_list])
mean_core_dia = (((mean_rBC_mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
mean_coating = np.mean([row[1] for row in new_list])
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'mean_core_dia',
'prop_value': float(mean_core_dia),
}
cursor.execute(add_data, binned_data)
cnx.commit()
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'mean_coating_th',
'prop_value': float(mean_coating),
}
cursor.execute(add_data, binned_data)
cnx.commit()
#write number concs if we have the available data
if UHSAS_number != None and core_plus_coating_number_conc != None and core_only_number_conc != None:
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'UHSAS_#',
'prop_value': UHSAS_number,
}
cursor.execute(add_data, binned_data)
cnx.commit()
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'SP2_coated_#',
'prop_value': core_plus_coating_number_conc,
}
cursor.execute(add_data, binned_data)
cnx.commit()
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'SP2_core_#',
'prop_value': core_only_number_conc,
}
cursor.execute(add_data, binned_data)
cnx.commit()
cnx.close()
|
|
#
# BitBake Graphical GTK based Dependency Explorer
#
# Copyright (C) 2007 Ross Burton
# Copyright (C) 2007 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import gtk
import Queue
import threading
import xmlrpclib
import bb
import bb.event
from bb.ui.crumbs.progressbar import HobProgressBar
# Package Model
(COL_PKG_NAME) = (0)
# Dependency Model
(TYPE_DEP, TYPE_RDEP) = (0, 1)
(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2)
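# Each row of the dependency model holds (COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE):
# the dependency kind (TYPE_DEP for build-time, TYPE_RDEP for runtime), the package that
# declares the dependency, and the package depended upon. The filtered tree views below
# each show one slice of this single model.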
class PackageDepView(gtk.TreeView):
def __init__(self, model, dep_type, label):
gtk.TreeView.__init__(self)
self.current = None
self.dep_type = dep_type
self.filter_model = model.filter_new()
self.filter_model.set_visible_func(self._filter)
self.set_model(self.filter_model)
#self.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PACKAGE))
def _filter(self, model, iter):
(this_type, package) = model.get(iter, COL_DEP_TYPE, COL_DEP_PARENT)
if this_type != self.dep_type: return False
return package == self.current
def set_current_package(self, package):
self.current = package
self.filter_model.refilter()
class PackageReverseDepView(gtk.TreeView):
def __init__(self, model, label):
gtk.TreeView.__init__(self)
self.current = None
self.filter_model = model.filter_new()
self.filter_model.set_visible_func(self._filter)
self.set_model(self.filter_model)
self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PARENT))
def _filter(self, model, iter):
package = model.get_value(iter, COL_DEP_PACKAGE)
return package == self.current
def set_current_package(self, package):
self.current = package
self.filter_model.refilter()
class DepExplorer(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
self.set_title("Dependency Explorer")
self.set_default_size(500, 500)
self.connect("delete-event", gtk.main_quit)
# Create the data models
self.pkg_model = gtk.ListStore(gobject.TYPE_STRING)
self.pkg_model.set_sort_column_id(COL_PKG_NAME, gtk.SORT_ASCENDING)
self.depends_model = gtk.ListStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING)
self.depends_model.set_sort_column_id(COL_DEP_PACKAGE, gtk.SORT_ASCENDING)
pane = gtk.HPaned()
pane.set_position(250)
self.add(pane)
# The master list of packages
scrolled = gtk.ScrolledWindow()
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.set_shadow_type(gtk.SHADOW_IN)
self.pkg_treeview = gtk.TreeView(self.pkg_model)
self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
column = gtk.TreeViewColumn("Package", gtk.CellRendererText(), text=COL_PKG_NAME)
self.pkg_treeview.append_column(column)
pane.add1(scrolled)
scrolled.add(self.pkg_treeview)
box = gtk.VBox(homogeneous=True, spacing=4)
# Runtime Depends
scrolled = gtk.ScrolledWindow()
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.set_shadow_type(gtk.SHADOW_IN)
self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends")
self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
scrolled.add(self.rdep_treeview)
box.add(scrolled)
# Build Depends
scrolled = gtk.ScrolledWindow()
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.set_shadow_type(gtk.SHADOW_IN)
self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends")
self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
scrolled.add(self.dep_treeview)
box.add(scrolled)
pane.add2(box)
# Reverse Depends
scrolled = gtk.ScrolledWindow()
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.set_shadow_type(gtk.SHADOW_IN)
self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends")
self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
scrolled.add(self.revdep_treeview)
box.add(scrolled)
pane.add2(box)
self.show_all()
def on_package_activated(self, treeview, path, column, data_col):
model = treeview.get_model()
package = model.get_value(model.get_iter(path), data_col)
pkg_path = []
def finder(model, path, iter, needle):
package = model.get_value(iter, COL_PKG_NAME)
if package == needle:
pkg_path.append(path)
return True
else:
return False
self.pkg_model.foreach(finder, package)
if pkg_path:
self.pkg_treeview.get_selection().select_path(pkg_path[0])
self.pkg_treeview.scroll_to_cell(pkg_path[0])
def on_cursor_changed(self, selection):
(model, it) = selection.get_selected()
        if it is None:
current_package = None
else:
current_package = model.get_value(it, COL_PKG_NAME)
self.rdep_treeview.set_current_package(current_package)
self.dep_treeview.set_current_package(current_package)
self.revdep_treeview.set_current_package(current_package)
def parse(depgraph, pkg_model, depends_model):
for package in depgraph["pn"]:
pkg_model.set(pkg_model.append(), COL_PKG_NAME, package)
for package in depgraph["depends"]:
for depend in depgraph["depends"][package]:
depends_model.set (depends_model.append(),
COL_DEP_TYPE, TYPE_DEP,
COL_DEP_PARENT, package,
COL_DEP_PACKAGE, depend)
for package in depgraph["rdepends-pn"]:
for rdepend in depgraph["rdepends-pn"][package]:
depends_model.set (depends_model.append(),
COL_DEP_TYPE, TYPE_RDEP,
COL_DEP_PARENT, package,
COL_DEP_PACKAGE, rdepend)
class gtkthread(threading.Thread):
quit = threading.Event()
def __init__(self, shutdown):
threading.Thread.__init__(self)
self.setDaemon(True)
self.shutdown = shutdown
def run(self):
gobject.threads_init()
gtk.gdk.threads_init()
gtk.main()
gtkthread.quit.set()
def main(server, eventHandler):
try:
cmdline, error = server.runCommand(["getCmdLineAction"])
if error:
print("Error getting bitbake commandline: %s" % error)
return 1
elif not cmdline:
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
return 1
elif not cmdline or cmdline[0] != "generateDotGraph":
print("This UI is only compatible with the -g option")
return 1
ret, error = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]])
if error:
print("Error running command '%s': %s" % (cmdline, error))
return 1
elif ret != True:
print("Error running command '%s': returned %s" % (cmdline, ret))
return 1
except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x)
return
shutdown = 0
gtkgui = gtkthread(shutdown)
gtkgui.start()
gtk.gdk.threads_enter()
dep = DepExplorer()
bardialog = gtk.Dialog(parent=dep,
flags=gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT)
bardialog.set_default_size(400, 50)
pbar = HobProgressBar()
bardialog.vbox.pack_start(pbar)
bardialog.show_all()
bardialog.connect("delete-event", gtk.main_quit)
gtk.gdk.threads_leave()
progress_total = 0
while True:
try:
event = eventHandler.waitEvent(0.25)
if gtkthread.quit.isSet():
_, error = server.runCommand(["stateStop"])
if error:
print('Unable to cleanly stop: %s' % error)
break
if event is None:
continue
if isinstance(event, bb.event.CacheLoadStarted):
progress_total = event.total
gtk.gdk.threads_enter()
bardialog.set_title("Loading Cache")
pbar.update(0)
gtk.gdk.threads_leave()
if isinstance(event, bb.event.CacheLoadProgress):
x = event.current
gtk.gdk.threads_enter()
pbar.update(x * 1.0 / progress_total)
pbar.set_title('')
gtk.gdk.threads_leave()
continue
if isinstance(event, bb.event.CacheLoadCompleted):
bardialog.hide()
continue
if isinstance(event, bb.event.ParseStarted):
progress_total = event.total
if progress_total == 0:
continue
gtk.gdk.threads_enter()
pbar.update(0)
bardialog.set_title("Processing recipes")
gtk.gdk.threads_leave()
if isinstance(event, bb.event.ParseProgress):
x = event.current
gtk.gdk.threads_enter()
pbar.update(x * 1.0 / progress_total)
pbar.set_title('')
gtk.gdk.threads_leave()
continue
if isinstance(event, bb.event.ParseCompleted):
bardialog.hide()
continue
if isinstance(event, bb.event.DepTreeGenerated):
gtk.gdk.threads_enter()
parse(event._depgraph, dep.pkg_model, dep.depends_model)
gtk.gdk.threads_leave()
if isinstance(event, bb.command.CommandCompleted):
continue
if isinstance(event, bb.command.CommandFailed):
print("Command execution failed: %s" % event.error)
return event.exitcode
if isinstance(event, bb.command.CommandExit):
return event.exitcode
if isinstance(event, bb.cooker.CookerExit):
break
continue
except EnvironmentError as ioerror:
# ignore interrupted io
if ioerror.args[0] == 4:
pass
except KeyboardInterrupt:
if shutdown == 2:
print("\nThird Keyboard Interrupt, exit.\n")
break
if shutdown == 1:
print("\nSecond Keyboard Interrupt, stopping...\n")
_, error = server.runCommand(["stateStop"])
if error:
print('Unable to cleanly stop: %s' % error)
if shutdown == 0:
print("\nKeyboard Interrupt, closing down...\n")
_, error = server.runCommand(["stateShutdown"])
if error:
print('Unable to cleanly shutdown: %s' % error)
shutdown = shutdown + 1
pass
|
|
#!/usr/bin/env python
#-
# Copyright (c) 2006 Verdens Gang AS
# Copyright (c) 2006-2015 Varnish Software AS
# All rights reserved.
#
# Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Generate various .c and .h files for the VCL compiler and the interfaces
# for it.
#######################################################################
# These are our tokens
# We could drop all words such as "include", "if" etc, and use the
# ID type instead, but declaring them tokens makes them reserved words
# which hopefully makes for better error messages.
# XXX: does it actually do that ?
import sys
srcroot = "../.."
buildroot = "../.."
if len(sys.argv) == 3:
srcroot = sys.argv[1]
buildroot = sys.argv[2]
tokens = {
"T_INC": "++",
"T_DEC": "--",
"T_CAND": "&&",
"T_COR": "||",
"T_LEQ": "<=",
"T_EQ": "==",
"T_NEQ": "!=",
"T_GEQ": ">=",
"T_SHR": ">>",
"T_SHL": "<<",
"T_INCR": "+=",
"T_DECR": "-=",
"T_MUL": "*=",
"T_DIV": "/=",
"T_NOMATCH": "!~",
# Single char tokens, for convenience on one line
None: "{}()*+-/%><=;!&.|~,",
# These have handwritten recognizers
"ID": None,
"CNUM": None,
"CSTR": None,
"EOI": None,
"CSRC": None,
}
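# polish_tokens() further down expands the None entry above into one token per
# character, keyed by the quoted character itself (e.g. '{' becomes the token
# name "'{'"); the multi-character operators keep their symbolic T_* names.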
#######################################################################
# Our methods and actions
returns = (
###############################################################
# Client side
('recv',
"C",
('synth', 'pass', 'pipe', 'hash', 'purge',)
),
('pipe',
"C",
('synth', 'pipe',)
),
('pass',
"C",
('synth', 'restart', 'fetch',)
),
('hash',
"C",
('lookup',)
),
('purge',
"C",
('synth', 'restart',)
),
('miss',
"C",
('synth', 'restart', 'pass', 'fetch',)
),
('hit',
"C",
('synth', 'restart', 'pass', 'fetch', 'miss', 'deliver',)
),
('deliver',
"C",
('synth', 'restart', 'deliver',)
),
('synth',
"C",
('restart', 'deliver',)
),
###############################################################
# Backend-fetch
('backend_fetch',
"B",
('fetch', 'abandon')
),
('backend_response',
"B",
('deliver', 'retry', 'abandon')
),
('backend_error',
"B",
('deliver', 'retry', 'abandon')
),
###############################################################
# Housekeeping
('init',
"",
('ok', 'fail')
),
('fini',
"",
('ok',)
),
)
#######################################################################
# Variables available in sessions
#
# 'all' means all methods
# 'client' means all methods tagged "C"
# 'backend' means all methods tagged "B"
# 'both' means all methods tagged "B" or "C"
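# For orientation, each entry below has the shape
#   (name, VCL type, methods that may read it, methods that may write it, doc),
# e.g. the existing 'req.url' entry reads
#   ('req.url', 'STRING', ( 'client',), ( 'client',), """ The requested URL. """),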
sp_variables = [
('remote.ip',
'IP',
( 'client',),
( ), """
The IP address of the other end of the TCP connection.
	This can either be the client's IP, or the outgoing IP
of a proxy server.
"""
),
('client.ip',
'IP',
( 'client',),
( ), """
The client's IP address.
"""
),
('client.identity',
'STRING',
( 'client',),
( 'client',), """
Identification of the client, used to load balance
in the client director.
"""
),
('local.ip',
'IP',
( 'client',),
( ), """
The IP address of the local end of the TCP connection.
"""
),
('server.ip',
'IP',
( 'client',),
( ), """
The IP address of the socket on which the client
connection was received.
"""
),
('server.hostname',
'STRING',
( 'all',),
( ), """
The host name of the server.
"""
),
('server.identity',
'STRING',
( 'all',),
( ), """
The identity of the server, as set by the -i
parameter. If the -i parameter is not passed to varnishd,
server.identity will be set to the name of the instance, as
specified by the -n parameter.
"""
),
('req',
'HTTP',
( 'client',),
( ), """
The entire request HTTP data structure
"""
),
('req.method',
'STRING',
( 'client',),
( 'client',), """
The request type (e.g. "GET", "HEAD").
"""
),
('req.url',
'STRING',
( 'client',),
( 'client',), """
The requested URL.
"""
),
('req.proto',
'STRING',
( 'client',),
( 'client',), """
The HTTP protocol version used by the client.
"""
),
('req.http.',
'HEADER',
( 'client',),
( 'client',), """
The corresponding HTTP header.
"""
),
('req.restarts',
'INT',
( 'client',),
( ), """
A count of how many times this request has been restarted.
"""
),
('req.esi_level',
'INT',
( 'client',),
( ), """
A count of how many levels of ESI requests we're currently at.
"""
),
('req.ttl',
'DURATION',
( 'client',),
( 'client',), """
"""
),
('req.xid',
'STRING',
( 'client',),
( ), """
Unique ID of this request.
"""
),
('req.esi',
'BOOL',
( 'client',),
( 'client',), """
Boolean. Set to false to disable ESI processing
regardless of any value in beresp.do_esi. Defaults
to true. This variable is subject to change in
future versions, you should avoid using it.
"""
),
('req.can_gzip',
'BOOL',
( 'client',),
( ), """
Does the client accept the gzip transfer encoding.
"""
),
('req.backend_hint',
'BACKEND',
( 'client', ),
( 'client',), """
Set bereq.backend to this if we attempt to fetch.
"""
),
('req.hash_ignore_busy',
'BOOL',
( 'recv',),
( 'recv',), """
Ignore any busy object during cache lookup. You
	would want to do this if you have two servers looking
up content from each other to avoid potential deadlocks.
"""
),
('req.hash_always_miss',
'BOOL',
( 'recv',),
( 'recv',), """
Force a cache miss for this request. If set to true
Varnish will disregard any existing objects and
always (re)fetch from the backend.
"""
),
('req_top.method',
'STRING',
( 'client',),
(), """
The request method of the top-level request in a tree
of ESI requests. (e.g. "GET", "HEAD").
Identical to req.method in non-ESI requests.
"""
),
('req_top.url',
'STRING',
( 'client',),
(), """
The requested URL of the top-level request in a tree
of ESI requests.
Identical to req.url in non-ESI requests.
"""
),
('req_top.http.',
'HEADER',
( 'client',),
(), """
HTTP headers of the top-level request in a tree of ESI requests.
Identical to req.http. in non-ESI requests.
"""
),
('req_top.proto',
'STRING',
( 'client',),
(), """
HTTP protocol version of the top-level request in a tree of
ESI requests.
Identical to req.proto in non-ESI requests.
"""
),
('bereq',
'HTTP',
( 'backend',),
( ), """
The entire backend request HTTP data structure
"""
),
('bereq.xid',
'STRING',
( 'backend',),
( ), """
Unique ID of this request.
"""
),
('bereq.retries',
'INT',
( 'backend',),
( ), """
A count of how many times this request has been retried.
"""
),
('bereq.backend',
'BACKEND',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
This is the backend or director we attempt to fetch from.
"""
),
('bereq.method',
'STRING',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The request type (e.g. "GET", "HEAD").
"""
),
('bereq.url',
'STRING',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The requested URL.
"""
),
('bereq.proto',
'STRING',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The HTTP protocol version used to talk to the server.
"""
),
('bereq.http.',
'HEADER',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The corresponding HTTP header.
"""
),
('bereq.uncacheable',
'BOOL',
( 'backend', ),
( ), """
Indicates whether this request is uncacheable due
to a pass in the client side or a hit on an existing
uncacheable object (aka hit-for-pass).
"""
),
('bereq.connect_timeout',
'DURATION',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The time in seconds to wait for a backend connection.
"""
),
('bereq.first_byte_timeout',
'DURATION',
( 'backend', ),
( 'backend', ), """
The time in seconds to wait for the first byte from
the backend. Not available in pipe mode.
"""
),
('bereq.between_bytes_timeout',
'DURATION',
( 'backend', ),
( 'backend', ), """
The time in seconds to wait between each received byte from the
backend. Not available in pipe mode.
"""
),
('beresp',
'HTTP',
( 'backend_response', 'backend_error'),
( ), """
The entire backend response HTTP data structure
"""
),
('beresp.proto',
'STRING',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
	The HTTP protocol version the backend replied with.
"""
),
('beresp.status',
'INT',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The HTTP status code returned by the server.
"""
),
('beresp.reason',
'STRING',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The HTTP status message returned by the server.
"""
),
('beresp.http.',
'HEADER',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The corresponding HTTP header.
"""
),
('beresp.do_esi',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Boolean. ESI-process the object after fetching it.
Defaults to false. Set it to true to parse the
object for ESI directives. Will only be honored if
req.esi is true.
"""
),
('beresp.do_stream',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Deliver the object to the client directly without
fetching the whole object into varnish. If this
request is pass'ed it will not be stored in memory.
"""
),
('beresp.do_gzip',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Boolean. Gzip the object before storing it. Defaults
to false. When http_gzip_support is on Varnish will
request already compressed content from the backend
and as such compression in Varnish is not needed.
"""
),
('beresp.do_gunzip',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Boolean. Unzip the object before storing it in the
cache. Defaults to false.
"""
),
('beresp.was_304',
'BOOL',
( 'backend_response', 'backend_error'),
( ), """
Boolean. If this is a successful 304 response to a
backend conditional request refreshing an existing
cache object.
"""
),
('beresp.uncacheable',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Inherited from bereq.uncacheable, see there.
Setting this variable makes the object uncacheable, which may
get stored as a hit-for-pass object in the cache.
Clearing the variable has no effect and will log the warning
"Ignoring attempt to reset beresp.uncacheable".
"""
),
('beresp.ttl',
'DURATION',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The object's remaining time to live, in seconds.
"""
),
('beresp.age',
'DURATION',
( 'backend_response', 'backend_error'),
( ), """
The age of the object.
"""
),
('beresp.grace',
'DURATION',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Set to a period to enable grace.
"""
),
('beresp.keep',
'DURATION',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Set to a period to enable conditional backend requests.
The keep time is cache lifetime in addition to the ttl.
Objects with ttl expired but with keep time left may be used
to issue conditional (If-Modified-Since / If-None-Match)
requests to the backend to refresh them.
"""
),
('beresp.backend',
'BACKEND',
( 'backend_response', 'backend_error'),
( ), """
This is the backend we fetched from. If bereq.backend
was set to a director, this will be the backend selected
by the director.
"""
),
('beresp.backend.name',
'STRING',
( 'backend_response', 'backend_error'),
( ), """
Name of the backend this response was fetched from.
"""
),
('beresp.backend.ip',
'IP',
( 'backend_response', 'backend_error'),
( ), """
IP of the backend this response was fetched from.
"""
),
('beresp.storage_hint',
'STRING',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Hint to Varnish that you want to save this object to a
particular storage backend.
"""
),
('obj.proto',
'STRING',
( 'hit', ),
( ), """
The HTTP protocol version used when the object was retrieved.
"""
),
('obj.status',
'INT',
( 'hit',),
( ), """
The HTTP status code returned by the server.
"""
),
('obj.reason',
'STRING',
( 'hit',),
( ), """
The HTTP status message returned by the server.
"""
),
('obj.hits',
'INT',
( 'hit', 'deliver',),
( ), """
The count of cache-hits on this object. A value of 0 indicates a
cache miss.
"""
),
('obj.http.',
'HEADER',
( 'hit', ),
( ), """
The corresponding HTTP header.
"""
),
('obj.ttl',
'DURATION',
( 'hit', ),
( ), """
The object's remaining time to live, in seconds.
"""
),
('obj.age',
'DURATION',
( 'hit', ),
( ), """
The age of the object.
"""
),
('obj.grace',
'DURATION',
( 'hit', ),
( ), """
The object's remaining grace period in seconds.
"""
),
('obj.keep',
'DURATION',
( 'hit', ),
( ), """
The object's remaining keep period in seconds.
"""
),
('obj.uncacheable',
'BOOL',
( 'deliver', ),
( ), """
Whether the object is uncacheable (pass or hit-for-pass).
"""
),
('resp',
'HTTP',
( 'deliver', 'synth'),
( ), """
The entire response HTTP data structure.
"""
),
('resp.proto',
'STRING',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The HTTP protocol version to use for the response.
"""
),
('resp.status',
'INT',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The HTTP status code that will be returned.
Assigning a HTTP standardized code to resp.status will also
set resp.reason to the corresponding status message.
"""
),
('resp.reason',
'STRING',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The HTTP status message that will be returned.
"""
),
('resp.http.',
'HEADER',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The corresponding HTTP header.
"""
),
('resp.is_streaming',
'BOOL',
( 'deliver', 'synth', ),
( ), """
Returns true when the response will be streamed
from the backend.
"""
),
('now',
'TIME',
( 'all',),
( ), """
The current time, in seconds since the epoch. When
used in string context it returns a formatted string.
"""
),
]
# Backwards compatibility:
aliases = [
]
stv_variables = (
('free_space', 'BYTES', "0.", 'storage.<name>.free_space', """
Free space available in the named stevedore. Only available for
the malloc stevedore.
"""),
('used_space', 'BYTES', "0.", 'storage.<name>.used_space', """
Used space in the named stevedore. Only available for the malloc
stevedore.
"""),
('happy', 'BOOL', "0", 'storage.<name>.happy', """
Health status for the named stevedore. Not available in any of the
current stevedores.
"""),
)
#######################################################################
# VCL to C type conversion
vcltypes = {
'STRING_LIST': "void*",
}
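# The loop below scrapes "typedef ... VCL_<TYPE>;" lines out of vrt.h and records
# the C type for each VCL type, e.g. a line of the form
#	typedef const char *	VCL_STRING;
# would yield vcltypes["STRING"] = "const char *". (The example line is only
# illustrative; the authoritative list is whatever vrt.h actually contains.)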
fi = open(srcroot + "/include/vrt.h")
for i in fi:
j = i.split();
if len(j) < 3:
continue
if j[0] != "typedef":
continue
if j[-1][-1] != ";":
continue
if j[-1][:4] != "VCL_":
continue
d = " ".join(j[1:-1])
vcltypes[j[-1][4:-1]] = d
fi.close()
#######################################################################
# Nothing is easily configurable below this line.
#######################################################################
import sys
import copy
#######################################################################
# Emit a function to recognize tokens in a string
def emit_vcl_fixed_token(fo, tokens):
recog = list()
emit = dict()
for i in tokens:
j = tokens[i]
if (j != None):
recog.append(j)
emit[j] = i
recog.sort()
rrecog = copy.copy(recog)
rrecog.sort(key = lambda x: -len(x))
fo.write("""
#define M1()\tdo {*q = p + 1; return (p[0]); } while (0)
#define M2(c,t)\tdo {if (p[1] == (c)) { *q = p + 2; return (t); }} while (0)
unsigned
vcl_fixed_token(const char *p, const char **q)
{
\tswitch (p[0]) {
""")
last_initial = None
for i in recog:
if (i[0] == last_initial):
continue
last_initial = i[0]
fo.write("\tcase '%s':\n" % last_initial)
need_ret = True
for j in rrecog:
if (j[0] != last_initial):
continue
if len(j) == 2:
fo.write("\t\tM2('%s', %s);\n" %
(j[1], emit[j]))
elif len(j) == 1:
fo.write("\t\tM1();\n")
need_ret = False
else:
fo.write("\t\tif (")
k = 1
l = len(j)
while (k < l):
fo.write("p[%d] == '%s'" % (k, j[k]))
fo.write(" &&")
if (k % 3) == 0:
fo.write("\n\t\t ")
else:
fo.write(" ")
k += 1
fo.write("!isvar(p[%d])) {\n" % l)
fo.write("\t\t\t*q = p + %d;\n" % l)
fo.write("\t\t\treturn (%s);\n" % emit[j])
fo.write("\t\t}\n")
if need_ret:
fo.write("\t\treturn (0);\n")
fo.write("\tdefault:\n\t\treturn (0);\n\t}\n}\n")
#######################################################################
# Emit the vcl_tnames (token->string) conversion array
def emit_vcl_tnames(fo, tokens):
fo.write("\nconst char * const vcl_tnames[256] = {\n")
l = list(tokens.keys())
l.sort()
for i in l:
j = tokens[i]
if j == None:
j = i
if i[0] == "'":
j = i
fo.write("\t[%s] = \"%s\",\n" % (i, j))
fo.write("};\n")
#######################################################################
# Read a C-source file and spit out code that outputs it with VSB_cat()
def emit_file(fo, fd, bn):
fn = fd + "/" + bn
fi = open(fn)
fc = fi.read()
fi.close()
w = 66 # Width of lines, after white space prefix
maxlen = 10240 # Max length of string literal
x = 0
l = 0
fo.write("\n\t/* %s */\n\n" % fn)
fo.write('\tVSB_cat(sb, "/* ---===### %s ###===--- */\\n\\n");\n' % bn)
for c in fc:
if l == 0:
fo.write("\tVSB_cat(sb, \"")
l += 12
x += 12
if x == 0:
fo.write("\t \"")
d = c
if c == '\n':
d = "\\n"
elif c == '\t':
d = "\\t"
elif c == '"':
d = "\\\""
elif c == '\\':
d = "\\\\"
if c == '\n' and x > w - 20:
fo.write(d + "\"\n")
x = 0
continue
if c.isspace() and x > w - 10:
fo.write(d + "\"\n")
x = 0
continue
fo.write(d)
x += len(d)
l += len(d)
if l > maxlen:
fo.write("\");\n")
l = 0;
x = 0
if x > w - 3:
fo.write("\"\n")
x = 0
if x != 0:
fo.write("\"\n")
if l != 0:
fo.write("\t);\n")
fo.write('\tVSB_cat(sb, "\\n");\n')
#######################################################################
def polish_tokens(tokens):
# Expand single char tokens
st = tokens[None]
del tokens[None]
for i in st:
tokens["'" + i + "'"] = i
#######################################################################
def file_header(fo):
fo.write("""/*
* NB: This file is machine generated, DO NOT EDIT!
*
* Edit and run generate.py instead
*/
""")
#######################################################################
polish_tokens(tokens)
fo = open(buildroot + "/lib/libvcc/vcc_token_defs.h", "w")
file_header(fo)
j = 128
l = list(tokens.keys())
l.sort()
for i in l:
if i[0] == "'":
continue
fo.write("#define\t%s %d\n" % (i, j))
j += 1
assert j < 256
fo.close()
#######################################################################
rets = dict()
vcls = list()
vcls_client = list()
vcls_backend = list()
for i in returns:
vcls.append(i[0])
for j in i[1]:
if j == "B":
vcls_backend.append(i[0])
elif j == "C":
vcls_client.append(i[0])
for j in i[2]:
rets[j] = True
#######################################################################
fo = open(buildroot + "/include/tbl/vcl_returns.h", "w")
file_header(fo)
fo.write("\n/*lint -save -e525 -e539 */\n")
fo.write("\n#ifdef VCL_RET_MAC\n")
l = list(rets.keys())
l.sort()
ll = list(returns)
ll.sort()
for i in l:
fo.write("VCL_RET_MAC(%s, %s" % (i.lower(), i.upper()))
s=",\n\t"
for j in ll:
if i in j[2]:
fo.write("%sVCL_MET_%s" % (s, j[0].upper()))
s = " |\n\t"
fo.write("\n)\n")
fo.write("#endif\n")
fo.write("\n#ifdef VCL_MET_MAC\n")
for i in ll:
fo.write("VCL_MET_MAC(%s, %s," % (i[0].lower(), i[0].upper()))
p = " (\n\t"
lll = list(i[2])
lll.sort()
for j in lll:
fo.write("%s(1U << VCL_RET_%s)" % (p, j.upper()))
p = " |\n\t"
fo.write("\n))\n")
fo.write("#endif\n")
fo.write("\n/*lint -restore */\n")
fo.close()
#######################################################################
fo = open(buildroot + "/include/vcl.h", "w")
file_header(fo)
fo.write("""
struct vrt_ctx;
#define VRT_CTX const struct vrt_ctx *ctx
struct req;
struct busyobj;
struct ws;
struct cli;
struct worker;
enum vcl_event_e {
VCL_EVENT_LOAD,
VCL_EVENT_WARM,
VCL_EVENT_USE,
VCL_EVENT_COLD,
VCL_EVENT_DISCARD,
};
typedef int vcl_event_f(VRT_CTX, enum vcl_event_e);
typedef int vcl_init_f(VRT_CTX);
typedef void vcl_fini_f(VRT_CTX);
typedef int vcl_func_f(VRT_CTX);
""")
def tbl40(a, b):
while len(a.expandtabs()) < 40:
a += "\t"
return a + b
fo.write("\n/* VCL Methods */\n")
n = 1
for i in returns:
fo.write(
tbl40("#define VCL_MET_%s" % i[0].upper(), "(1U << %d)\n" % n)
)
n += 1
fo.write("\n" + tbl40("#define VCL_MET_MAX", "%d\n" % n))
fo.write("\n" + tbl40("#define VCL_MET_MASK", "0x%x\n" % ((1 << n) - 1)))
fo.write("\n/* VCL Returns */\n")
n = 0
l = list(rets.keys())
l.sort()
for i in l:
fo.write(tbl40("#define VCL_RET_%s" % i.upper(), "%d\n" % n))
n += 1
fo.write("\n" + tbl40("#define VCL_RET_MAX", "%d\n" % n))
fo.write("""
struct VCL_conf {
unsigned magic;
#define VCL_CONF_MAGIC 0x7406c509 /* from /dev/random */
struct director **default_director;
const struct vrt_backend_probe *default_probe;
unsigned nref;
struct vrt_ref *ref;
unsigned nsrc;
const char **srcname;
const char **srcbody;
vcl_event_f *event_vcl;
""")
for i in returns:
fo.write("\tvcl_func_f\t*" + i[0] + "_func;\n")
fo.write("""
};
""")
fo.close()
#######################################################################
def restrict(fo, spec):
d = dict()
for j in spec:
if j == 'all':
for i in vcls:
d[i] = True
elif j == 'backend':
for i in vcls_backend:
d[i] = True
elif j == 'client':
for i in vcls_client:
d[i] = True
elif j == 'both':
for i in vcls_client:
d[i] = True
for i in vcls_backend:
d[i] = True
else:
assert j in vcls
d[j] = True
p = ""
n = 0
l = list(d.keys())
l.sort()
w = 0
fo.write("\t\t")
for j in l:
x = p + "VCL_MET_" + j.upper()
if w + len(x) > 60:
fo.write("\n\t\t")
w = 0
fo.write(x)
w += len(x)
p = " | "
if len(d) == 0:
fo.write("0")
fo.write(",\n")
#######################################################################
fh = open(buildroot + "/include/vrt_obj.h", "w")
file_header(fh)
fo = open(buildroot + "/lib/libvcc/vcc_obj.c", "w")
file_header(fo)
fo.write("""
#include "config.h"
#include <stdio.h>
#include "vcc_compile.h"
const struct var vcc_vars[] = {
""")
def one_var(nm, spec):
fh.write("\n")
typ = spec[1]
cnam = i[0].replace(".", "_")
ctyp = vcltypes[typ]
fo.write("\t{ \"%s\", %s, %d,\n" % (nm, typ, len(nm)))
if len(spec[2]) == 0:
fo.write('\t NULL,\t/* No reads allowed */\n')
elif typ == "HEADER":
fo.write('\t "HDR_')
fo.write(nm.split(".")[0].upper())
fo.write('",\n')
else:
fo.write('\t "VRT_r_%s(ctx)",\n' % cnam)
if nm == i[0]:
fh.write("VCL_" + typ +
" VRT_r_%s(VRT_CTX);\n" % cnam )
restrict(fo, spec[2])
if len(spec[3]) == 0:
fo.write('\t NULL,\t/* No writes allowed */\n')
elif typ == "HEADER":
fo.write('\t "HDR_')
fo.write(nm.split(".")[0].upper())
fo.write('",\n')
else:
fo.write('\t "VRT_l_%s(ctx, ",\n' % cnam)
if nm == i[0]:
fh.write(
"void VRT_l_%s(VRT_CTX, " % cnam)
if typ != "STRING":
fh.write("VCL_" + typ + ");\n")
else:
fh.write(ctyp + ", ...);\n")
restrict(fo, spec[3])
fo.write("\t},\n")
sp_variables.sort()
aliases.sort()
for i in sp_variables:
one_var(i[0], i)
for j in aliases:
if j[1] == i[0]:
one_var(j[0], i)
fo.write("\t{ NULL }\n};\n")
fh.write("\n")
for i in stv_variables:
fh.write(vcltypes[i[1]] + " VRT_Stv_" + i[0] + "(const char *);\n")
fo.close()
fh.close()
#######################################################################
fo = open(buildroot + "/lib/libvcc/vcc_fixed_token.c", "w")
file_header(fo)
fo.write("""
#include "config.h"
#include <ctype.h>
#include <stdio.h>
#include "vcc_compile.h"
""")
emit_vcl_fixed_token(fo, tokens)
emit_vcl_tnames(fo, tokens)
fo.write("""
void
vcl_output_lang_h(struct vsb *sb)
{
""")
emit_file(fo, srcroot, "include/vdef.h")
emit_file(fo, buildroot, "include/vcl.h")
emit_file(fo, srcroot, "include/vrt.h")
emit_file(fo, buildroot, "include/vrt_obj.h")
fo.write("""
}
""")
fo.close()
#######################################################################
ft = open(buildroot + "/include/tbl/vcc_types.h", "w")
file_header(ft)
ft.write("/*lint -save -e525 -e539 */\n")
i = list(vcltypes.keys())
i.sort()
for j in i:
ft.write("VCC_TYPE(" + j + ")\n")
ft.write("/*lint -restore */\n")
ft.close()
#######################################################################
fo = open(buildroot + "/include/tbl/vrt_stv_var.h", "w")
file_header(fo)
fo.write("""
#ifndef VRTSTVTYPE
#define VRTSTVTYPE(ct)
#define VRTSTVTYPEX
#endif
#ifndef VRTSTVVAR
#define VRTSTVVAR(nm, vtype, ctype, dval)
#define VRTSTVVARX
#endif
""")
x=dict()
for i in stv_variables:
ct = vcltypes[i[1]]
if not ct in x:
fo.write("VRTSTVTYPE(" + ct + ")\n")
x[ct] = 1
fo.write("VRTSTVVAR(" + i[0] + ",\t" + i[1] + ",\t")
fo.write(ct + ",\t" + i[2] + ")")
fo.write("\n")
fo.write("""
#ifdef VRTSTVTYPEX
#undef VRTSTVTYPEX
#undef VRTSTVTYPE
#endif
#ifdef VRTSTVVARX
#undef VRTSTVVARX
#undef VRTSTVVAR
#endif
""")
fo.close()
#######################################################################
fp_vclvar = open(buildroot + "/doc/sphinx/include/vcl_var.rst", "w")
l = list()
for i in sp_variables:
l.append(i)
l.sort()
def rst_where(fo, h, l):
ll = list()
if len(l) == 0:
return
fo.write("\t" + h)
s = ""
for j in l:
if j == "both":
ll.append("client")
ll.append("backend")
elif j == "client":
ll.append(j)
elif j == "backend":
ll.append(j)
elif j == "all":
ll.append(j)
else:
ll.append("vcl_" + j)
for j in ll:
fo.write(s + j)
s = ", "
fo.write("\n\n")
hdr=""
for i in l:
j = i[0].split(".")
if j[0] != hdr:
fp_vclvar.write("\n" + j[0] + "\n")
fp_vclvar.write("~" * len(j[0]) + "\n")
hdr = j[0]
fp_vclvar.write("\n" + i[0] + "\n\n")
fp_vclvar.write("\tType: " + i[1] + "\n\n")
rst_where(fp_vclvar, "Readable from: ", i[2])
rst_where(fp_vclvar, "Writable from: ", i[3])
for j in i[4].split("\n"):
fp_vclvar.write("\t%s\n" % j.strip())
hdr = "storage"
fp_vclvar.write("\n" + hdr + "\n")
fp_vclvar.write("~" * len(hdr) + "\n")
for i in stv_variables:
fp_vclvar.write("\n" + i[3] + "\n\n")
fp_vclvar.write("\tType: " + i[1] + "\n\n")
fp_vclvar.write("\tReadable from: client, backend\n\n")
for j in i[4].split("\n"):
fp_vclvar.write("\t%s\n" % j.strip())
fp_vclvar.close()
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
from conary.lib.compat import namedtuple
SMALL = 0
LARGE = 1
DYNAMIC = 2
FAIL_UNKNOWN = 0
SKIP_UNKNOWN = 1
PRESERVE_UNKNOWN = 2
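# Wire format shared by StreamSet.freeze()/thaw(): each substream is written as a
# one-byte tag followed by its length and payload. The length is either 2 bytes
# (15-bit, SMALL) or 4 bytes with the high bit set (31-bit, LARGE); DYNAMIC picks
# whichever of the two fits the payload.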
class _BaseStream(object):
pass
class _ValueStream(_BaseStream):
_valueDefault = None
def __init__(self, value=None):
if value is None:
self._value = self._valueDefault
elif type(value) is str:
# This catches StringStream values, but conveniently its frozen
# form is identical to its value.
self.thaw(value)
else:
self.set(value)
def __call__(self):
return self._value
def __eq__(self, other):
return self._value == other._value
def __ne__(self, other):
return self._value != other._value
def __cmp__(self, other):
if type(self) != type(other):
raise TypeError("invalid type")
return cmp(self._value, other._value)
def __hash__(self):
return hash(self._value)
def set(self, value):
raise NotImplementedError
def freeze(self, skipSet=None):
return self._value
def thaw(self, frozen):
self.set(frozen)
def diff(self, other):
if other is None:
return None
elif type(self) != type(other):
raise TypeError("invalid type")
elif self._value == other._value:
# same as other, no diff
return None
else:
# different from other, diff is the entire value
return self.freeze()
def twm(self, diff, other):
if type(self) != type(other):
raise TypeError("invalid type")
if self._value == other._value:
# same as other, keep the diff
self.thaw(diff)
return False
elif self._value == diff:
# same as diff, keep self
return False
else:
# three different values, conflict
return True
class StringStream(_ValueStream):
_valueDefault = ''
def set(self, value):
if value is None:
# Technically this isn't compatible because cstreams would return
# None on get later, but when freezing None is treated the same as
# empty string by containers.
value = ''
elif not isinstance(value, str):
raise TypeError("invalid type '%s' for string stream" %
type(value).__name__)
self._value = value
class _NumericStream(_ValueStream):
_valueDefault = None
_intFormat = None
def set(self, value):
if isinstance(value, float):
value = int(value)
elif not isinstance(value, (int, long)) and value is not None:
raise TypeError("invalid type '%s' for numeric stream" %
type(value).__name__)
self._value = value
def freeze(self, skipSet=None):
if self._value is None:
return ''
else:
return struct.pack(self._intFormat, self._value)
def thaw(self, frozen):
if frozen:
value = struct.unpack(self._intFormat, frozen)[0]
else:
value = None
self.set(value)
class ByteStream(_NumericStream):
_intFormat = '>B'
class ShortStream(_NumericStream):
_intFormat = '>h'
class IntStream(_NumericStream):
_intFormat = '>i'
class LongLongStream(_NumericStream):
_intFormat = '>Q'
_tagInfo = namedtuple('_tagInfo', 'tag sizeType type name')
class StreamSet(_BaseStream):
streamDict = None
ignoreUnknown = FAIL_UNKNOWN
def __init__(self, data=None, offset=0):
self._unknownTags = []
for tag in self._getTags():
setattr(self, tag.name, tag.type())
if data is not None:
self.thaw(data[offset:])
@classmethod
def _getTags(cls):
# Cache stream set definition in a class variable, but look only
# exactly in the current class, never a parent class.
tags = cls.__dict__.get('_streamTags', None)
if not tags:
if not cls.streamDict:
raise ValueError(
"%s class is missing a streamDict class variable" %
cls.__name__)
tags = sorted(_tagInfo(tag, sizeType, type_, name)
for (tag, (sizeType, type_, name))
in cls.streamDict.items())
cls._streamTags = tags
return tags
def __eq__(self, other, skipSet=None):
if type(self) != type(other):
return False
for tag in self._getTags():
if skipSet and tag.name in skipSet:
continue
if getattr(self, tag.name) != getattr(other, tag.name):
return False
return True
def __ne__(self, other, skipSet=None):
return not self.__eq__(other, skipSet=skipSet)
def __hash__(self):
return hash(self.freeze())
def __deepcopy__(self, memo):
raise NotImplementedError
@staticmethod
def _pack(values, includeEmpty):
# equivalent to concatStrings from streamset.c
values.sort()
words = []
for tag, substream in values:
if substream is None:
continue
if not substream and not includeEmpty:
continue
sizeType = tag.sizeType
size = len(substream)
if sizeType == DYNAMIC:
if size < 0x8000:
sizeType = SMALL
else:
sizeType = LARGE
if sizeType == SMALL:
if size >= 0x8000:
raise ValueError("short int overflow")
fmt = '>BH'
elif sizeType == LARGE:
if size >= 0x80000000:
raise ValueError("long int overflow")
size |= 0x80000000
fmt = '>BI'
else:
raise TypeError("Invalid tag size")
words.append(struct.pack(fmt, tag.tag, size))
words.append(substream)
if includeEmpty and not words:
return None
return ''.join(words)
def freeze(self, skipSet=None, freezeKnown=True, freezeUnknown=True):
out = []
if freezeKnown:
for tag in self._getTags():
if skipSet and tag.name in skipSet:
continue
value = getattr(self, tag.name)
if isinstance(value, StreamSet):
substream = value.freeze(skipSet, freezeKnown,
freezeUnknown)
else:
substream = value.freeze(skipSet)
out.append((tag, substream))
if freezeUnknown:
for tag, substream in self._unknownTags:
if skipSet and tag.tag in skipSet:
continue
out.append((tag, substream))
return self._pack(out, includeEmpty=False)
@staticmethod
def _readTag(frozen):
tagNum = ord(frozen[0])
if ord(frozen[1]) & 0x80:
# 31 bit size
size = struct.unpack('>I', frozen[1:5])[0] & 0x7fffffff
sizeType = LARGE
frozen = frozen[5:]
else:
# 15 bit size
size = struct.unpack('>H', frozen[1:3])[0]
sizeType = SMALL
frozen = frozen[3:]
if len(frozen) < size:
raise ValueError("not enough data thawing stream set")
substream, frozen = frozen[:size], frozen[size:]
return tagNum, sizeType, substream, frozen
def thaw(self, frozen):
tagMap = dict((x.tag, x) for x in self._getTags())
self._unknownTags = []
while frozen:
tagNum, sizeType, substream, frozen = self._readTag(frozen)
# Find the matching stream from our stream definition
tag = tagMap.get(tagNum)
if not tag:
if self.ignoreUnknown == SKIP_UNKNOWN:
continue
elif self.ignoreUnknown == PRESERVE_UNKNOWN:
self._unknownTags.append((
_tagInfo(tagNum, sizeType, None, None), substream))
continue
else:
raise ValueError("unknown tag in stream set")
setattr(self, tag.name, tag.type(substream))
def diff(self, other, ignoreUnknown=False):
if type(self) != type(other):
raise TypeError("invalid type")
elif not ignoreUnknown and (self._unknownTags or other._unknownTags):
raise ValueError("Cannot diff streams with unknown tags")
out = []
for tag in self._getTags():
myvalue = getattr(self, tag.name)
othervalue = getattr(other, tag.name)
if isinstance(myvalue, StreamSet):
substream = myvalue.diff(othervalue, ignoreUnknown)
else:
substream = myvalue.diff(othervalue)
out.append((tag, substream))
return self._pack(out, includeEmpty=True)
def twm(self, diff, base, skip=None):
if type(self) != type(base):
raise TypeError("invalid type")
if not diff:
return False
tagMap = dict((x.tag, x) for x in self._getTags())
while diff:
tagNum, sizeType, substream, diff = self._readTag(diff)
tag = tagMap.get(tagNum)
if not tag:
raise NotImplementedError
if skip and tag.name in skip:
continue
myvalue = getattr(self, tag.name)
basevalue = getattr(base, tag.name)
myvalue.twm(substream, basevalue)
@classmethod
def find(cls, tagNum, frozen):
for tag in cls._getTags():
if tag.tag == tagNum:
break
else:
raise ValueError("unknown tag in stream set")
while frozen:
tagNum2, sizeType, substream, frozen = cls._readTag(frozen)
if tagNum2 == tagNum:
return tag.type(substream)
return None
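# A minimal usage sketch of StreamSet (illustrative only; the class name, tag
# numbers and field names below are made up, not part of this module):
#
#   class PersonStream(StreamSet):
#       streamDict = {
#           1: (SMALL, StringStream, 'name'),
#           2: (DYNAMIC, IntStream, 'age'),
#       }
#
#   p = PersonStream()
#   p.name.set('alice')
#   p.age.set(30)
#   frozen = p.freeze()       # tagged, length-prefixed concatenation of substreams
#   q = PersonStream(frozen)  # round-trips back through thaw()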
def splitFrozenStreamSet(frozen):
raise NotImplementedError
def whiteOutFrozenStreamSet(frozen, skipId):
raise NotImplementedError
|
|
#! /usr/bin/env python
# Copyright (c) 2013, Rob Ward
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# gerrit-flow -- Is a set of functionality to support interaction with
# the gerrit code review tool (http://code.google.com/p/gerrit/)
#
# This tool supports interaction with the gerrit server in a way that
# allows a number of workflows to be used in development.
#
# Please report any bugs to or feature requests to;
# https://github.com/rob-ward/gerrit-flow
#
import sys
import os
import logging
import subprocess
import hashlib
import json
import datetime
import webbrowser
import random
from git import *
global GERRIT_FLOW_VERSION
GERRIT_FLOW_VERSION = "0.0.1"
#############################
def get_origin_url():
logging.info("entering")
origin_url = subprocess.check_output(["git", "config", "--get", "remote.origin.url"]).rstrip()
logging.info("origin_url = " + origin_url)
return origin_url
#############################
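#The helpers below assume an ssh-style origin URL of the form
#ssh://[user@]host:port/project (the usual shape of a Gerrit remote); hostname
#and port are sliced out of that layout. This is an assumption about the remote
#configuration, not something validated elsewhere in this script.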
def get_server_hostname(url):
logging.info("entering")
start = url.find("@")
if start == -1:
#we need to handle urls without a username
start = url.find(":")
start = start + 2 # miss the //
url = url[start + 1:]
end = url.find(":")
hostname = url[:end]
logging.info("hostname = " + hostname)
return hostname
#############################
def get_server_port(url):
logging.info("entering")
start = url.find(":")
url = url[start + 1:]
start = url.find(":")
url = url[start + 1:]
end = url.find("/")
port = url[:end]
logging.info("port = " + port)
return port
#############################
def create_remote(repo):
logging.info("entering")
exists = False
for r in repo.remotes:
if r.name == "gerrit_upstream_remote":
exists = True
logging.info("repo already exists")
if exists == False:
origin_url = get_origin_url()
logging.info("create new remote")
repo.create_remote('gerrit_upstream_remote', origin_url)
logging.info("fetching from remote")
repo.remote("gerrit_upstream_remote").fetch()
return repo.remote("gerrit_upstream_remote")
#############################
def branch_exist_local(bname, repo):
logging.info("entering")
found = False
for b in repo.branches:
if str(b) == bname:
found = True
logging.info("branch exists local")
return found
#############################
def branch_exist_remote(bname, repo, remote):
logging.info("entering")
found = False
for r in remote.refs:
if str(r) == "gerrit_upstream_remote/" + bname:
found = True
logging.info("branch exists remote")
return found
#############################
def branch_exist(bname, repo, remote):
logging.info("entering")
found = branch_exist_local(bname, repo)
if found != True:
found = branch_exist_remote(bname, repo, remote)
if found == True:
logging.info("Branch exists")
else:
logging.info("Branch DOES NOT exist")
return found
#############################
def write_config(repo, issueid, key, value):
logging.info("entering")
logging.info("paramaters[repo, issueid = " + issueid + ", key = " + key + ", value = " + value + "]")
writer = repo.config_writer("repository")
sectionname = 'gerrit-flow "' + issueid + '"'
logging.info("section name = " + sectionname)
if writer.has_section(sectionname) == False:
logging.info("writer doesn't have section")
writer.add_section(sectionname)
writer.set(sectionname, key, value)
#############################
def read_config(repo, issueid, key):
logging.info("entering")
logging.info("paramaters[repo, issueid = " + issueid + ", key = " + key + "]")
reader = repo.config_reader("repository")
sectionname = 'gerrit-flow "' + issueid + '"'
logging.info("section name = " + sectionname)
value = reader.get(sectionname, key)
logging.info("value = " + value)
return value
#############################
def get_commit_hash(issue_name):
logging.info("entering")
logging.info("Issue name =" + issue_name)
commithash = hashlib.sha1()
commithash.update(issue_name)
logging.info("Commit Hash = I" + commithash.hexdigest())
return commithash.hexdigest()
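# The hash above is what ties an issue name to a Gerrit change: every command
# rebuilds the same Change-Id ("I" + sha1(issue_name)) deterministically, so no
# per-change state needs to be stored locally. Illustrative only:
#   get_commit_hash("ISSUE-123") == hashlib.sha1("ISSUE-123").hexdigest()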
#############################
def issue_exists_on_server(issue_name):
logging.info("entering")
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commithash = get_commit_hash(issue_name)
info = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--current-patch-set", "change:I" + commithash ])
if len(info.splitlines()) > 1:
logging.info("Issue exists")
return True
else:
logging.info("Issue DOES NOT exist")
return False
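# The gerrit query output appears to be one JSON object per matching change
# plus a trailing stats line, so more than one line in the response is taken
# to mean the change exists on the server.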
#############################
def checkout(repo, bname):
logging.info("entering")
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.info("Dirty repo")
return False
# need to check if there are modified files, if there are fail
for b in repo.branches:
if str(b) == bname:
logging.info("found branch")
b.checkout()
return True
return False
#############################
def do_start(argv):
logging.info("entering")
# start ISSUEID <origin point>
if len(argv) < 3 or len(argv) > 4:
# not a valid start command
print "Invalid command parameters"
logging.info("Bad parameters")
return
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.info("Repor dirty")
return
issueid = argv[2]
startpoint = "master"
if len(argv) == 4:
startpoint = argv[3]
logging.info("Startpoint is " + startpoint)
remote = create_remote(repo)
if branch_exist_remote(startpoint, repo, remote) == False:
print "Oh Dear:\n\tThe requested startpoint cannot be found on the gerrit server, you must" + \
"\tspecify a branch which exists upstream(where you changes will be merged back onto)"
logging.info("Startpoint not on server")
else:
if branch_exist(issueid, repo, remote) == False:
logging.info("No branch called " + issueid + " exists")
repo.git.branch(issueid, 'gerrit_upstream_remote/' + startpoint)
if branch_exist_local(issueid, repo) == True:
# creation of branch was successful
write_config(repo, issueid, "startpoint" , startpoint)
checkout(repo, issueid)
print("You are now checkedout on " + issueid)
logging.info("Branch creation was succesful")
else:
logging.info("Branch creation Failed")
else:
print "Oh Dear:\n\tA local branch called " + issueid + " exists!.\n\tAs such we cannot start a new instance for this issue."
logging.info("Local branch already exists")
#############################
def submit(repo, ref, append):
logging.info("entering")
remote = create_remote(repo)
issueid = repo.active_branch
startpoint = read_config(repo, issueid.name, "startpoint")
logging.info("Startpoint = " + startpoint)
# Check that the branch doesn't exist, then create it
if branch_exist(issueid.name + append, repo, remote) == True:
print "PANIC Stations:\n\tThe branch for this change commit already exists, this\n\tlikely means that a" + \
" previous draft upload\n\tfailed, the branch called " + issueid.name + append + \
" must be\n\tremoved before you can continue."
logging.debug("Submit Branch already exits, this is bad")
else:
failed = False
retval = repo.git.branch(issueid.name + append, 'gerrit_upstream_remote/' + startpoint)
print "\nCreating patchset for submittion... Please Stand By...\n"
retval = checkout(repo, issueid.name + append)
try:
retval = repo.git.merge("--squash", "--no-commit", issueid)
except:
print "Oh Dear:\n\tThe merge into the latest tip of " + startpoint + " failed." + \
"\n\tThe likely hood is that you need to merge in the latest changes in " + startpoint + \
"\n\tinto your branch"
logging.info("Merge into latest tip of startpoint failed")
logging.info("Reset head --hard")
repo.git.reset("--hard", "HEAD")
issueid.checkout()
logging.info("Deleting branch " + issueid.name + append)
repo.git.branch("-D", issueid.name + append)
return
commithash = get_commit_hash(issueid.name)
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
print"Contacting server to confirm that no current commit message is present, Standby...\n"
commitmessage = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--commit-message", "change:I" + commithash ])
if commitmessage.find('"rowCount":0') >= 0:
print "Generating default commit message.\n"
# we don't have a commit message yet, so create one
logging.info("No commit message exists so making one")
commitmessage = issueid.name + " - \n# Brief summary on line above(<50 chars)\n\n\n" + \
"# Describe in detail the change below\nChange-Description:\n\n\n# Describe how to test your change below\n" + \
"Change-TestProcedure:\n\n\n# DO NOT EDIT ANYTHING BELOW HERE\n\nGerrit.startpoint:" + startpoint + \
"\n\nChange-Id:I" + commithash
else:
# we have a commit message but we have to parse it from the JSON output
# TODO why is this not parsed as proper JSON?
logging.info("We have a commit message")
start = commitmessage.find(',"commitMessage":"')
start = start + 18
end = commitmessage.find('","createdOn":')
commitmessage = commitmessage[start:end].replace("\\n", "\n")
commitmessage = commitmessage.replace("Gerrit.startpoint:", "# DO NOT EDIT ANYTHING BELOW HERE\n\nGerrit.startpoint:")
logging.info("Writing commit message")
f = open(issueid.name + '_commitmessage', 'w')
f.write(commitmessage)
f.close()
subprocess.call(['vim', issueid.name + '_commitmessage'])
commitmessage = ""
f = file(issueid.name + '_commitmessage', "r")
for line in f:
if not line.startswith("#"):
commitmessage = commitmessage + line
print "Commiting you change to local git history.\n"
repo.git.commit("-a", '-m', commitmessage)
try:
print "Attempting to push change to the gerrit server, Please Stand By...\n"
retval = subprocess.check_output(["git", "push", "gerrit_upstream_remote", ref + startpoint], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
retval = e.output
#we want the output so print
print retval
if retval.find("(no changes made)") >= 0:
logging.info("No changes made")
print "Oh Dear: \n\tYou don't seem to have commited any changes, make\n\tsure you have saved your files, and committed them!!"
failed = True
issueid.checkout()
logging.info("Checked out original branch")
logging.info("Deleting branch " + issueid.name + append)
repo.git.branch("-D", issueid.name + append)
if failed == False:
print "Successfully pushed to Gerrit server"
#############################
def do_draft(argv):
logging.info("entering")
if len(argv) != 2:
# not a valid draft command
print "Invalid command syntax, please try again"
logging.info("Invalid command parameters")
return
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.info("Repo is dirty")
return
repo = Repo(os.getcwd())
submit(repo, "HEAD:refs/drafts/", "_draft")
#############################
def do_push(argv):
logging.info("entering")
if len(argv) != 2:
# not a valid push command
print "Invalid command syntax, please try again"
logging.info("Invalid command parameters")
return
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.info("Repo is dirty")
return
submit(repo, "HEAD:refs/for/", "_push")
#############################
def clone_ref(issue_name, repo):
logging.info("entering")
commithash = get_commit_hash(issue_name)
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commitmessage = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--current-patch-set", "change:I" + commithash ])
# TODO use issue_exists_on_server function?
if commitmessage.find('"rowCount":0') >= 0:
# no matching change, so there is nothing to clone
print "Oh Dear:\n\tThe issue name you provided doesn't seem to exist on\n\tthe server (" + hostname + "). Please check for typos and that\n\tthe change is on the server."
logging.info("Issue doesn't exist on server")
return ""
else:
# TODO use JSON properly
create_remote(repo)
start = commitmessage.find('"ref":"')
start = start + 7
end = commitmessage.find('","uploader"')
ref = commitmessage[start:end]
repo.git.fetch("gerrit_upstream_remote", ref)
repo.git.checkout("FETCH_HEAD")
logging.info("returning ref = " + ref)
return ref
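# clone_ref() fetches the current patch set ref reported by the server (e.g. a
# refs/changes/... ref) and checks out FETCH_HEAD, which leaves the repository
# on a detached HEAD until the caller creates or switches to a named branch.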
#############################
def do_rework(argv):
logging.info("entering")
if len(argv) < 3 or len(argv) > 4 :
# not a valid rework command
print "Invalid command"
logging.warning("Invalid command options")
return
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.warning("Dirty repo")
return
issue_name = argv[2]
logging.info("issue anme = " + issue_name)
mergechanges = False
if len(argv) == 4:
if argv[3] == "merge":
mergechanges = True
logging.info("Merge changes selected")
ref = clone_ref(issue_name, repo)
if ref != "":
# we have a ref
if branch_exist_local(issue_name, repo) == False:
if mergechanges == False:
repo.git.checkout("-b", issue_name)
logging.info("checkout -b " + issue_name)
if repo.active_branch.name != issue_name:
print "Oh Dear:\n\tCheckout of the new branch failed. Please clean the git repo and try again!"
logging.info("Failed to checkout branch " + issue_name)
else:
print "You are now on branch " + issue_name
logging.info("Checked out " + issue_name)
commithash = get_commit_hash(issue_name)
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commitmessage = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--commit-message", "change:I" + commithash ])
# TODO This should be parsed from json, not from string
start = commitmessage.find(',"commitMessage":"')
start = start + 18
end = commitmessage.find('","createdOn":')
commitmessage = commitmessage[start:end].replace("\\n", "\n")
startpoint = "master"
for line in commitmessage.split('\n'):
if line.find("Gerrit.startpoint:") != -1:
startpoint = line.split(':')[1]
logging.info("Startpoint = " + startpoint)
write_config(repo, issue_name, "startpoint" , startpoint)
else:
print "Oh Dear: You have requested a merge but the branch doesn't currently exist locally."
logging.info("Merge requested but branch doesn't exist")
else:
# branch exists
if mergechanges == False:
print "Oh Dear:\n\tIt appears that the creation of the new branch " + issue_name + " can't \n\thappen " + \
"due to a branch with this name already existing. If you want to" + \
"\n\tmerge the changes onto that branch then run git gerrit rework ISSUEID merge" + \
"\n\tPlease remove this branch and try again!"
logging.info("Branch name seems to exist so can't create")
else:
logging.info("checkout " + issue_name)
repo.git.checkout(issue_name)
if repo.active_branch.name != issue_name:
logging.info("Failed to chechout branch " + issue_name)
print "Oh Dear:\n\tCheckout of the existing branch failed, please check that you have a clean git repo"
else:
print "You are now on branch " + issue_name
logging.info("Checked out " + issue_name)
try:
logging.info("pulling gerrit remote with ref = " + ref)
repo.git.pull("gerrit_upstream_remote", ref)
except GitCommandError as e:
if e.status == 1:
print "Oh Dear:\n\tIt appears that the automerge into " + startpoint + " failed, please use\n\t git mergetool to complete the action and then perform a commit."
logging.info("automerge failed")
else:
logging.warning("pull failed, big issue")
################################
def do_suck(argv):
logging.info("entering")
if len(argv) != 3 :
# not a valid suck command
print "Invalid command options, please read the docs"
logging.info("Invalid command options")
return
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.info("Repo is dirty")
return
issue_name = argv[2]
if branch_exist_local(issue_name, repo) == False:
clone_ref(issue_name, repo)
try:
logging.info("checkout -b" + issue_name + "_suck")
repo.git.checkout("-b", issue_name + "_suck")
print "You are now on branch" + issue_name + "_suck, please delete when done"
except:
print "Oh Dear:\n\tIt appears that the creation of the new branch " + issue_name + "_suck has\n\tfailed. Please check you git repo and try again."
logging.info("Creation of branch " + issue_name + "_suck failed")
else:
print "Oh Dear:\n\tIt appears that the creation of the new branch " + issue_name + "_suck can't \n\thappen" + \
"due to a branch with this name already existing. If you want to" + \
"\n\tmerge the changes onto that branch then run git gerrit rework ISSUEID merge" + \
"\n\tPlease remove this branch and try again!"
logging.info("branch called " + issue_name + "_suck already exists")
#############################
def do_share(argv):
logging.info("entering")
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.warning("Repo Dirty")
return
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
issueid = repo.active_branch
remote = create_remote(repo)
share_name = "share/" + issueid.name
# we need to check that we aren't on a share branch, we don't want share/share/share....
if issueid.name[:6] == "share/":
print "Oh Dear:\n\tIt appears that the branch you are on is already a share!!"
logging.warning("Share - " + share_name + " is already a share")
return
# Check that share/<ISSUEID> doesn't exist; if it does, error as we can't have two
if branch_exist(share_name, repo, remote) == True:
print "Oh Dear:\n\tShare " + share_name + " already exists"
logging.warning("Share - " + share_name + " already exists")
return
#Move the branch to a share version
repo.git.branch("-m", issueid, share_name)
repo.git.push("origin", share_name)
try:
retval = subprocess.check_output(["git", "push", "gerrit_upstream_remote", share_name], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
retval = e.output
print retval
#############################
def do_scrunch(argv):
logging.info("entering")
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.warning("Repo Dirty")
return
if len(argv) != 4:
print "Oh Dear:\n\tScrunch only supports a command with a branch in the form share/ISSUEID and a merge to branch " + \
"after it, please see help for more info!"
logging.warning("Scrunch - didn't provide branch from and branch to")
return
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
branch_from = argv[2]
branch_to = argv[3]
branch_issuename = branch_from[6:]
remote = create_remote(repo)
#We take files to merge from the server so it must exist
if branch_exist_remote(branch_from, repo, remote) == False:
print "Oh Dear:\n\tBranch " + branch_from + " does not exist on the gerrit server, we will only merge from the server!!!"
logging.warning("Branch " + branch_from + " does not exist on server for scrunching")
return
print "Using branch " + branch_from + " from server"
if branch_exist_local(branch_issuename, repo) == True:
print "Oh Dear:\n\tA local branch called " + branch_issuename + " exists, we cannot scrunch while it exists!"
logging.warning("Branch " + branch_issuename + " exist locally")
return
if issue_exists_on_server(branch_issuename) == True:
print "Oh Dear:\n\tThe issue " + branch_issuename + " appears to exist on the server already, I don't know what you are doing but be careful!"
logging.warning("Issue " + branch_issuename + " exist already")
return
if branch_exist_remote(branch_to, repo, remote) == False:
print "Oh Dear:\n\tThe branch you want to merge to - " + branch_to + " - doesn't appears to exist on the server - Aborting!"
logging.warning("Branch " + branch_to + " doesn't exist on the server")
return
repo.git.branch(branch_issuename, 'gerrit_upstream_remote/' + branch_to)
if branch_exist_local(branch_issuename, repo) == False:
print "Oh Dear:\n\tThe creation of the branch " + branch_issuename + " failed - Aborting!"
logging.info("The creation of the branch " + branch_issuename + " failed")
return
write_config(repo, branch_issuename, "startpoint" , branch_to)
checkout(repo, branch_issuename)
try:
retval = repo.git.merge("--squash", "--no-commit", branch_from)
except:
print "Oh Dear:\n\tThe merge into the latest tip of " + branch_to + " failed." + \
"\n\tThe likely hood is that you need to merge in the latest changes in " + branch_to + \
"\n\tinto your branch or deal with the merge conflicts using git mergetool \n\n\tYou " + \
"are in an unclean state"
logging.info("Merge into latest tip of startpoint " + startpoint + " failed")
return
print "Merge from " + branch_from + " into " + branch_to + " was successful. Created issue " + branch_issuename
commithash = get_commit_hash(branch_issuename)
commitmessage = branch_issuename + " - \n# Brief summary on line above(<50 chars)\n\n" + \
"# Describe in detail the change below\nChange-Description:\n\n\n# Describe how to test your change below\n" + \
"Change-TestProcedure:\n\n\n# DO NOT EDIT ANYTHING BELOW HERE\n\nGerrit.startpoint:" + branch_to + \
"\n\nChange-Id: I" + commithash
logging.info("Writing commit message")
f = open(branch_issuename + '_commitmessage', 'w')
f.write(commitmessage)
f.close()
subprocess.call(['vim', branch_issuename + '_commitmessage'])
commitmessage = ""
f = file(branch_issuename + '_commitmessage', "r")
for line in f:
if not line.startswith("#"):
commitmessage = commitmessage + line
repo.git.commit("-a", '-m', commitmessage)
f.close()
print "The merge appears to be successful, please check and then push to gerrit using gerrit push"
#############################
def review_summary(issue_name):
logging.info("entering")
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commithash = get_commit_hash(issue_name)
info = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--commit-message", "--current-patch-set", "change:I" + commithash ])
# info.replace("id: I", "Change ID: I")
decoded = json.loads(info.splitlines()[0])
project = decoded['project']
branch = decoded['branch']
owner = decoded['owner']['name']
owner_email = decoded['owner']['email']
status = decoded['status']
created = datetime.datetime.fromtimestamp(decoded['createdOn']).strftime('%d-%m-%Y %H:%M:%S')
updated = datetime.datetime.fromtimestamp(decoded['lastUpdated']).strftime('%d-%m-%Y %H:%M:%S')
commitmessage = decoded['commitMessage']
numberofpatches = decoded['currentPatchSet']['number']
uri = decoded['url']
print "Project : " + project
print "Branch : " + branch
print "Change Owner : " + owner + " - " + owner_email
print "\nStatus : " + status
print "\nCreated on : " + created
print "Updated On : " + updated
print "Commit message: "
for l in commitmessage.splitlines():
print "\t\t" + l
print "\nNumber of Patchsets : " + str(numberofpatches)
print "Change URI : " + uri
#############################
def review_web(issue_name):
logging.info("entering")
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commithash = get_commit_hash(issue_name)
info = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--commit-message", "--current-patch-set", "change:I" + commithash ])
decoded = json.loads(info.splitlines()[0])
uri = decoded['url']
try:
webbrowser.open(uri)
except:
print "Oh Dear:\n\tIt appears that we can't open a browser or that the uri we have is invalid. Try visiting: " + uri
#############################
def review_patch(issue_name):
logging.info("entering")
repo = Repo(os.getcwd())
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commithash = get_commit_hash(issue_name)
info = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--current-patch-set", "change:I" + commithash ])
decoded = json.loads(info.splitlines()[0])
ref = decoded['currentPatchSet']['ref']
logging.info("ref = " + ref)
repo.git.fetch(url, ref)
patch = subprocess.check_output(['git', "format-patch", "-1", "--stdout", "FETCH_HEAD" ])
print patch
#############################
def review_tool(issue_name):
logging.info("entering")
repo = Repo(os.getcwd())
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commithash = get_commit_hash(issue_name)
info = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--current-patch-set", "change:I" + commithash])
decoded = json.loads(info.splitlines()[0])
ref = decoded['currentPatchSet']['ref']
logging.info("ref = " + ref)
repo.git.fetch(url, ref)
repo.git.difftool("--no-prompt", "FETCH_HEAD~..FETCH_HEAD")
#############################
review_type = {
'web': review_web,
'summary': review_summary,
'patch': review_patch,
'tool': review_tool,
}
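# Dispatch table for "git gerrit review <ISSUEID> [TYPE]"; for example a
# hypothetical "git gerrit review ISSUE-123 summary" routes to review_summary,
# while the default TYPE is "web" (see do_review below).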
def do_review(argv):
logging.info("entering")
if len(argv) < 3 or len(argv) > 4 :
# not a valid star command
print "Oh Dear:\n\tInvalid command, make sure you specified an issue and try again."
logging.warning("Invalid command used")
return
issue_name = argv[2]
if False == issue_exists_on_server(issue_name):
print "Oh Dear:\n\tThe issue appears not to exist on the server, please check for typos!"
return
stype = "web"
if len(argv) == 4:
stype = argv[3]
if stype in review_type:
logging.info("Summary type running - " + stype)
review_type[stype](issue_name)
else:
logging.warning("Not a Valid review type")
print "Oh Dear:\n\tThis is not a valid review type. Check for a type!! \n\n\tIf you would like a new type adding let us know!"
#############################
def do_cherrypick(argv):
logging.info("entering")
repo = Repo(os.getcwd())
if repo.is_dirty() == True:
print "Oh Dear:\n\tYour repo is dirty(changed files, unresolved merges etc.\n\n\tPlease resolve these and try again."
logging.warning("Repo Dirty")
return
if len(argv) != 3:
print "Oh Dear:\n\tCherrypick only supports a command with a issue name after it, please try again!"
logging.warning("Bad command used")
return
issue_name = argv[2]
url = get_origin_url()
hostname = get_server_hostname(url)
port = get_server_port(url)
commithash = get_commit_hash(issue_name)
info = subprocess.check_output(['ssh', hostname, "-p", port, "gerrit", "query", "--format", "JSON", "--current-patch-set", "change:I" + commithash ])
decoded = json.loads(info.splitlines()[0])
ref = decoded['currentPatchSet']['ref']
logging.info("ref = " + ref)
repo.git.fetch(url, ref)
repo.git.cherry_pick("FETCH_HEAD")
#############################
def help_start():
logging.info("entering")
print "\n\nstart:\n\n\tgit gerrit start <ISSUEID> (STARTPOINT)" + \
"\n\n\tWhere <ISSUEID> is a unique id, this is normally taken from an issue control system such as redmine" + \
"\n\n\tWhere (STARTPOINT) is an optional argument dictating which branch you are developing on, the default unless set in a config file is master"
"\n\n\tStart is used to setup a new set of changes, this creates a branch and puts tracking information in your configuration"
#############################
def help_draft():
logging.info("entering")
print "\n\ndraft:\n\n\tgit gerrit draft" + \
"\n\n\tDraft is used to push the changes on the current branch onto the gerrit server in draft mode, these changes cannot be seen until published"
#############################
def help_push():
logging.info("entering")
print "\n\npush:\n\n\tgit gerrit push" + \
"\n\n\tpush is used to push the changes on the current branch onto the gerrit server for review. Depending on your" + \
"\n\tworkflow you will likely need to add reviewers to the issue after pushing"
#############################
def help_rework():
logging.info("entering")
print "\n\nrework:\n\n\tgit gerrit rework <ISSUEID>" + \
"\n\n\tWhere <ISSUEID> is a unique id, this is normally taken from an issue control system such as redmine" + \
"\n\n\trework will create you a branch called <ISSUEID> where you can make any changes you require and push" + \
"\n\tthem back to the server, this allows you to take control of a change already pushed by someopne else or" + \
"\n\treclaim a change if someone else has worked on it"
#############################
def help_suck():
logging.info("entering")
print "\n\nsuck:\n\n\tgit gerrit suck <ISSUEID>" + \
"\n\n\tWhere <ISSUEID> is a unique id, this is normally taken from an issue control system such as redmine" + \
"\n\n\tsuck downloads a copy of changes for <ISSUEID> from the server into a branch called <ISSUEID>_suck." + \
"\n\tHere you can build changes for testing etc. You should not use this brnahc to modify the code if you want the" + \
"\n\tchanges to go back to the server. For this you shuld use rework. Once you have finished with the changes you can delete the branch"
#############################
def help_review():
logging.info("entering")
print "\n\nreview:\n\n\tgit gerrit review <ISSUEID> (TYPE)" + \
"\n\n\tWhere <ISSUEID> is a unique id, this is normally taken from an issue control system such as redmine" + \
"\n\n\tWhere (TYPE) is an optional argument stating the review type wanted, valid types are:" + \
"\n\t\t\tsummary - This will output a summary of the change on the commandline" + \
"\n\t\t\tweb - This will take you to the change on the website" + \
"\n\t\t\tpatch - This will give you a patchset for the change" + \
"\n\t\t\ttool - This will show you the delta in a GUI tool" + \
"\n\n\treview is primarily used for getting information about a change, the default review command will take you to the gerrit review page i.e. web mode"
#############################
def help_share():
logging.info("entering")
print "\n\nshare:\n\n\tgit gerrit share" + \
"\n\n\tshare is used to push the current issue to a branch called share/<ISSUEID> on the gerrit server" + \
"\n\tThis branch can then be accessed like any other branch and shared between multiple people in order" + \
"\n\tto work together on a feature. This branch can then be merged onto the" + \
"\n\tdevelopment branches via a standard code review process\n\n\tSee scrunch command for more info"
#############################
def help_scrunch():
logging.info("entering")
print "\n\nscrunch:\n\n\tgit gerrit scrunch <SHARED/BRANCH> <TARGETBRANCH>" + \
"\n\n\tWhere <SHARED/BRANCH> is the name of a branch currently shared on the gerrit server" + \
"\n\tWhere <TARGETBRANCH> is the name of a branch you want the changes onto i.e. master" + \
"\n\n\tScrunch is used to migrate a shared development branch into a standard gerrit issue that can" + \
"\n\tthen be pushed to the gerrit server for review. This comman merges the branch from the SERVER not a" + \
"\n\tlocal copy, as such any local changes you have should be pushed to the server first.\n\n\tSee share command for more info"
#############################
def help_cherrypick():
logging.info("entering")
print "\n\ncherrypick:\n\n\tgit gerrit cherrypick <ISSUEID>" + \
"\n\n\tWhere <ISSUEID> is a unique id, this is normally taken from an issue control system such as redmine" + \
"\n\n\tcherrypick is used to merge a given change on the server into your local branch. Please note, currently dependancy management is not done automatically"
#############################
def help_version():
logging.info("entering")
print "\n\nversion:\n\n\tgit gerrit version <TYPE>" + \
"\n\n\tWhere <TYPE> is an output format, currrently only long and short are supported. long is default" + \
"\n\n\tUsed to print version info, if short is passed as an option then only version number is printed"
#############################
helpmap = {
'cherrypick': help_cherrypick,
'draft': help_draft,
'push': help_push,
'review': help_review,
'rework': help_rework,
'scrunch': help_scrunch,
'share': help_share,
'start': help_start,
'suck': help_suck,
'version': help_version,
}
#############################
def do_help(argv):
logging.info("entering")
threeargs = False
if len(argv) == 3:
if sys.argv[2] in helpmap:
print "Gerritflow " + sys.argv[2] + " usage:"
helpmap[sys.argv[2]]()
return
threeargs = True
print "Gerrit-flow usage is as follows:"
print "\tSubcommand list is:"
print "\t\tcherrypick\n\t\tdraft\n\t\tpush\n\t\treview\n\t\trework\n\t\tscrunch\n\t\tshare\n\t\tstart\n\t\tsuck\n\t\tversion\n\n"
if threeargs == True:
if sys.argv[2] == "all":
for c in helpmap:
helpmap[c]()
else:
print "For more information run 'git gerrit help <COMMAND>'"
print "Run 'git gerrit help all' for help output of all gerrit-flow comamnds"
#############################
def do_version(argv):
logging.info("entering")
short_mode = False
if len(argv) == 3:
if sys.argv[2] == "short":
short_mode = True
message = ""
if short_mode == False:
message = "Gerrit-flow version is - "
message = message + str(GERRIT_FLOW_VERSION)
if short_mode == False:
message = message + "\n\nFor usage info see git gerrit help"
print message
#############################
dispatch = {
'start': do_start,
'draft': do_draft,
'push': do_push,
'rework': do_rework,
'suck': do_suck,
'review': do_review,
'cherrypick': do_cherrypick,
'cherry-pick': do_cherrypick,
'share': do_share,
'scrunch': do_scrunch,
'help': do_help,
'version': do_version,
}
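# Top-level command dispatch: sys.argv[1] selects the handler and the full argv
# is passed through, e.g. "git gerrit draft" -> do_draft(["<script>", "draft"]).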
def main():
logging.info("entering")
logging.info("os.getcwd() = " + os.getcwd())
logging.info("Number of arguments:" + str(len(sys.argv)))
logging.info("Arguments:")
# if no commands are given, show help
if len(sys.argv) == 1:
do_help(sys.argv)
return
for a in sys.argv:
logging.info("\t" + a)
if sys.argv[1] in dispatch:
dispatch[sys.argv[1]](sys.argv)
else:
logging.warning("No matching command")
print "Oh Dear:\n\tThere is no matching command, did you RTFM? or do we have a bug?"
#############################
if __name__ == "__main__":
rnum = random.randint(100000, 999999)
logging.basicConfig(format="%(asctime)s: - " + str(rnum) + " - %(filename)s: - %(funcName)s() - %(lineno)d : %(message)s", level=logging.DEBUG, filename='/tmp/gerrit-flow.log')
main()
|
|
#! /usr/bin/env python
from datetime import datetime, date
import os
import re
import time
import logging
import json
import requests
import sys
import config
import slackbot
import utils
# An arbitrary past date, as a default value for the earliest archive date
PAST_DATE_STRING = '2000-01-01'
class Destalinator(object):
closure_text_fname = "closure.txt"
warning_text_fname = "warning.txt"
def __init__(self, slacker, slackbot, activated, logger=None):
"""
slacker is a Slacker() object
slackbot should be an initialized slackbot.Slackbot() object
activated is a boolean indicating whether destalinator should do dry runs or real runs
"""
self.closure_text = utils.get_local_file_content(self.closure_text_fname)
self.warning_text = utils.get_local_file_content(self.warning_text_fname)
self.slacker = slacker
self.slackbot = slackbot
self.user = os.getenv("USER")
self.config = config.Config()
self.output_debug_to_slack_flag = False
if os.getenv(self.config.output_debug_env_varname):
self.output_debug_to_slack_flag = True
self.logger = logger or logging.getLogger(__name__)
self.destalinator_activated = activated
self.logger.debug("destalinator_activated is %s", self.destalinator_activated)
self.earliest_archive_date = self.get_earliest_archive_date()
self.cache = {}
self.now = int(time.time())
## utility & data fetch methods
def action(self, message):
message = "*ACTION: " + message + "*"
self.logger.info(message)
def add_slack_channel_markup_item(self, item):
return self.slacker.add_channel_markup(item.group(1))
def add_slack_channel_markup(self, text):
marked_up = re.sub(r"\#([a-z0-9_-]+)", self.add_slack_channel_markup_item, text)
return marked_up
def channel_minimum_age(self, channel_name, days):
"""Return True if channel represented by `channel_name` is at least `days` old, otherwise False."""
info = self.slacker.get_channel_info(channel_name)
age = info['age']
age = age / 86400
return age > days
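# The 'age' field returned with the channel info is assumed to be in seconds,
# hence the division by 86400 (seconds per day) before comparing against days.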
def debug(self, message):
self.logger.debug(message)
message = "DEBUG: " + message
if self.output_debug_to_slack_flag:
self.log(message)
def get_earliest_archive_date(self):
"""Return a datetime.date object representing the earliest archive date."""
date_string = os.getenv(self.config.get('earliest_archive_date_env_varname') or '') \
or self.config.get('earliest_archive_date') \
or PAST_DATE_STRING
return datetime.strptime(date_string, "%Y-%m-%d").date()
def get_messages(self, channel_name, days):
"""Return `days` worth of messages for channel `channel_name`. Caches messages per channel & days."""
oldest = self.now - days * 86400
cid = self.slacker.get_channelid(channel_name)
if oldest in self.cache.get(cid, {}):
self.debug("Returning {} cached messages for #{} over {} days".format(len(self.cache[cid][oldest]), channel_name, days))
return self.cache[cid][oldest]
messages = self.slacker.get_messages_in_time_range(oldest, cid)
self.debug("Fetched {} messages for #{} over {} days".format(len(messages), channel_name, days))
messages = [x for x in messages if (x.get("subtype") is None or x.get("subtype") in self.config.included_subtypes)]
self.debug("Filtered down to {} messages based on included_subtypes: {}".format(len(messages), ", ".join(self.config.included_subtypes)))
if cid not in self.cache:
self.cache[cid] = {}
self.cache[cid][oldest] = messages
return messages
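# Messages are cached per channel id and per 'oldest' timestamp, so the stale
# check and the warning check for the same channel and window reuse a single
# Slack API fetch within one run.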
def get_stale_channels(self, days):
"""Return a list of channel names that have been stale for `days`."""
ret = []
for channel in sorted(self.slacker.channels_by_name.keys()):
if self.stale(channel, days):
ret.append(channel)
self.debug("{} channels quiet for {} days: {}".format(len(ret), days, ret))
return ret
def ignore_channel(self, channel_name):
"""Return True if `channel_name` is a channel we should ignore based on config settings."""
if channel_name in self.config.ignore_channels:
return True
for pat in self.config.ignore_channel_patterns:
if re.match(pat, channel_name):
return True
return False
def log(self, message):
timestamp = time.strftime("%H:%M:%S: ", time.localtime())
message = timestamp + " ({}) ".format(self.user) + message
self.slacker.post_message(self.config.log_channel, message, message_type='log')
def stale(self, channel_name, days):
"""
Return True if channel represented by `channel_name` is stale.
Definition of stale is: no messages in the last `days` which are not from config.ignore_users.
"""
if not self.channel_minimum_age(channel_name, days):
self.debug("Channel #{} is not yet of minimum_age; skipping stale messages check".format(channel_name))
return False
messages = self.get_messages(channel_name, days)
# return True (stale) if none of the messages match the criteria below
return not any(
# the message is not from an ignored user
x.get("user") not in self.config.ignore_users
and (
# the message must have text that doesn't include the ignored ":dolphin:" marker
(x.get("text") and b":dolphin:" not in x.get("text").encode('utf-8', 'ignore'))
# or the message must have attachments
or x.get("attachments")
)
for x in messages
)
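# Illustrative reading of the rule above (channel names hypothetical): with
# days=30, a channel whose only recent activity is ":dolphin:" marker posts or
# messages from users listed in config.ignore_users counts as stale, while an
# ordinary text message or an attachment from a non-ignored user keeps it active.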
## channel actions
def archive(self, channel_name):
"""Archive the given channel name, returning the Slack API response as a JSON string."""
if self.ignore_channel(channel_name):
self.debug("Not archiving #{} because it's in ignore_channels".format(channel_name))
return
if self.destalinator_activated:
self.debug("Announcing channel closure in #{}".format(channel_name))
self.slacker.post_message(channel_name, self.closure_text, message_type='channel_archive')
members = self.slacker.get_channel_member_names(channel_name)
say = "Members at archiving are {}".format(", ".join(sorted(members)))
self.debug("Telling channel #{}: {}".format(channel_name, say))
self.slacker.post_message(channel_name, say, message_type='channel_archive_members')
self.action("Archiving channel #{}".format(channel_name))
payload = self.slacker.archive(channel_name)
if payload['ok']:
self.debug("Slack API response to archive: {}".format(json.dumps(payload, indent=4)))
self.logger.info("Archived #{}".format(channel_name))
else:
error = payload.get('error', '!! No error found in payload %s !!' % payload)
self.logger.error("Failed to archive {channel_name}: {error}. See https://api.slack.com/methods/channels.archive for more context.".format(channel_name=channel_name, error=error))
return payload
def safe_archive(self, channel_name):
"""
Archive channel if today's date is after `self.earliest_archive_date`
and if channel does not only contain single-channel guests.
"""
if self.slacker.channel_has_only_restricted_members(channel_name):
self.debug("Would have archived #{} but it contains only restricted users".format(channel_name))
return
today = date.today()
if today >= self.earliest_archive_date:
self.action("Archiving channel #{}".format(channel_name))
self.archive(channel_name)
else:
self.debug("Would have archived #{} but it's not yet {}".format(channel_name, self.earliest_archive_date))
def safe_archive_all(self, days):
"""Safe archive all channels stale longer than `days`."""
self.action("Safe-archiving all channels stale for more than {} days".format(days))
for channel in sorted(self.slacker.channels_by_name.keys()):
if self.stale(channel, days):
self.debug("Attempting to safe-archive #{}".format(channel))
self.safe_archive(channel)
def warn(self, channel_name, days, force_warn=False):
"""
Send warning text to channel_name, if it has not been sent already in the last `days`.
Using `force_warn=True` will warn even if a previous warning exists.
Return True if we actually warned, otherwise False.
"""
if self.slacker.channel_has_only_restricted_members(channel_name):
self.debug("Would have warned #{} but it contains only restricted users".format(channel_name))
return False
if self.ignore_channel(channel_name):
self.debug("Not warning #{} because it's in ignore_channels".format(channel_name))
return False
messages = self.get_messages(channel_name, days)
texts = [x.get("text").strip() for x in messages if x.get("text")]
if (not force_warn and
(self.add_slack_channel_markup(self.warning_text) in texts or
any(any(a.get('fallback') == 'channel_warning' for a in m.get('attachments', [])) for m in messages))):
self.debug("Not warning #{} because we found a prior warning".format(channel_name))
return False
if self.destalinator_activated:
self.slacker.post_message(channel_name, self.warning_text, message_type='channel_warning')
self.action("Warned #{}".format(channel_name))
return True
def warn_all(self, days, force_warn=False):
"""Warn all channels which are `days` idle; if `force_warn`, will warn even if we already have."""
if not self.destalinator_activated:
self.logger.info("Note, destalinator is not activated and is in a dry-run mode. For help, see the " \
"documentation on the DESTALINATOR_ACTIVATED environment variable.")
self.action("Warning all channels stale for more than {} days".format(days))
stale = []
for channel in sorted(self.slacker.channels_by_name.keys()):
if self.ignore_channel(channel):
self.debug("Not warning #{} because it's in ignore_channels".format(channel))
continue
if self.stale(channel, days):
if self.warn(channel, days, force_warn):
stale.append(channel)
if stale and self.config.general_message_channel:
self.debug("Notifying #{} of warned channels".format(self.config.general_message_channel))
self.warn_in_general(stale)
def warn_in_general(self, stale_channels):
if not stale_channels:
return
if len(stale_channels) > 1:
channel = "channels"
being = "are"
there = "them"
else:
channel = "channel"
being = "is"
there = "it"
message = "Hey, heads up -- the following {} {} stale and will be "
message += "archived if no one participates in {} over the next 7 days: "
message += ", ".join(["#" + x for x in stale_channels])
message = message.format(channel, being, there)
if self.destalinator_activated:
self.slacker.post_message(self.config.general_message_channel, message, message_type='warn_in_general')
self.debug("Notified #{} with: {}".format(self.config.general_message_channel, message))
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import collections
import copy
import time
import six
from stackalytics.openstack.common import log as logging
from stackalytics.processor import launchpad_utils
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
class RecordProcessor(object):
def __init__(self, runtime_storage_inst):
self.runtime_storage_inst = runtime_storage_inst
self.domains_index = runtime_storage_inst.get_by_key('companies')
self.releases = runtime_storage_inst.get_by_key('releases')
self.releases_dates = [r['end_date'] for r in self.releases]
self.modules = None
self.alias_module_map = None
def _get_release(self, timestamp):
release_index = bisect.bisect(self.releases_dates, timestamp)
if release_index >= len(self.releases):
LOG.warn('Timestamp %s is beyond releases boundaries, the last '
'release will be used. Please consider adding a '
'new release into default_data.json', timestamp)
release_index = len(self.releases) - 1
return self.releases[release_index]['release_name']
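# Release lookup sketch: self.releases_dates holds the release end_date values
# in ascending order, and bisect.bisect() returns the index of the first release
# whose end_date is greater than the timestamp; timestamps past the last
# end_date fall back to the newest release (with the warning above).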
def _get_modules(self):
if self.modules is None:
self.modules = set()
self.alias_module_map = dict()
for repo in utils.load_repos(self.runtime_storage_inst):
module = repo['module'].lower()
module_aliases = repo.get('aliases') or []
add = True
for module_name in ([module] + module_aliases):
for m in self.modules:
if module_name.find(m) >= 0:
add = False
break
if m.find(module_name) >= 0:
self.modules.remove(m)
break
if add:
self.modules.add(module_name)
for alias in module_aliases:
self.alias_module_map[alias] = module
return self.modules, self.alias_module_map
def _find_company(self, companies, date):
for r in companies:
if date < r['end_date']:
return r['company_name']
return companies[-1]['company_name']
def _get_company_by_email(self, email):
if not email:
return None
name, at, domain = email.partition('@')
if domain:
parts = domain.split('.')
for i in range(len(parts), 1, -1):
m = '.'.join(parts[len(parts) - i:])
if m in self.domains_index:
return self.domains_index[m]
return None
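# Domain matching example (hypothetical address): for "jane@dev.example.com"
# the loop tries "dev.example.com" first and then "example.com" against
# domains_index, i.e. longest suffix first, never falling back to the bare TLD.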
def _create_user(self, launchpad_id, email, user_name):
company = (self._get_company_by_email(email) or
self._get_independent())
user = {
'user_id': launchpad_id or email,
'launchpad_id': launchpad_id,
'user_name': user_name or '',
'companies': [{
'company_name': company,
'end_date': 0,
}],
}
if email:
user['emails'] = [email]
else:
user['emails'] = []
return user
def _get_lp_info(self, email):
lp_profile = None
if not utils.check_email_validity(email):
LOG.debug('User email is not valid %s', email)
else:
lp_profile = launchpad_utils.lp_profile_by_email(email)
if not lp_profile:
LOG.debug('User with email %s not found', email)
return None, None
LOG.debug('Email %(email)s is mapped to launchpad user %(lp)s',
{'email': email, 'lp': lp_profile['name']})
return lp_profile['name'], lp_profile['display_name']
def _get_lp_user_name(self, launchpad_id):
if not launchpad_id:
return None
lp_profile = launchpad_utils.lp_profile_by_launchpad_id(launchpad_id)
if not lp_profile:
LOG.debug('User with id %s not found', launchpad_id)
return launchpad_id
return lp_profile['display_name']
def _get_independent(self):
return '*independent'
def _update_user_affiliation(self, user):
for email in user.get('emails'):
company_name = self._get_company_by_email(email)
uc = user['companies']
if (company_name and (len(uc) == 1) and
(uc[0]['company_name'] == self._get_independent())):
LOG.debug('Updating affiliation of user %s to %s',
user['user_id'], company_name)
uc[0]['company_name'] = company_name
break
def _get_user_exact_company(self, user):
if len(user.get('companies', [])) == 1:
return user['companies'][0]['company_name']
return None
def _merge_user_profiles(self, user_a, user_b, user_c):
user = {}
for key in ['seq', 'user_name', 'user_id',
'launchpad_id', 'companies']:
user[key] = user_a.get(key) or user_b.get(key) or user_c.get(key)
if user['launchpad_id'] and user['user_id'] != user['launchpad_id']:
user['user_id'] = user['launchpad_id']
emails = set([])
core_in = set([])
for u in [user_a, user_b, user_c]:
emails |= set(u.get('emails', []))
core_in |= set(u.get('core', []))
user['emails'] = list(emails)
user['core'] = list(core_in)
self._update_user_affiliation(user)
if user_a.get('seq') and user_b.get('seq'):
LOG.debug('Delete user: %s', user_b)
utils.delete_user(self.runtime_storage_inst, user_b)
return user
def update_user(self, record):
email = record.get('author_email')
user_e = utils.load_user(self.runtime_storage_inst, email) or {}
user_name = record.get('author_name')
launchpad_id = record.get('launchpad_id')
if email and (not user_e) and (not launchpad_id):
# query LP
launchpad_id, lp_user_name = self._get_lp_info(email)
if lp_user_name:
user_name = lp_user_name
user_l = utils.load_user(self.runtime_storage_inst, launchpad_id) or {}
user = self._create_user(launchpad_id, email, user_name)
if (user_e.get('seq') == user_l.get('seq')) and user_e.get('seq'):
# sequence numbers are set and the same, merge is not needed
user = user_e
else:
if user_e or user_l:
user = self._merge_user_profiles(user_e, user_l, user)
else:
# create new
if not user_name:
user_name = self._get_lp_user_name(launchpad_id)
if user_name:
user['user_name'] = user_name
LOG.debug('Created new user: %s', user)
utils.store_user(self.runtime_storage_inst, user)
return user
def _update_record_and_user(self, record):
user = self.update_user(record)
record['user_id'] = user['user_id']
record['launchpad_id'] = user['launchpad_id']
if user.get('user_name'):
record['author_name'] = user['user_name']
company = self._find_company(user['companies'], record['date'])
if company != '*robots':
company = (self._get_company_by_email(record.get('author_email'))
or company)
record['company_name'] = company
def _process_commit(self, record):
record['primary_key'] = record['commit_id']
record['loc'] = record['lines_added'] + record['lines_deleted']
record['author_email'] = record['author_email'].lower()
record['commit_date'] = record['date']
coauthors = record.get('coauthor')
if not coauthors:
self._update_record_and_user(record)
if record['company_name'] != '*robots':
yield record
else:
coauthors.append({'author_name': record['author_name'],
'author_email': record['author_email']})
for coauthor in coauthors:
coauthor['date'] = record['date']
self._update_record_and_user(coauthor)
for coauthor in coauthors:
new_record = copy.deepcopy(record)
new_record.update(coauthor)
new_record['primary_key'] += coauthor['author_email']
yield new_record
def _make_review_record(self, record):
# copy everything except patchsets and flatten user data
review = dict([(k, v) for k, v in six.iteritems(record)
if k not in ['patchSets', 'owner', 'createdOn']])
owner = record['owner']
review['primary_key'] = review['id']
review['launchpad_id'] = owner['username']
review['author_name'] = owner['name']
review['author_email'] = owner['email'].lower()
review['date'] = record['createdOn']
patch_sets = record.get('patchSets', [])
review['updated_on'] = review['date']
if patch_sets:
patch = patch_sets[-1]
if 'approvals' in patch:
review['value'] = min([int(p['value'])
for p in patch['approvals']])
review['updated_on'] = patch['approvals'][0]['grantedOn']
else:
review['updated_on'] = patch['createdOn']
if 'value' not in review:
review['value'] = 0
self._update_record_and_user(review)
return review
def _make_patch_record(self, review, patch):
patch_record = dict()
patch_record['record_type'] = 'patch'
patch_record['primary_key'] = utils.get_patch_id(
review['id'], patch['number'])
patch_record['number'] = patch['number']
patch_record['date'] = patch['createdOn']
uploader = patch['uploader']
patch_record['launchpad_id'] = uploader['username']
patch_record['author_name'] = uploader['name']
patch_record['author_email'] = uploader['email'].lower()
patch_record['module'] = review['module']
patch_record['branch'] = review['branch']
patch_record['review_id'] = review['id']
self._update_record_and_user(patch_record)
return patch_record
def _make_mark_record(self, review, patch, approval):
# copy everything and flatten user data
mark = dict([(k, v) for k, v in six.iteritems(approval)
if k not in ['by', 'grantedOn', 'value']])
reviewer = approval['by']
mark['record_type'] = 'mark'
mark['value'] = int(approval['value'])
mark['date'] = approval['grantedOn']
mark['primary_key'] = (review['id'] + str(mark['date']) + mark['type'])
mark['launchpad_id'] = reviewer['username']
mark['author_name'] = reviewer['name']
mark['author_email'] = reviewer['email'].lower()
mark['module'] = review['module']
mark['branch'] = review['branch']
mark['review_id'] = review['id']
mark['patch'] = int(patch['number'])
# map type from new Gerrit to old
mark['type'] = {'Approved': 'APRV', 'Code-Review': 'CRVW',
'Verified': 'VRIF'}.get(mark['type'], mark['type'])
self._update_record_and_user(mark)
return mark
def _process_review(self, record):
"""
Process a review. Review spawns into records of three types:
* review - records that a user created review request
* patch - records that a user submitted another patch set
* mark - records that a user set approval mark to given review
"""
owner = record['owner']
if 'email' not in owner or 'username' not in owner:
return # ignore
yield self._make_review_record(record)
for patch in record.get('patchSets', []):
if (('email' not in patch['uploader']) or
('username' not in patch['uploader'])):
continue # ignore
yield self._make_patch_record(record, patch)
if 'approvals' not in patch:
continue # not reviewed by anyone
for approval in patch['approvals']:
if approval['type'] not in ('CRVW', 'APRV',
'Code-Review', 'Approved'):
continue # keep only Code-Review and Approved
if ('email' not in approval['by'] or
'username' not in approval['by']):
continue # ignore
yield self._make_mark_record(record, patch, approval)
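# For example, a review JSON with two patch sets, the second carrying three
# Code-Review approvals, would yield one review record, two patch records and
# up to three mark records (fewer if uploader or approver identities are missing).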
def _guess_module(self, record):
subject = record['subject'].lower()
pos = len(subject)
best_guess_module = None
modules, alias_module_map = self._get_modules()
for module in modules:
find = subject.find(module)
if (find >= 0) and (find < pos):
pos = find
best_guess_module = module
if best_guess_module:
if (((pos > 0) and (subject[pos - 1] == '[')) or
(not record.get('module'))):
record['module'] = best_guess_module
if not record.get('module'):
record['module'] = 'unknown'
elif record['module'] in alias_module_map:
record['module'] = alias_module_map[record['module']]
def _process_email(self, record):
record['primary_key'] = record['message_id']
record['author_email'] = record['author_email'].lower()
self._update_record_and_user(record)
self._guess_module(record)
if not record.get('blueprint_id'):
del record['body']
yield record
def _process_blueprint(self, record):
bpd_author = record.get('drafter') or record.get('owner')
bpd = dict([(k, v) for k, v in six.iteritems(record)
if k.find('_link') < 0])
bpd['record_type'] = 'bpd'
bpd['primary_key'] = 'bpd:' + record['id']
bpd['launchpad_id'] = bpd_author
bpd['date'] = record['date_created']
self._update_record_and_user(bpd)
yield bpd
if record.get('assignee') and record['date_completed']:
bpc = dict([(k, v) for k, v in six.iteritems(record)
if k.find('_link') < 0])
bpc['record_type'] = 'bpc'
bpc['primary_key'] = 'bpc:' + record['id']
bpc['launchpad_id'] = record['assignee']
bpc['date'] = record['date_completed']
self._update_record_and_user(bpc)
yield bpc
def _process_member(self, record):
user_id = "member:" + record['member_id']
record['primary_key'] = user_id
record['date'] = utils.member_date_to_timestamp(record['date_joined'])
record['author_name'] = record['member_name']
record['module'] = 'unknown'
company_draft = record['company_draft']
company_name = self.domains_index.get(company_draft) or company_draft
# author_email is a key to create new user
record['author_email'] = user_id
record['company_name'] = company_name
# _update_record_and_user function will create new user if needed
self._update_record_and_user(record)
record['company_name'] = company_name
user = utils.load_user(self.runtime_storage_inst, user_id)
user['user_name'] = record['author_name']
user['companies'] = [{
'company_name': company_name,
'end_date': 0,
}]
user['company_name'] = company_name
utils.store_user(self.runtime_storage_inst, user)
record['company_name'] = company_name
yield record
def _apply_type_based_processing(self, record):
if record['record_type'] == 'commit':
for r in self._process_commit(record):
yield r
elif record['record_type'] == 'review':
for r in self._process_review(record):
yield r
elif record['record_type'] == 'email':
for r in self._process_email(record):
yield r
elif record['record_type'] == 'bp':
for r in self._process_blueprint(record):
yield r
elif record['record_type'] == 'member':
for r in self._process_member(record):
yield r
def _renew_record_date(self, record):
record['week'] = utils.timestamp_to_week(record['date'])
if ('release' not in record) or (not record['release']):
record['release'] = self._get_release(record['date'])
def process(self, record_iterator):
for record in record_iterator:
for r in self._apply_type_based_processing(record):
if r['company_name'] == '*robots':
continue
self._renew_record_date(r)
yield r
def _update_records_with_releases(self, release_index):
LOG.debug('Update records with releases')
for record in self.runtime_storage_inst.get_all_records():
if record['primary_key'] in release_index:
release = release_index[record['primary_key']]
else:
release = self._get_release(record['date'])
if record['release'] != release:
record['release'] = release
yield record
def _update_records_with_user_info(self):
LOG.debug('Update user info in records')
for record in self.runtime_storage_inst.get_all_records():
company_name = record['company_name']
user_id = record['user_id']
author_name = record['author_name']
self._update_record_and_user(record)
if ((record['company_name'] != company_name) or
(record['user_id'] != user_id) or
(record['author_name'] != author_name)):
LOG.debug('User info (%(id)s, %(name)s, %(company)s) has '
'changed in record %(record)s',
{'id': user_id, 'name': author_name,
'company': company_name, 'record': record})
yield record
def _update_commits_with_merge_date(self):
change_id_to_date = {}
for record in self.runtime_storage_inst.get_all_records():
if (record['record_type'] == 'review' and
record.get('status') == 'MERGED'):
change_id_to_date[record['id']] = record['lastUpdated']
for record in self.runtime_storage_inst.get_all_records():
if record['record_type'] == 'commit':
change_id_list = record.get('change_id')
if change_id_list and len(change_id_list) == 1:
change_id = change_id_list[0]
if change_id in change_id_to_date:
old_date = record['date']
if old_date != change_id_to_date[change_id]:
record['date'] = change_id_to_date[change_id]
self._renew_record_date(record)
LOG.debug('Date %(date)s has changed in record '
'%(record)s', {'date': old_date,
'record': record})
yield record
def _update_blueprints_with_mention_info(self):
LOG.debug('Process blueprints and calculate mention info')
valid_blueprints = {}
mentioned_blueprints = {}
for record in self.runtime_storage_inst.get_all_records():
for bp in record.get('blueprint_id', []):
if bp in mentioned_blueprints:
mentioned_blueprints[bp]['count'] += 1
if record['date'] > mentioned_blueprints[bp]['date']:
mentioned_blueprints[bp]['date'] = record['date']
else:
mentioned_blueprints[bp] = {
'count': 1,
'date': record['date']
}
if record['record_type'] in ['bpd', 'bpc']:
valid_blueprints[record['id']] = {
'primary_key': record['primary_key'],
'count': 0,
'date': record['date']
}
for bp_name, bp in six.iteritems(valid_blueprints):
if bp_name in mentioned_blueprints:
bp['count'] = mentioned_blueprints[bp_name]['count']
bp['date'] = mentioned_blueprints[bp_name]['date']
else:
bp['count'] = 0
bp['date'] = 0
for record in self.runtime_storage_inst.get_all_records():
need_update = False
valid_bp = set([])
for bp in record.get('blueprint_id', []):
if bp in valid_blueprints:
valid_bp.add(bp)
else:
LOG.debug('Update record %s: removed invalid bp: %s',
record['primary_key'], bp)
need_update = True
record['blueprint_id'] = list(valid_bp)
if record['record_type'] in ['bpd', 'bpc']:
bp = valid_blueprints[record['id']]
if ((record.get('mention_count') != bp['count']) or
(record.get('mention_date') != bp['date'])):
record['mention_count'] = bp['count']
record['mention_date'] = bp['date']
LOG.debug('Update record %s: mention stats: (%s:%s)',
record['primary_key'], bp['count'], bp['date'])
need_update = True
if need_update:
yield record
def _update_reviews_with_sequence_number(self):
LOG.debug('Set review number in review records')
users_reviews = {}
for record in self.runtime_storage_inst.get_all_records():
if record['record_type'] == 'review':
launchpad_id = record['launchpad_id']
review = {'date': record['date'], 'id': record['id']}
if launchpad_id in users_reviews:
users_reviews[launchpad_id].append(review)
else:
users_reviews[launchpad_id] = [review]
reviews_index = {}
for launchpad_id, reviews in six.iteritems(users_reviews):
reviews.sort(key=lambda x: x['date'])
review_number = 0
for review in reviews:
review_number += 1
review['review_number'] = review_number
reviews_index[review['id']] = review
for record in self.runtime_storage_inst.get_all_records():
if record['record_type'] == 'review':
review = reviews_index[record['id']]
if record.get('review_number') != review['review_number']:
record['review_number'] = review['review_number']
yield record
def _determine_core_contributors(self):
LOG.debug('Determine core contributors')
core_engineers = {}
quarter_ago = int(time.time()) - 60 * 60 * 24 * 30 * 3 # a quarter ago
for record in self.runtime_storage_inst.get_all_records():
if (record['record_type'] == 'mark' and
record['date'] > quarter_ago and
record['value'] in [2, -2]):
module_branch = (record['module'], record['branch'])
user_id = record['user_id']
if user_id in core_engineers:
core_engineers[user_id].add(module_branch)
else:
core_engineers[user_id] = set([module_branch])
for user in self.runtime_storage_inst.get_all_users():
core_old = user.get('core')
user['core'] = list(core_engineers.get(user['user_id'], []))
if user['core'] != core_old:
utils.store_user(self.runtime_storage_inst, user)
def _close_patch(self, cores, marks):
if len(marks) < 2:
return
core_mark = 0
for mark in sorted(marks, key=lambda x: x['date'], reverse=True):
if core_mark == 0:
if (mark['module'], mark['branch'], mark['user_id']) in cores:
# mark is from core engineer
core_mark = mark['value']
continue
disagreement = ((core_mark != 0) and
((core_mark < 0 < mark['value']) or
(core_mark > 0 > mark['value'])))
old_disagreement = mark.get('x')
mark['x'] = disagreement
if old_disagreement != disagreement:
yield mark
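    # Illustrative note on the rule above: marks are scanned newest-first, so
    # once the most recent core mark is found (say a -2), every older mark with
    # the opposite sign (e.g. a +1) is flagged with x=True, while agreeing
    # marks keep x=False; only marks whose flag changed are yielded.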
def _update_marks_with_disagreement(self):
LOG.debug('Process marks to find disagreements')
cores = set()
for user in self.runtime_storage_inst.get_all_users():
for (module, branch) in (user['core'] or []):
cores.add((module, branch, user['user_id']))
# map from review_id to current patch and list of marks
marks_per_patch = collections.defaultdict(
lambda: {'patch_number': 0, 'marks': []})
for record in self.runtime_storage_inst.get_all_records():
if record['record_type'] == 'mark' and record['type'] == 'CRVW':
review_id = record['review_id']
patch_number = record['patch']
if review_id in marks_per_patch:
# review is already seen, check if patch is newer
if (marks_per_patch[review_id]['patch_number'] <
patch_number):
                        # the patch is newer, close out the current one
for processed in self._close_patch(
cores, marks_per_patch[review_id]['marks']):
yield processed
del marks_per_patch[review_id]
marks_per_patch[review_id]['patch_number'] = patch_number
marks_per_patch[review_id]['marks'].append(record)
# purge the rest
for marks_patch in marks_per_patch.values():
for processed in self._close_patch(cores, marks_patch['marks']):
yield processed
def update(self, release_index=None):
self.runtime_storage_inst.set_records(
self._update_records_with_user_info())
if release_index:
self.runtime_storage_inst.set_records(
self._update_records_with_releases(release_index))
self.runtime_storage_inst.set_records(
self._update_reviews_with_sequence_number())
self.runtime_storage_inst.set_records(
self._update_blueprints_with_mention_info())
self.runtime_storage_inst.set_records(
self._update_commits_with_merge_date())
self._determine_core_contributors()
# disagreement calculation must go after determining core contributors
self.runtime_storage_inst.set_records(
self._update_marks_with_disagreement())
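# Hedged usage sketch (only process() and update() above are defined here; the
# driver names are illustrative): a caller typically streams raw records
# through process(), stores the result, then calls update() to refresh the
# derived fields:
#
#     runtime_storage_inst.set_records(processor.process(record_iterator))
#     processor.update(release_index)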
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.default import default
from utils import get_bare_principal
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.is_empty import is_empty
import status_params
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)
# Version being upgraded/downgraded to
version = default("/commandParams/version", None)
# Version that is CURRENT.
current_version = default("/hostLevelParams/current_version", None)
stack_version_unformatted = config['hostLevelParams']['stack_version']
upgrade_direction = default("/commandParams/upgrade_direction", None)
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
# Forced to True since this stack does not support the stack feature check
stack_supports_ranger_kerberos = True
stack_supports_ranger_audit_db = False
stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, version_for_stack_feature_checks)
# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
hostname = config['hostname']
# default kafka parameters
kafka_home = '/usr/lib/kafka'
kafka_bin = kafka_home+'/bin/kafka'
conf_dir = "/etc/kafka/conf"
limits_conf_dir = "/etc/security/limits.d"
# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
zookeeper_connect = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_user_nofile_limit = config['configurations']['kafka-env']['kafka_user_nofile_limit']
kafka_user_nproc_limit = config['configurations']['kafka-env']['kafka_user_nproc_limit']
# parameters for 2.2+
kafka_user = config['configurations']['kafka-env']['kafka_user']
kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
kafka_pid_dir = status_params.kafka_pid_dir
kafka_pid_file = kafka_pid_dir+"/kafka.pid"
# This is hardcoded in the kafka bash scripts' process lifecycle, which we have no control over
kafka_managed_pid_dir = "/var/run/kafka"
kafka_managed_log_dir = "/var/log/kafka"
user_group = config['configurations']['cluster-env']['user_group']
java64_home = config['hostLevelParams']['java_home']
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_jaas_conf_template = default("/configurations/kafka_jaas_conf/content", None)
kafka_client_jaas_conf_template = default("/configurations/kafka_client_jaas_conf/content", None)
kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
kafka_hosts.sort()
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts.sort()
secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
#Kafka Manager
kafka_manager_port = config['configurations']['kafka-manager-env']['kafka.manager.port']
kafka_manager_env_content = config['configurations']['kafka-manager-env']['kafka_manager_content']
kafka_manager_conf_dir="/usr/lib/kafka-manager/conf"
kafka_manager_application_conf_content = config['configurations']['kafka-manager-conf']['application.conf']
kafka_manager_host = config['clusterHostInfo']['kafka_manager_hosts'][0]
#Kafka broker jmx port
kafka_broker_jmx_port = config['configurations']['kafka-broker']['kafka.broker.jmx.port']
#Kafka log4j
kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)
controller_log_maxfilesize = default('/configurations/kafka-log4j/controller_log_maxfilesize',256)
controller_log_maxbackupindex = default('/configurations/kafka-log4j/controller_log_maxbackupindex',20)
if (('kafka-log4j' in config['configurations']) and ('content' in config['configurations']['kafka-log4j'])):
log4j_props = config['configurations']['kafka-log4j']['content']
else:
log4j_props = None
if 'ganglia_server_host' in config['clusterHostInfo'] and \
len(config['clusterHostInfo']['ganglia_server_host'])>0:
ganglia_installed = True
ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
ganglia_report_interval = 60
else:
ganglia_installed = False
metric_collector_port = ""
metric_collector_protocol = ""
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
pass
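# At this point metric_collector_port and metric_collector_protocol describe
# how to reach the AMS collector, e.g. protocol 'http' and port '6188' when the
# default webapp address '0.0.0.0:6188' is used and the HTTP policy is
# HTTP_ONLY.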
# Security-related params
kerberos_security_enabled = config['configurations']['cluster-env']['security_enabled']
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
(config['configurations']['kafka-broker']['security.inter.broker.protocol'] in ("PLAINTEXTSASL", "SASL_PLAINTEXT", "SASL_SSL")))
kafka_other_sasl_enabled = not kerberos_security_enabled and \
(("SASL_PLAINTEXT" in config['configurations']['kafka-broker']['listeners']) or
("PLAINTEXTSASL" in config['configurations']['kafka-broker']['listeners']) or
("SASL_SSL" in config['configurations']['kafka-broker']['listeners']))
if kerberos_security_enabled and 'kafka_principal_name' in config['configurations']['kafka-env']:
_hostname_lowercase = config['hostname'].lower()
_kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
elif kafka_other_sasl_enabled:
kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
else:
kafka_kerberos_params = ''
kafka_jaas_principal = None
kafka_keytab_path = None
# for curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
# ranger kafka plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger xml_configuration support flag; instead of depending on xml_configurations_supported in ranger-env, use the stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ambari-server hostname
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
# ranger kafka plugin enabled property
enable_ranger_kafka = default("configurations/ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled", "No")
enable_ranger_kafka = True if enable_ranger_kafka.lower() == 'yes' else False
# ranger kafka-plugin supported flag; instead of depending on is_supported_kafka_ranger in kafka-env.xml, use the stack feature
# Forced to True since this stack does not support the feature check
is_supported_kafka_ranger = True
# ranger kafka properties
if enable_ranger_kafka and is_supported_kafka_ranger:
# get ranger policy url
policymgr_mgr_url = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
# ranger audit db user
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
# ranger kafka service/repository name
repo_name = str(config['clusterName']) + '_kafka'
repo_name_value = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_kafka:
external_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
ranger_kafka_audit = config['configurations']['ranger-kafka-audit']
ranger_kafka_audit_attrs = config['configuration_attributes']['ranger-kafka-audit']
ranger_kafka_security = config['configurations']['ranger-kafka-security']
ranger_kafka_security_attrs = config['configuration_attributes']['ranger-kafka-security']
ranger_kafka_policymgr_ssl = config['configurations']['ranger-kafka-policymgr-ssl']
ranger_kafka_policymgr_ssl_attrs = config['configuration_attributes']['ranger-kafka-policymgr-ssl']
policy_user = config['configurations']['ranger-kafka-plugin-properties']['policy_user']
ranger_plugin_config = {
'username' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
'zookeeper.connect' : config['configurations']['ranger-kafka-plugin-properties']['zookeeper.connect'],
'commonNameForCertificate' : config['configurations']['ranger-kafka-plugin-properties']['common.name.for.certificate']
}
kafka_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': ranger_plugin_config,
'description': 'kafka repo',
'name': repo_name,
'repositoryType': 'kafka',
'type': 'kafka',
'assetType': '1'
}
if stack_supports_ranger_kerberos and kerberos_security_enabled:
ranger_plugin_config['policy.download.auth.users'] = kafka_user
ranger_plugin_config['tag.download.auth.users'] = kafka_user
ranger_plugin_config['ambari.service.check.user'] = policy_user
downloaded_custom_connector = None
previous_jdbc_jar_name = None
driver_curl_source = None
driver_curl_target = None
previous_jdbc_jar = None
if has_ranger_admin and stack_supports_ranger_audit_db:
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_target = format("{kafka_home}/libs/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
previous_jdbc_jar = format("{kafka_home}/libs/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
xa_audit_db_is_enabled = False
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = default('/configurations/ranger-kafka-audit/xasecure.audit.destination.hdfs', False)
ssl_keystore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
setup_ranger_env_sh_source = format('{stack_root}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
# for SQLA explicitly disable audit to DB for Ranger
if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
xa_audit_db_is_enabled = False
# ranger kafka plugin section end
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
import functools
# Create partial functions with common arguments for every HdfsResource call.
# To create or delete an HDFS directory/file, or copy from local, call params.HdfsResource in code.
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = kerberos_security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources()
)
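# Hedged usage sketch (the path and arguments below are illustrative, not taken
# from this file): the partial above lets callers omit the common keyword
# arguments, e.g.
#   HdfsResource('/apps/kafka', type='directory', action='create_on_execute', owner=kafka_user)
#   HdfsResource(None, action='execute')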
|
|
from collections import namedtuple
from games import (Game)
class GameState:
def __init__(self, to_move, board, label=None):
self.to_move = to_move
self.board = board
self.label = label
def __str__(self):
        if self.label is None:
return super(GameState, self).__str__()
return self.label
class FlagrantCopy(Game):
"""Game of Hex.
The goal of the game is for one player to cut
the other off from making a complete connection to their side.
X has the vertical, or connecting the top and bottom.
O has the horizontal, or connecting the left and right."""
def __init__(self, h=3, v=3, k=3):
self.h = h
self.v = v
self.k = k
self.initial = GameState(to_move='X', board={})
def actions(self, state):
try:
return state.moves
        except AttributeError:  # moves not cached on this state yet
pass
"Legal moves are any square not yet taken."
moves = []
for x in range(0, self.h):
for y in range(0, self.v):
if (x, y) not in state.board.keys():
moves.append((x, y))
state.moves = moves
return moves
# defines the order of play
def opponent(self, player):
if player == 'X':
return 'O'
if player == 'O':
return 'X'
return None
def result(self, state, move):
if move not in self.actions(state):
return state # Illegal move has no effect
board = state.board.copy()
player = state.to_move
board[move] = player
next_mover = self.opponent(player)
return GameState(to_move=next_mover, board=board)
def utility(self, state, player):
"Return the value to player; 1 for win, -1 for loss, 0 otherwise."
try:
return state.utility if player == 'X' else -state.utility
        except AttributeError:  # utility not cached on this state yet
pass
board = state.board
util = self.check_win(board, 'X')
if util == 0:
util = -self.check_win(board, 'O')
state.utility = util
return util if player == 'X' else -util
# Did I win?
def check_win(self, board, player):
# check vertical line
if player == 'X':
return self.check_connected(board, player)
# check horizontal
if player == 'O':
return self.check_connected(board, player)
return 0
def check_connected(self, board, player):
if player == 'X':
for a in range(0, self.h):
coor = (0, a)
try: # if the coordinate does not exist
if board[coor] == 'X':
tree = [coor]
surr = self.get_surrounding(coor, tree, board, player)
won = self.recur(surr, tree, board, player)
if won == 1:
return 1
else:
continue
                except KeyError:
pass
return 0
if player == 'O':
for a in range(0, self.v):
coor = (a, 0)
try: # if the coordinate does not exist
if board[coor] == 'O':
tree = [coor]
surr = self.get_surrounding(coor, tree, board, player)
won = self.recur(surr, tree, board, player)
if won == 1:
return 1
else:
continue
                except KeyError:
pass
return 0
def recur(self, surrounding, tree, board, player):
# Reached end of tree, but did not reach target
if len(surrounding) < 1:
return 0
elif not self.hit_target(surrounding, player):
for s in surrounding:
tree.append(s)
surr = self.get_surrounding(s, tree, board, player)
won = self.recur(surr, tree, board, player)
if won == 1:
return 1
else:
continue
else:
return 1
return 0
def hit_target(self, surrounding, player):
for cor in surrounding:
# Check vertical
if player == 'X':
r, _ = cor
if r == self.v - 1:
return True
# check horizontal
else: # if player is O
_, c = cor
if c == self.h - 1:
return True
return False
def get_surrounding(self, coor, tree, board, player):
surrounding = []
y, x = coor
for row in range(y - 1, y + 2):
for col in range(x - 1, x + 2):
                # skip coordinates already in the search tree (including self)
if (row, col) in tree:
continue
try: # coordinate is out of bounds
if board[(row, col)] == player:
surrounding.append((row, col))
                except KeyError:
pass
return surrounding
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return self.utility(state, 'X') != 0 or len(self.actions(state)) == 0
def display(self, state):
board = state.board
for x in range(0, self.h):
for y in range(0, self.v):
print(board.get((x, y), '.'), end=' ')
print()
myGame = FlagrantCopy(4, 4, 4)
won = GameState(
to_move='O',
board={
(0, 2): 'X',
(1, 0): 'O', (1, 1): 'O', (1, 2): 'X',
(2, 1): 'O', (2, 2): 'X',
(3, 2): 'X',
},
label='won'
)
win1 = GameState(
to_move='X',
board={
(0, 2): 'X',
(1, 0): 'O', (1, 1): 'O', (1, 2): 'X',
(2, 1): 'O', (2, 2): 'X',
},
label='win1'
)
win2 = GameState(
to_move='X',
board={
(1, 0): 'O', (1, 2): 'X',
(2, 0): 'O', (2, 1): 'O', (2, 2): 'X',
},
label='win2'
)
win3 = GameState(
to_move='X',
board={
(0, 0): 'O', (1, 2): 'X',
},
label='win3'
)
lose = GameState(
to_move='X',
board={
(0, 2): 'X',
(1, 0): 'O', (1, 1): 'O', (1, 2): 'O',
(2, 1): 'X', (2, 2): 'X', (2, 3): 'O',
},
label='lose'
)
lose1 = GameState(
to_move='O',
board={
(0, 2): 'X',
(1, 0): 'O', (1, 1): 'O', (1, 2): 'O',
(2, 1): 'X', (2, 2): 'X',
},
label='lose1'
)
myGames = {
myGame: [
won,
win1,
win2,
]
}
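# Hedged demo of the example states above (runs only when this file is executed
# directly; the expected values follow from check_connected as implemented here):
if __name__ == '__main__':
    print(myGame.utility(won, 'X'))    # expected: 1 (X connects top to bottom)
    print(myGame.terminal_test(won))   # expected: True
    print(myGame.terminal_test(win1))  # expected: False (X not yet connected)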
|
|
# Thanks to Zhao Yu for converting the .ipynb notebook to
# this simplified Python script that I edited a little.
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
FLAG = 'train_asdsd'  # set to 'train' to train; any other value restores models/model.ckpt and only evaluates
save_path_name = "models/" + file_name + "/model.ckpt"
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset_2/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                    row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
    X_test = load_X(X_test_signals_paths)  # [2947, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
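    # Illustrative example: one_hot(np.array([[0], [2], [1]])) yields
    # [[1, 0, 0], [0, 0, 1], [0, 1, 0]], since the largest label is 2 and
    # three columns are created.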
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
        # Subtract 1 from each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# exit()
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
            self.n_steps = len(X_train[0])  # 32 time steps per series (after the [-1, 32, 36] reshape)
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 2
self.batch_size = 1000
# LSTM structure
            self.n_inputs = len(X_train[0][0])  # 36 features per time step after the reshape (originally 9: three 3D sensor signals)
            self.n_hidden = 16  # number of hidden units in each LSTM cell
self.n_classes = 6 # Final output classes
            self.W = {
                'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),  # [36, 16]
                'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))  # [16, 6]
            }
            self.biases = {
                'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [16]
                'output': tf.Variable(tf.random_normal([self.n_classes]))  # [6]
            }
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
        # returns a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
    def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
        # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([5, 5, 1, 8])
        b_conv1 = bias_variable([8])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([5, 5, 8, 1])
b_conv2 = weight_variable([1])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
feature_mat = h_pool2
print("----feature_mat-----")
print(feature_mat)
# exit()
tf.add_to_collection('vars', W_conv1)
tf.add_to_collection('vars', b_conv1)
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
        # Initially: dims [0, 1, 2] = [batch_size, 128, 9], reshaped here to [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
        # New feature_mat's shape: [time_steps, batch_size, n_inputs] = [32, batch_size, 36]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
        feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # n_inputs = 36
        # New feature_mat's shape: [time_steps*batch_size, n_inputs] = [32*batch_size, 36]
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
        # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] = [32*batch_size, 16]
print("--n_steps--")
print(config.n_steps)
print("--n_steps--")
print(hidden)
# exit()
# Split the series because the rnn cell needs time_steps features, each of shape:
        hidden = tf.split(0, config.n_steps, hidden)  # (0, 32, [32*batch_size, 16])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
        # Stack two LSTM layers; both layers have the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
        # Get LSTM outputs; the states are internal to the LSTM cells and are not our focus here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
        # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
        lstm_last_output = outputs[-1]  # Take only the last time step's output, shape [?, n_hidden]
print("------------------last outputs-------------------")
        print(lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
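    # Shape summary for LSTM_Network as written above: input [batch, 32, 36]
    # -> reshaped to [batch, 32, 36, 1] -> conv/pool to [batch, 32, 36, 8]
    # -> second conv/pool to [batch, 32, 36, 1] -> reshaped back to
    # [batch, 32, 36] -> split into 32 time steps of 36 features -> two
    # stacked LSTM layers with n_hidden=16 -> logits of shape [batch, 6].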
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# exit()
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') :
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
                for start, end in zip(range(0, config.train_count, config.batch_size),  # (0, 7352, 1000)
                                      range(config.batch_size, config.train_count + 1,
                                            config.batch_size)):  # (1000, 7353, 1000)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
save_path = saver.save(sess, "models/model.ckpt")
print("Model saved in file: %s" % save_path)
# print("")
# print("final test accuracy: {}".format(accuracy_out))
# print("best epoch's test accuracy: {}".format(best_accuracy))
# print("")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
# Restore model weights from previously saved model
saver.restore(sess, "models/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
|
|
"""Extends the environment by adding observation and action history.
The implementation is a somewhat rough copy of the implementation in
the experimental branch.
"""
from gym import spaces
import numpy as np
from pybullet_envs.minitaur.envs.minitaur_reactive_env import MinitaurReactiveEnv
class MinitaurExtendedEnv(MinitaurReactiveEnv):
"""The 'extended' environment for Markovian property.
  This class adds prior actions and observations to the observation vector,
  thus making the environment "more" Markovian. This is especially useful
  for systems with latencies.
Args:
history_length: the length of the historic data
history_include_actions: a flag for including actions as history
history_include_states: a flag for including states as history
include_state_difference: a flag for including the first-order differences
as history
include_second_state_difference: a flag for including the second-order state
differences as history.
    include_base_position: a flag for including the base position in the
      observation.
    include_leg_model: a flag for including the leg model (swing and
      extension angles) in the observation.
never_terminate: if this is on, the environment unconditionally never
terminates.
    action_scale: the scale applied to incoming actions.
"""
MAX_BUFFER_SIZE = 1001
ACTION_DIM = 8
PARENT_OBSERVATION_DIM = 12
INIT_EXTENSION_POS = 2.0
INIT_SWING_POS = 0.0
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 50,
}
def __init__(self,
history_length=1,
history_include_actions=True,
history_include_states=False,
include_state_difference=False,
include_second_state_difference=False,
include_base_position=False,
include_leg_model=False,
never_terminate=False,
action_scale=0.5,
**kwargs):
self._kwargs = kwargs
self._history_length = history_length
self._history_include_actions = history_include_actions
self._history_include_states = history_include_states
self._include_state_difference = include_state_difference
self._include_second_state_difference = include_second_state_difference
self._include_base_position = include_base_position
self._include_leg_model = include_leg_model
self._never_terminate = never_terminate
self._action_scale = action_scale
self._past_parent_observations = np.zeros((self.MAX_BUFFER_SIZE + 1,
self.PARENT_OBSERVATION_DIM))
self._past_motor_angles = np.zeros((self.MAX_BUFFER_SIZE + 1, 8))
self._past_actions = np.zeros((self.MAX_BUFFER_SIZE, self.ACTION_DIM))
self._counter = 0
super(MinitaurExtendedEnv, self).__init__(**kwargs)
self.action_space = spaces.Box(-1.0, 1.0, self.action_space.shape)
self.observation_space = spaces.Box(-np.inf, np.inf,
self._get_observation().shape)
# This is mainly for the TF-Agents compatibility
self.action_space.flat_dim = len(self.action_space.low)
self.observation_space.flat_dim = len(self.observation_space.low)
def _get_observation(self):
"""Maybe concatenate motor velocity and torque into observations."""
parent_observation = super(MinitaurExtendedEnv, self)._get_observation()
parent_observation = np.array(parent_observation)
# Base class might require this.
self._observation = parent_observation
self._past_parent_observations[self._counter] = parent_observation
num_motors = self.minitaur.num_motors
self._past_motor_angles[self._counter] = parent_observation[-num_motors:]
history_states = []
history_actions = []
for i in range(self._history_length):
t = max(self._counter - i - 1, 0)
if self._history_include_states:
history_states.append(self._past_parent_observations[t])
if self._history_include_actions:
history_actions.append(self._past_actions[t])
t = self._counter
tm, tmm = max(0, self._counter - 1), max(0, self._counter - 2)
state_difference, second_state_difference = [], []
if self._include_state_difference:
state_difference = [
self._past_motor_angles[t] - self._past_motor_angles[tm]
]
if self._include_second_state_difference:
second_state_difference = [
self._past_motor_angles[t] - 2 * self._past_motor_angles[tm] +
self._past_motor_angles[tmm]
]
base_position = []
if self._include_base_position:
base_position = np.array((self.minitaur.GetBasePosition()))
leg_model = []
if self._include_leg_model:
raw_motor_angles = self.minitaur.GetMotorAngles()
      leg_model = self.convert_to_leg_model(raw_motor_angles)
observation_list = (
[parent_observation] + history_states + history_actions +
state_difference + second_state_difference + [base_position] +
[leg_model])
full_observation = np.concatenate(observation_list)
return full_observation
def reset(self):
"""Resets the time and history buffer."""
self._counter = 0
self._signal(self._counter) # This sets the current phase
self._past_parent_observations = np.zeros((self.MAX_BUFFER_SIZE + 1,
self.PARENT_OBSERVATION_DIM))
self._past_motor_angles = np.zeros((self.MAX_BUFFER_SIZE + 1, 8))
self._past_actions = np.zeros((self.MAX_BUFFER_SIZE, self.ACTION_DIM))
self._counter = 0
return np.array(super(MinitaurExtendedEnv, self).reset())
def step(self, action):
"""Step function wrapper can be used to add shaping terms to the reward.
Args:
action: an array of the given action
Returns:
next_obs: the next observation
reward: the reward for this experience tuple
done: the terminal flag
      info: additional information
"""
action *= self._action_scale
self._past_actions[self._counter] = action
self._counter += 1
next_obs, _, done, info = super(MinitaurExtendedEnv, self).step(action)
reward = self.reward()
info.update(base_reward=reward)
return next_obs, reward, done, info
def terminate(self):
"""The helper function to terminate the environment."""
super(MinitaurExtendedEnv, self)._close()
def _termination(self):
"""Determines whether the env is terminated or not.
    It checks whether 1) the front legs are bent too much (swing angle above
    0.8) or 2) the step counter exceeds the manually set limit.
Returns:
terminal: the terminal flag whether the env is terminated or not
"""
if self._never_terminate:
return False
    leg_model = self.convert_to_leg_model(self.minitaur.GetMotorAngles())
swing0 = leg_model[0]
swing1 = leg_model[2]
maximum_swing_angle = 0.8
if swing0 > maximum_swing_angle or swing1 > maximum_swing_angle:
return True
if self._counter >= 500:
return True
return False
def reward(self):
"""Compute rewards for the given time step.
It considers two terms: 1) forward velocity reward and 2) action
acceleration penalty.
Returns:
reward: the computed reward.
"""
current_base_position = self.minitaur.GetBasePosition()
dt = self.control_time_step
velocity = (current_base_position[0] - self._last_base_position[0]) / dt
velocity_reward = np.clip(velocity, -0.5, 0.5)
action = self._past_actions[self._counter - 1]
prev_action = self._past_actions[max(self._counter - 2, 0)]
prev_prev_action = self._past_actions[max(self._counter - 3, 0)]
acc = action - 2 * prev_action + prev_prev_action
action_acceleration_penalty = np.mean(np.abs(acc))
reward = 0.0
reward += 1.0 * velocity_reward
reward -= 0.1 * action_acceleration_penalty
return reward
@staticmethod
def convert_to_leg_model(motor_angles):
"""A helper function to convert motor angles to leg model.
Args:
motor_angles: raw motor angles:
Returns:
leg_angles: the leg pose model represented in swing and extension.
"""
# TODO(sehoonha): clean up model conversion codes
num_legs = 4
# motor_angles = motor_angles / (np.pi / 4.)
leg_angles = np.zeros(num_legs * 2)
for i in range(num_legs):
motor1, motor2 = motor_angles[2 * i:2 * i + 2]
swing = (-1)**(i // 2) * 0.5 * (motor2 - motor1)
extension = 0.5 * (motor1 + motor2)
leg_angles[i] = swing
leg_angles[i + num_legs] = extension
return leg_angles
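  # Illustrative example of convert_to_leg_model (assumed input): with all
  # eight motor angles equal to pi/2, every leg has swing 0 and extension
  # pi/2, so the result is [0, 0, 0, 0, pi/2, pi/2, pi/2, pi/2].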
def __getstate__(self):
"""A helper get state function for pickling."""
return {"kwargs": self._kwargs}
def __setstate__(self, state):
"""A helper set state function for pickling."""
self.__init__(**state["kwargs"])
|
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
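# Worked example of the step sizes above (illustrative values): with
# max_squared_sum=4.0, alpha_scaled=0.01, fit_intercept=True and the squared
# loss, L = 4.0 + 1 + 0.01 = 5.01, so plain SAG uses step = 1 / 5.01 ~= 0.1996;
# with is_saga=True and n_samples=100, mun = min(2 * 100 * 0.01, 5.01) = 2.0
# and the SAGA step is 1 / (2 * 5.01 + 2.0) ~= 0.0832.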
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
L2 regularization term in the objective function
``(0.5 * alpha * || W ||_F^2)``. Defaults to 1.
beta : float, optional
L1 regularization term in the objective function
``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.
Defaults to 0.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(
... solver='sag', multi_class='multinomial')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='multinomial', n_jobs=None, penalty='l2',
random_state=None, solver='sag', tol=0.0001, verbose=0,
warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
|
|
# pylint: disable=missing-docstring
import io
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group
from django.core.management import call_command
from django.test import override_settings
from guardian.shortcuts import assign_perm, remove_perm
# Import signals manually because they are skipped in the app's ready() during tests
from resolwe.elastic import signals # pylint: disable=unused-import
from resolwe.elastic.builder import index_builder
from resolwe.test import ElasticSearchTestCase
CUSTOM_SETTINGS = {
'INSTALLED_APPS': settings.INSTALLED_APPS + ('resolwe.elastic.tests.test_app',),
}
@override_settings(**CUSTOM_SETTINGS)
class IndexTest(ElasticSearchTestCase):
def setUp(self):
apps.clear_cache()
call_command('migrate', verbosity=0, interactive=False, load_initial_data=False)
super().setUp()
def test_mapping_multiple_times(self):
index_builder.create_mappings()
index_builder.create_mappings()
def test_indexing(self):
from .test_app.models import TestModel
from .test_app.elastic_indexes import TestSearchDocument, TestSearchIndex
# Create new object
test_obj = TestModel.objects.create(name='Object name', number=43)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
self.assertEqual(es_objects[0].name, 'Object name')
self.assertEqual(es_objects[0].num, 43)
self.assertEqual(es_objects[0].json['key'], 'value')
# Update existing object
test_obj.name = 'Another name'
test_obj.save()
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
self.assertEqual(es_objects[0].name, 'Another name')
self.assertEqual(es_objects[0].num, 43)
self.assertEqual(es_objects[0].json['key'], 'value')
# Create another object
TestModel.objects.create(name='Another object', number=3)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 2)
# Delete object
test_obj.delete()
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Create incorrect object (User object) and try to index it
user_model = get_user_model()
test_incorrect = user_model.objects.create(username='user_one')
TestSearchIndex().build(test_incorrect)
def test_bulk_indexing(self):
from .test_app.models import TestModel
from .test_app.elastic_indexes import TestSearchDocument
first_obj = TestModel.objects.create(name='First name', number=42)
TestModel.objects.create(name='Second name', number=43)
# Delete whole index
index_builder.delete()
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
# Build empty queryset
index_builder.build(queryset=TestModel.objects.none())
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
# Build only the subset of queryset defined in index
index_builder.build(queryset=TestModel.objects.filter(pk=first_obj.pk))
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Delete whole index
index_builder.delete()
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
# Build only object
index_builder.build(obj=first_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Delete whole index
index_builder.delete()
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
# Build whole queryset defined in index
index_builder.build()
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 2)
def test_management_commands(self):
from .test_app.models import TestModel
from .test_app.elastic_indexes import TestSearchDocument, TestAnalyzerSearchDocument
# Prepare test data
TestModel.objects.create(name='Object name', number=43)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Purge index
call_command('elastic_purge', interactive=False, verbosity=0)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
# Recreate index
call_command('elastic_index', interactive=False, verbosity=0)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Purge index
call_command('elastic_purge', interactive=False, verbosity=0)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
# Recreate only a specific index
call_command('elastic_index', index=['TestAnalyzerSearchIndex'], interactive=False, verbosity=0)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
es_objects = TestAnalyzerSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Purge only a specific index
call_command('elastic_purge', index=['TestAnalyzerSearchIndex'], interactive=False, verbosity=0)
es_objects = TestAnalyzerSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
call_command('elastic_index', exclude=['TestAnalyzerSearchIndex'], interactive=False, verbosity=0)
es_objects = TestAnalyzerSearchDocument.search().execute()
self.assertEqual(len(es_objects), 0)
call_command('elastic_index', interactive=False, verbosity=0)
es_objects = TestAnalyzerSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
call_command('elastic_purge', exclude=['TestAnalyzerSearchIndex'], interactive=False, verbosity=0)
es_objects = TestAnalyzerSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
# Recreate an invalid index
output = io.StringIO()
call_command('elastic_index', index=['InvalidIndex'], interactive=False, verbosity=0, stderr=output)
self.assertIn("Unknown index: InvalidIndex", output.getvalue())
call_command('elastic_index', exclude=['InvalidIndex'], interactive=False, verbosity=0, stderr=output)
self.assertIn("Unknown index: InvalidIndex", output.getvalue())
# Purge an invalid index
call_command('elastic_purge', index=['InvalidIndex'], interactive=False, verbosity=0, stderr=output)
self.assertIn("Unknown index: InvalidIndex", output.getvalue())
call_command('elastic_purge', exclude=['InvalidIndex'], interactive=False, verbosity=0, stderr=output)
self.assertIn("Unknown index: InvalidIndex", output.getvalue())
# Create mappings.
call_command('elastic_mapping', interactive=False, verbosity=0)
def test_permissions(self):
from .test_app.models import TestModel
from .test_app.elastic_indexes import TestSearchDocument
# Prepare users and groups
user_model = get_user_model()
user_1 = user_model.objects.create(username='user_one')
user_2 = user_model.objects.create(username='user_two')
user_3 = user_model.objects.create(username='user_three')
user_pub = user_model.objects.get(username='public')
group = Group.objects.create(name='group')
# Create test object
test_obj = TestModel.objects.create(name='Object name', number=43)
assign_perm('view_testmodel', user_1, test_obj)
assign_perm('view_testmodel', user_2, test_obj)
assign_perm('view_testmodel', group, test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_1.pk, user_2.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [group.pk])
self.assertEqual(es_objects[0].public_permission, False)
# Add user permission
assign_perm('view_testmodel', user_3, test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_1.pk, user_2.pk, user_3.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [group.pk])
self.assertEqual(es_objects[0].public_permission, False)
# Remove user permission
remove_perm('view_testmodel', user_2, test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_1.pk, user_3.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [group.pk])
self.assertEqual(es_objects[0].public_permission, False)
# Remove group permission
remove_perm('view_testmodel', group, test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_1.pk, user_3.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [])
self.assertEqual(es_objects[0].public_permission, False)
# Add group permission
assign_perm('view_testmodel', group, test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_1.pk, user_3.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [group.pk])
self.assertEqual(es_objects[0].public_permission, False)
# Add public permission
assign_perm('view_testmodel', AnonymousUser(), test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_pub.pk, user_1.pk, user_3.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [group.pk])
self.assertEqual(es_objects[0].public_permission, True)
# Remove public permission
remove_perm('view_testmodel', AnonymousUser(), test_obj)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(es_objects[0].users_with_permissions, [user_1.pk, user_3.pk])
self.assertEqual(es_objects[0].groups_with_permissions, [group.pk])
self.assertEqual(es_objects[0].public_permission, False)
def test_field_name(self):
from .test_app.models import TestModel
from .test_app.elastic_indexes import TestSearchDocument
TestModel.objects.create(name='Hello world FOO_BAR-G17-SA', number=42)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='hello').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='world').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', **{'field_name.raw': 'hello'}).execute()
self.assertEqual(len(es_objects), 0)
es_objects = TestSearchDocument.search().query(
'match', **{'field_name.raw': 'Hello world FOO_BAR-G17-SA'}
).execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='foo').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='bar').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='g17').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='g17-sa').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestSearchDocument.search().query('match', field_name='17').execute()
self.assertEqual(len(es_objects), 1)
def test_field_process_type(self):
from .test_app.models import TestModel
from .test_app.elastic_indexes import TestSearchDocument
TestModel.objects.create(field_process_type='data:geneset', number=42)
TestModel.objects.create(field_process_type='data:geneset:venn', number=42)
TestModel.objects.create(field_process_type='data:geneset:venn:omg', number=42)
es_objects = TestSearchDocument.search().execute()
self.assertEqual(len(es_objects), 3)
es_objects = TestSearchDocument.search().query('match', field_process_type='data').execute()
self.assertEqual(len(es_objects), 3)
es_objects = TestSearchDocument.search().query('match', field_process_type='data:geneset').execute()
self.assertEqual(len(es_objects), 3)
es_objects = TestSearchDocument.search().query('match', field_process_type='data:geneset:venn').execute()
self.assertEqual(len(es_objects), 2)
es_objects = TestSearchDocument.search().query('match', field_process_type='data:geneset:venn:omg').execute()
self.assertEqual(len(es_objects), 1)
        # Check that the tokenizer did not include intermediate terms.
es_objects = TestSearchDocument.search().query('match', field_process_type='geneset').execute()
self.assertEqual(len(es_objects), 0)
es_objects = TestSearchDocument.search().query('match', field_process_type='venn').execute()
self.assertEqual(len(es_objects), 0)
es_objects = TestSearchDocument.search().query('match', field_process_type='omg').execute()
self.assertEqual(len(es_objects), 0)
def test_dependencies(self):
from .test_app.models import TestModelWithDependency, TestDependency
from .test_app.elastic_indexes import TestModelWithDependencyDocument, TestModelWithFilterDependencyDocument
model = TestModelWithDependency.objects.create(name='Deps')
dep1 = TestDependency.objects.create(name='one')
dep2 = TestDependency.objects.create(name='two')
dep3 = TestDependency.objects.create(name='three')
model.dependencies.add(dep1)
model.dependencies.add(dep2)
dep3.testmodelwithdependency_set.add(model)
es_objects = TestModelWithDependencyDocument.search().query('match', name='deps').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithDependencyDocument.search().query('match', name='one').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithDependencyDocument.search().query('match', name='two').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithDependencyDocument.search().query('match', name='three').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithDependencyDocument.search().query('match', name='four').execute()
self.assertEqual(len(es_objects), 0)
dep3.name = 'four'
dep3.save()
es_objects = TestModelWithDependencyDocument.search().query('match', name='four').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithDependencyDocument.search().query('match', name='three').execute()
self.assertEqual(len(es_objects), 0)
dep3.delete()
es_objects = TestModelWithDependencyDocument.search().query('match', name='four').execute()
self.assertEqual(len(es_objects), 0)
# Ensure that previous updates did not cause the filtered version to be updated.
es_objects = TestModelWithFilterDependencyDocument.search().execute()
self.assertEqual(len(es_objects), 1)
        # If the filtered version had been updated, this would instead equal 'Deps: one, two'.
self.assertEqual(es_objects[0].name, 'Deps: ')
dep4 = TestDependency.objects.create(name='hello')
dep5 = TestDependency.objects.create(name='hello')
model.dependencies.add(dep4)
dep5.testmodelwithdependency_set.add(model)
es_objects = TestModelWithFilterDependencyDocument.search().execute()
self.assertEqual(len(es_objects), 1)
        # It is correct that even non-dependencies appear in the name, since dependencies are
        # only used to determine when to trigger updates.
self.assertEqual(es_objects[0].name, 'Deps: one, two, hello, hello')
model.dependencies.remove(dep4)
dep5.testmodelwithdependency_set.remove(model)
es_objects = TestModelWithFilterDependencyDocument.search().execute()
self.assertEqual(len(es_objects), 1)
        # It is correct that even non-dependencies appear in the name, since dependencies are
        # only used to determine when to trigger updates.
self.assertEqual(es_objects[0].name, 'Deps: one, two')
def test_dependencies_reverse(self):
from .test_app.models import TestModelWithDependency, TestDependency
from .test_app.elastic_indexes import TestModelWithReverseDependencyDocument
model1 = TestModelWithDependency.objects.create(name='One')
model2 = TestModelWithDependency.objects.create(name='Two')
model3 = TestModelWithDependency.objects.create(name='Three')
dep = TestDependency.objects.create(name='deps')
model1.dependencies.add(dep)
model2.dependencies.add(dep)
dep.testmodelwithdependency_set.add(model3)
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='deps').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='one').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='two').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='three').execute()
self.assertEqual(len(es_objects), 1)
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='four').execute()
self.assertEqual(len(es_objects), 0)
model3.name = 'Four'
model3.save()
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='four').execute()
self.assertEqual(len(es_objects), 1)
model3.delete()
es_objects = TestModelWithReverseDependencyDocument.search().query('match', name='four').execute()
self.assertEqual(len(es_objects), 0)
def test_dependencies_self(self):
from .test_app.models import TestSelfDependency
from .test_app.elastic_indexes import TestModelWithSelfDependencyDocument
dep1 = TestSelfDependency.objects.create(name='One')
dep2 = TestSelfDependency.objects.create(name='Two')
dep3 = TestSelfDependency.objects.create(name='Three')
parent = TestSelfDependency.objects.create(name='Parent')
parent.dependencies.add(dep1)
parent.dependencies.add(dep2)
dep3.parents.add(parent)
es_objects = TestModelWithSelfDependencyDocument.search().query('match', name='parent').execute()
self.assertEqual(len(es_objects), 1)
self.assertEqual(es_objects[0].name, 'Parent: One, Two, Three')
dep2.name = 'Too'
dep2.save()
es_objects = TestModelWithSelfDependencyDocument.search().query('match', name='parent').execute()
self.assertEqual(len(es_objects), 1)
self.assertEqual(es_objects[0].name, 'Parent: One, Too, Three')
|
|
# -*- coding: utf-8 -*-
import copy
import re
import tempfile
from typing import Any, Dict, List, Optional, Union
from pytest import mark, param, raises
from omegaconf import (
MISSING,
AnyNode,
DictConfig,
DictKeyType,
ListConfig,
MissingMandatoryValue,
OmegaConf,
UnsupportedValueType,
ValidationError,
_utils,
flag_override,
open_dict,
)
from omegaconf._utils import _ensure_container
from omegaconf.basecontainer import BaseContainer
from omegaconf.errors import (
ConfigAttributeError,
ConfigKeyError,
ConfigTypeError,
InterpolationKeyError,
InterpolationToMissingValueError,
KeyValidationError,
)
from tests import (
ConcretePlugin,
Enum1,
IllegalType,
Plugin,
StructuredWithMissing,
SubscriptedDict,
User,
)
def test_setattr_deep_value() -> None:
c = OmegaConf.create({"a": {"b": {"c": 1}}})
c.a.b = 9
assert {"a": {"b": 9}} == c
def test_setattr_deep_from_empty() -> None:
c = OmegaConf.create()
    # Unfortunately we can't just do c.a.b = 9 here.
    # The reason is that c.a is accessed first, and since it does not exist there
    # is nothing to call .b = 9 on.
    # The alternative would be to auto-create fields as they are accessed, but that
    # opens a whole new can of worms and also breaks map semantics.
    # (The next test illustrates the failure mode.)
c.a = {}
c.a.b = 9 # type: ignore
assert {"a": {"b": 9}} == c
def test_setattr_dict_nested() -> None:
c = OmegaConf.create({"a": {"b": {"c": 1}}})
c.a.b = {"z": 10}
assert c == {"a": {"b": {"z": 10}}}
def test_getattr() -> None:
c = OmegaConf.create("a: b")
assert isinstance(c, DictConfig)
assert "b" == c.a
def test_getattr_dict() -> None:
c = OmegaConf.create("a: {b: 1}")
assert isinstance(c, DictConfig)
assert {"b": 1} == c.a
@mark.parametrize("struct", [False, True])
@mark.parametrize(
"cfg",
[
param({"name": "alice", "age": 1}, id="dict"),
param(User(name="alice", age=1), id="structured_config"),
],
)
def test_delattr(cfg: Any, struct: bool) -> None:
cfg = OmegaConf.create(cfg)
OmegaConf.set_struct(cfg, struct)
delattr(cfg, "name")
assert cfg == {"age": 1}
with raises(ConfigAttributeError):
delattr(cfg, "c")
@mark.parametrize(
"key,match",
[
param("a", "a", id="str"),
param(1, "1", id="int"),
param(123.45, "123.45", id="float"),
param(True, "True", id="bool-T"),
param(False, "False", id="bool-F"),
param(Enum1.FOO, "FOO", id="enum"),
],
)
class TestDictKeyTypes:
def test_mandatory_value(self, key: DictKeyType, match: str) -> None:
c = OmegaConf.create({key: "???"})
with raises(MissingMandatoryValue, match=match):
c[key]
if isinstance(key, str):
with raises(MissingMandatoryValue, match=match):
getattr(c, key)
def test_nested_dict_mandatory_value_inner(
self, key: DictKeyType, match: str
) -> None:
c = OmegaConf.create({"b": {key: "???"}})
with raises(MissingMandatoryValue, match=match):
c.b[key]
if isinstance(key, str):
with raises(MissingMandatoryValue, match=match):
getattr(c.b, key)
def test_nested_dict_mandatory_value_outer(
self, key: DictKeyType, match: str
) -> None:
c = OmegaConf.create({key: {"b": "???"}})
with raises(MissingMandatoryValue, match=match):
c[key].b
if isinstance(key, str):
with raises(MissingMandatoryValue, match=match):
getattr(c, key).b
def test_subscript_get(self, key: DictKeyType, match: str) -> None:
c = OmegaConf.create({key: "b"})
assert isinstance(c, DictConfig)
assert "b" == c[key]
def test_subscript_set(self, key: DictKeyType, match: str) -> None:
c = OmegaConf.create()
c[key] = "b"
assert {key: "b"} == c
@mark.parametrize(
"src,key,expected",
[
({"a": 10, "b": 11}, "a", {"b": 11}),
({1: "a", 2: "b"}, 1, {2: "b"}),
({123.45: "a", 67.89: "b"}, 67.89, {123.45: "a"}),
({True: "a", False: "b"}, False, {True: "a"}),
({Enum1.FOO: "foo", Enum1.BAR: "bar"}, Enum1.FOO, {Enum1.BAR: "bar"}),
],
)
class TestDelitemKeyTypes:
def test_dict_delitem(self, src: Any, key: DictKeyType, expected: Any) -> None:
c = OmegaConf.create(src)
assert c == src
del c[key]
assert c == expected
def test_dict_delitem_KeyError(
self, src: Any, key: DictKeyType, expected: Any
) -> None:
c = OmegaConf.create(expected)
assert c == expected
with raises(KeyError):
del c[key]
def test_dict_struct_delitem(
self, src: Any, key: DictKeyType, expected: Any
) -> None:
c = OmegaConf.create(src)
OmegaConf.set_struct(c, True)
with raises(ConfigTypeError):
del c[key]
with open_dict(c):
del c[key]
assert key not in c
def test_attribute_error() -> None:
c = OmegaConf.create()
with raises(ConfigAttributeError):
assert c.missing_key
@mark.parametrize("c", [{}, OmegaConf.create()])
def test_get_default_value(c: Any) -> None:
assert c.get("missing_key", "a default value") == "a default value"
def test_scientific_notation_float() -> None:
c = OmegaConf.create("a: 10e-3")
assert isinstance(c, DictConfig)
assert 10e-3 == c.a
@mark.parametrize("struct", [None, True, False])
@mark.parametrize("default_val", [4, True, False, None])
class TestGetWithDefault:
@mark.parametrize(
"d,select,key",
[
({"key": {"subkey": 2}}, "", "missing"),
({"key": {"subkey": 2}}, "key", "missing"),
({"key": "???"}, "", "key"),
({"key": DictConfig(content="???")}, "", "key"),
({"key": ListConfig(content="???")}, "", "key"),
],
)
def test_dict_get_with_default(
self, d: Any, select: Any, key: Any, default_val: Any, struct: Optional[bool]
) -> None:
c = OmegaConf.create(d)
c = OmegaConf.select(c, select)
OmegaConf.set_struct(c, struct)
assert c.get(key, default_val) == default_val
@mark.parametrize(
("d", "select", "key", "expected"),
[
({"key": "value"}, "", "key", "value"),
({"key": None}, "", "key", None),
({"key": {"subkey": None}}, "key", "subkey", None),
({"key": DictConfig(is_optional=True, content=None)}, "", "key", None),
({"key": ListConfig(is_optional=True, content=None)}, "", "key", None),
],
)
def test_dict_get_not_returning_default(
self,
d: Any,
select: Any,
key: Any,
expected: Any,
default_val: Any,
struct: Optional[bool],
) -> None:
c = OmegaConf.create(d)
c = OmegaConf.select(c, select)
OmegaConf.set_struct(c, struct)
assert c.get(key, default_val) == expected
@mark.parametrize(
"d,exc",
[
({"key": "${foo}"}, InterpolationKeyError),
(
{"key": "${foo}", "foo": "???"},
InterpolationToMissingValueError,
),
({"key": DictConfig(content="${foo}")}, InterpolationKeyError),
],
)
def test_dict_get_with_default_errors(
self, d: Any, exc: type, struct: Optional[bool], default_val: Any
) -> None:
c = OmegaConf.create(d)
OmegaConf.set_struct(c, struct)
with raises(exc):
c.get("key", default_value=123)
def test_map_expansion() -> None:
c = OmegaConf.create("{a: 2, b: 10}")
assert isinstance(c, DictConfig)
def foo(a: int, b: int) -> int:
return a + b
assert 12 == foo(**c)
def test_items_iterator_behavior() -> None:
c = OmegaConf.create({"a": 2, "b": 10})
assert list(c.items()) == [("a", 2), ("b", 10)]
# This is actually not compatible with native dict:
# Due to implementation considerations, DictConfig items() returns a list.
# If that can be fixed, feel free to remove this block
items = c.items()
for x in [("a", 2), ("b", 10)]:
assert x in items
items2 = iter(c.items())
assert next(items2) == ("a", 2)
assert next(items2) == ("b", 10)
with raises(StopIteration):
next(items2)
def test_mutate_config_via_items_iteration() -> None:
c = OmegaConf.create({"a": {"v": 1}, "b": {"v": 1}})
for k, v in c.items():
v.v = 2
assert c == {"a": {"v": 2}, "b": {"v": 2}}
def test_items_with_interpolation() -> None:
c = OmegaConf.create({"a": 2, "b": "${a}"})
r = {}
for k, v in c.items():
r[k] = v
assert r["a"] == 2
assert r["b"] == 2
@mark.parametrize(
("cfg", "expected", "expected_no_resolve"),
[
param({}, [], [], id="empty"),
param({"a": 10}, [("a", 10)], [("a", 10)], id="simple"),
param(
{"a": 2, "b": "${a}"},
[("a", 2), ("b", 2)],
[("a", 2), ("b", "${a}")],
id="interpolation_in_value",
),
param(
{"a": "???"},
raises(MissingMandatoryValue),
[("a", "???")],
id="missing_value",
),
# Special DictConfigs
param(DictConfig(None), raises(TypeError), raises(TypeError), id="none"),
param(
DictConfig("???"),
raises(MissingMandatoryValue),
raises(MissingMandatoryValue),
id="missing",
),
param(DictConfig("${missing}"), [], [], id="missing_interpolation"),
param(
DictConfig("${a}", parent=DictConfig({"a": {"b": 10}})),
[],
[],
id="missing_interpolation",
),
],
)
def test_items(cfg: Any, expected: Any, expected_no_resolve: Any) -> None:
cfg = _ensure_container(cfg)
if isinstance(expected, list):
assert list(cfg.items()) == expected
else:
with expected:
cfg.items()
if isinstance(expected_no_resolve, list):
pairs = list(cfg.items_ex(resolve=False))
assert pairs == expected_no_resolve
for idx in range(len(expected_no_resolve)):
assert type(pairs[idx][0]) == type(expected_no_resolve[idx][0]) # noqa
assert type(pairs[idx][1]) == type(expected_no_resolve[idx][1]) # noqa
else:
with expected_no_resolve:
cfg.items_ex(resolve=False)
@mark.parametrize(
("cfg", "expected"),
[
param({}, [], id="empty"),
param({"a": 10}, ["a"], id="full"),
param({"a": "???"}, ["a"], id="missing_value"),
param({"a": "${missing}}"}, ["a"], id="missing_interpolation"),
param({"a": "${b}}", "b": 10}, ["a", "b"], id="interpolation"),
param(DictConfig(None), [], id="none_dictconfig"),
param(DictConfig("???"), [], id="missing_dictconfig"),
param(DictConfig("${missing}"), [], id="missing_interpolation_dictconfig"),
param(
DictConfig("${a}", parent=OmegaConf.create({"a": {"b": 10}})),
[],
id="interpolation_dictconfig",
),
],
)
def test_dict_keys(cfg: Any, expected: Any) -> None:
c = _ensure_container(cfg)
assert list(c.keys()) == expected
def test_pickle_get_root() -> None:
    # Test that get_root() is reconstructed correctly for configs loaded from pickle.
with tempfile.TemporaryFile() as fp:
c1 = OmegaConf.create({"a": {"a1": 1, "a2": 2}})
c2 = OmegaConf.create(
{"b": {"b1": "${a.a1}", "b2": 4, "bb": {"bb1": 3, "bb2": 4}}}
)
c3 = OmegaConf.merge(c1, c2)
assert isinstance(c3, DictConfig)
import pickle
pickle.dump(c3, fp)
fp.flush()
fp.seek(0)
loaded_c3 = pickle.load(fp)
def test(conf: DictConfig) -> None:
assert conf._get_root() == conf
assert conf.a._get_root() == conf
assert conf.b._get_root() == conf
assert conf.b.bb._get_root() == conf
assert c3 == loaded_c3
test(c3)
test(loaded_c3)
def test_iterate_dictionary() -> None:
c = OmegaConf.create({"a": 1, "b": 2})
m2 = {}
for key in c:
m2[key] = c[key]
assert m2 == c
def test_iterate_dict_with_interpolation() -> None:
c = OmegaConf.create({"a": "${b}", "b": 2})
expected = [("a", 2), ("b", 2)]
i = 0
for k, v in c.items():
assert k == expected[i][0]
assert v == expected[i][1]
i = i + 1
@mark.parametrize(
"cfg, key, default_, expected",
[
# string key
param({"a": 1, "b": 2}, "a", "__NO_DEFAULT__", 1, id="no_default"),
param({"a": 1, "b": 2}, "not_found", None, None, id="none_default"),
param({"a": 1, "b": 2}, "not_found", "default", "default", id="with_default"),
param({"a": None}, "a", "default", None, id="none_value"),
param({"a": "???"}, "a", "default", "default", id="missing_value"),
# Interpolations
param({"a": "${b}", "b": 2}, "a", "__NO_DEFAULT__", 2, id="interpolation"),
# enum key
param(
{Enum1.FOO: "bar"},
Enum1.FOO,
"__NO_DEFAULT__",
"bar",
id="enum_key_no_default",
),
param(
{Enum1.FOO: "bar"}, Enum1.BAR, None, None, id="enum_key_with_none_default"
),
param(
{Enum1.FOO: "bar"},
Enum1.BAR,
"default",
"default",
id="enum_key_with_default",
),
# other key types
param(
{123.45: "a", 67.89: "b"},
67.89,
"__NO_DEFAULT__",
"b",
id="float_key_no_default",
),
param(
{123.45: "a", 67.89: "b"},
"not found",
None,
None,
id="float_key_with_default",
),
param(
{True: "a", False: "b"},
False,
"__NO_DEFAULT__",
"b",
id="bool_key_no_default",
),
param(
{True: "a", False: "b"}, "not found", None, None, id="bool_key_with_default"
),
],
)
def test_dict_pop(cfg: Dict[Any, Any], key: Any, default_: Any, expected: Any) -> None:
c = OmegaConf.create(cfg)
if default_ != "__NO_DEFAULT__":
val = c.pop(key, default_)
else:
val = c.pop(key)
assert val == expected
assert type(val) == type(expected)
def test_dict_struct_mode_pop() -> None:
cfg = OmegaConf.create({"name": "Bond", "age": 7})
cfg._set_flag("struct", True)
with raises(ConfigTypeError):
cfg.pop("name")
with raises(ConfigTypeError):
cfg.pop("bar")
with raises(ConfigTypeError):
cfg.pop("bar", "not even with default")
def test_dict_structured_mode_pop() -> None:
cfg = OmegaConf.create({"user": User(name="Bond")})
with raises(ConfigTypeError):
cfg.user.pop("name")
with raises(ConfigTypeError):
cfg.user.pop("bar")
with raises(ConfigTypeError):
cfg.user.pop("bar", "not even with default")
# Unlocking the top level node is not enough.
with raises(ConfigTypeError):
with open_dict(cfg):
cfg.user.pop("name")
# You need to unlock the specified structured node to pop a field from it.
with open_dict(cfg.user):
cfg.user.pop("name")
assert "name" not in cfg.user
@mark.parametrize(
"cfg, key, expectation",
[
# key not found
({"a": 1, "b": 2}, "not_found", raises(KeyError)),
({1: "a", 2: "b"}, 3, raises(KeyError)),
({123.45: "a", 67.89: "b"}, 10.11, raises(KeyError)),
({True: "a"}, False, raises(KeyError)),
({Enum1.FOO: "bar"}, Enum1.BAR, raises(KeyError)),
# Interpolations
({"a": "???", "b": 2}, "a", raises(MissingMandatoryValue)),
({1: "???", 2: "b"}, 1, raises(MissingMandatoryValue)),
({123.45: "???", 67.89: "b"}, 123.45, raises(MissingMandatoryValue)),
({"a": "${b}"}, "a", raises(InterpolationKeyError)),
({True: "???", False: "b"}, True, raises(MissingMandatoryValue)),
(
{Enum1.FOO: "???", Enum1.BAR: "bar"},
Enum1.FOO,
raises(MissingMandatoryValue),
),
(
{"a": "${b}", "b": "???"},
"a",
raises(InterpolationToMissingValueError),
),
],
)
def test_dict_pop_error(cfg: Dict[Any, Any], key: Any, expectation: Any) -> None:
c = OmegaConf.create(cfg)
with expectation:
c.pop(key)
assert c == cfg
@mark.parametrize(
"conf,key,expected",
[
# str key type
({"a": 1, "b": {}}, "a", True),
({"a": 1, "b": {}}, "b", True),
({"a": 1, "b": {}}, "c", False),
({"a": 1, "b": "${a}"}, "b", True),
({"a": 1, "b": "???"}, "b", False),
({"a": 1, "b": "???", "c": "${b}"}, "c", True),
({"a": 1, "b": "${not_found}"}, "b", True),
({"a": "${unknown_resolver:bar}"}, "a", True),
({"a": None, "b": "${a}"}, "b", True),
({"a": "cat", "b": "${a}"}, "b", True),
# Enum key type
({Enum1.FOO: 1, "b": {}}, Enum1.FOO, True),
({Enum1.FOO: 1, "b": {}}, "aaa", False),
({Enum1.FOO: 1, "b": {}}, "FOO", False),
(
DictConfig(content={Enum1.FOO: "foo"}, key_type=Enum1, element_type=str),
Enum1.FOO,
True,
),
(
DictConfig(content={Enum1.FOO: "foo"}, key_type=Enum1, element_type=str),
"incompatible_key_type",
False,
),
(
DictConfig(content={Enum1.FOO: "foo"}, key_type=Enum1, element_type=str),
"FOO",
True,
),
(
DictConfig(content={Enum1.FOO: "foo"}, key_type=Enum1, element_type=str),
None,
False,
),
# int key type
({1: "a", 2: {}}, 1, True),
({1: "a", 2: {}}, 2, True),
({1: "a", 2: {}}, 3, False),
({1: "a", 2: "???"}, 2, False),
({1: "a", 2: "???"}, None, False),
({1: "a", 2: "???"}, "1", False),
(DictConfig({1: "a", 2: "???"}, key_type=int), "1", False),
# float key type
({1.1: "a", 2.2: {}}, 1.1, True),
({1.1: "a", 2.2: {}}, "1.1", False),
(DictConfig({1.1: "a", 2.2: {}}, key_type=float), "1.1", False),
({1.1: "a", 2.2: {}}, 2.2, True),
({1.1: "a", 2.2: {}}, 3.3, False),
({1.1: "a", 2.2: "???"}, 2.2, False),
({1.1: "a", 2.2: "???"}, None, False),
# bool key type
({True: "a", False: {}}, True, True),
({True: "a", False: {}}, False, True),
({True: "a", False: {}}, "no", False),
({True: "a", False: {}}, 1, True),
({True: "a", False: {}}, None, False),
({True: "a", False: "???"}, False, False),
],
)
def test_in_dict(conf: Any, key: str, expected: Any) -> None:
conf = OmegaConf.create(conf)
assert (key in conf) == expected
def test_get_root() -> None:
c = OmegaConf.create({"a": 123, "b": {"bb": 456, "cc": 7}})
assert c._get_root() == c
assert c.b._get_root() == c
def test_get_root_of_merged() -> None:
c1 = OmegaConf.create({"a": {"a1": 1, "a2": 2}})
c2 = OmegaConf.create({"b": {"b1": "???", "b2": 4, "bb": {"bb1": 3, "bb2": 4}}})
c3 = OmegaConf.merge(c1, c2)
assert isinstance(c3, DictConfig)
assert c3._get_root() == c3
assert c3.a._get_root() == c3
assert c3.b._get_root() == c3
assert c3.b.bb._get_root() == c3
def test_dict_config() -> None:
c = OmegaConf.create({})
assert isinstance(c, DictConfig)
def test_dict_structured_delitem() -> None:
c = OmegaConf.structured(User(name="Bond"))
with raises(ConfigTypeError):
del c["name"]
with open_dict(c):
del c["name"]
assert "name" not in c
def test_dict_nested_structured_delitem() -> None:
c = OmegaConf.create({"user": User(name="Bond")})
with raises(ConfigTypeError):
del c.user["name"]
# Unlocking the top level node is not enough.
with raises(ConfigTypeError):
with open_dict(c):
del c.user["name"]
# You need to unlock the specified structured node to delete a field from it.
with open_dict(c.user):
del c.user["name"]
assert "name" not in c.user
@mark.parametrize(
"d, expected",
[
param(DictConfig({}), 0, id="empty"),
param(DictConfig({"a": 10}), 1, id="full"),
param(DictConfig(None), 0, id="none"),
param(DictConfig("???"), 0, id="missing"),
param(
DictConfig("${foo}", parent=OmegaConf.create({"foo": {"a": 10}})),
0,
id="interpolation",
),
param(DictConfig("${foo}"), 0, id="broken_interpolation"),
],
)
def test_dict_len(d: DictConfig, expected: Any) -> None:
assert d.__len__() == expected
def test_dict_assign_illegal_value() -> None:
c = OmegaConf.create()
iv = IllegalType()
with raises(UnsupportedValueType, match=re.escape("key: a")):
c.a = iv
with flag_override(c, "allow_objects", True):
c.a = iv
assert c.a == iv
def test_dict_assign_illegal_value_nested() -> None:
c = OmegaConf.create({"a": {}})
iv = IllegalType()
with raises(UnsupportedValueType, match=re.escape("key: a.b")):
c.a.b = iv
with flag_override(c, "allow_objects", True):
c.a.b = iv
assert c.a.b == iv
def test_assign_dict_in_dict() -> None:
c = OmegaConf.create({})
c.foo = {"foo": "bar"}
assert c.foo == {"foo": "bar"}
assert isinstance(c.foo, DictConfig)
def test_instantiate_config_fails() -> None:
with raises(TypeError):
BaseContainer() # type: ignore
@mark.parametrize(
"cfg, key, expected",
[
({"a": 1, "b": 2, "c": 3}, None, ["a", "b", "c"]),
({"a": {}}, "a", []),
(StructuredWithMissing, "dict", []),
],
)
def test_dir(cfg: Any, key: Any, expected: Any) -> None:
c = OmegaConf.create(cfg)
if key is None:
assert dir(c) == expected
else:
assert dir(c._get_node(key)) == expected
def test_hash() -> None:
c1 = OmegaConf.create({"a": 10})
c2 = OmegaConf.create({"a": 10})
assert hash(c1) == hash(c2)
c2.a = 20
assert hash(c1) != hash(c2)
@mark.parametrize("default", ["default", 0, None])
def test_get_with_default_from_struct_not_throwing(default: Any) -> None:
c = OmegaConf.create({"a": 10, "b": 20})
OmegaConf.set_struct(c, True)
assert c.get("z", default) == default
@mark.parametrize("cfg", [{"foo": {}}, [1, 2, 3]])
def test_members(cfg: Any) -> None:
    # Accessing __members__ must not silently return None; it should raise AttributeError.
c = OmegaConf.create(cfg)
with raises(AttributeError):
c.__members__
@mark.parametrize(
"in_cfg, mask_keys, expected",
[
({}, [], {}),
({"a": 1}, "a", {"a": 1}),
({"a": 1}, ["b"], {}),
({"a": 1, "b": 2}, "b", {"b": 2}),
({"a": 1, "b": 2}, ["a", "b"], {"a": 1, "b": 2}),
],
)
def test_masked_copy(
in_cfg: Dict[str, Any], mask_keys: Union[str, List[str]], expected: Any
) -> None:
cfg = OmegaConf.create(in_cfg)
masked = OmegaConf.masked_copy(cfg, keys=mask_keys)
assert masked == expected
def test_masked_copy_is_deep() -> None:
cfg = OmegaConf.create({"a": {"b": 1, "c": 2}})
expected = {"a": {"b": 1, "c": 2}}
masked = OmegaConf.masked_copy(cfg, keys=["a"])
assert masked == expected
cfg.a.b = 2
assert cfg != expected
with raises(ValueError):
OmegaConf.masked_copy("fail", []) # type: ignore
def test_shallow_copy() -> None:
cfg = OmegaConf.create({"a": 1, "b": 2})
c = cfg.copy()
cfg.a = 42
assert cfg.a == 42
assert c.a == 1
def test_shallow_copy_missing() -> None:
cfg = DictConfig(content=MISSING)
c = cfg.copy()
c._set_value({"foo": 1})
assert c.foo == 1
assert cfg._is_missing()
def test_shallow_copy_none() -> None:
cfg = DictConfig(content=None)
c = cfg.copy()
c._set_value({"foo": 1})
assert c.foo == 1
assert cfg._is_none()
@mark.parametrize(
"copy_method",
[
param(copy.copy),
param(lambda x: x.copy(), id="obj.copy"),
],
)
def test_dict_shallow_copy_is_deepcopy(copy_method: Any) -> None:
cfg = OmegaConf.create({"a": {"b": 10}})
cp = copy_method(cfg)
assert cfg is not cp
assert cfg._get_node("a") is not cp._get_node("a")
def test_creation_with_invalid_key() -> None:
with raises(KeyValidationError):
OmegaConf.create({object(): "a"})
def test_setitem_with_invalid_key() -> None:
cfg = OmegaConf.create()
with raises(KeyValidationError):
cfg.__setitem__(object(), "a") # type: ignore
def test_getitem_with_invalid_key() -> None:
cfg = OmegaConf.create()
with raises(KeyValidationError):
cfg.__getitem__(object()) # type: ignore
def test_hasattr() -> None:
cfg = OmegaConf.create({"foo": "bar"})
OmegaConf.set_struct(cfg, True)
assert hasattr(cfg, "foo")
assert not hasattr(cfg, "buz")
def test_typed_hasattr() -> None:
cfg = OmegaConf.structured(SubscriptedDict)
assert hasattr(cfg.dict_enum, "foo") is False
with raises(AttributeError):
cfg.dict_int.foo
def test_struct_mode_missing_key_getitem() -> None:
cfg = OmegaConf.create({"foo": "bar"})
OmegaConf.set_struct(cfg, True)
with raises(KeyError):
cfg.__getitem__("zoo")
def test_struct_mode_missing_key_setitem() -> None:
cfg = OmegaConf.create({"foo": "bar"})
OmegaConf.set_struct(cfg, True)
with raises(KeyError):
cfg.__setitem__("zoo", 10)
def test_get_type() -> None:
cfg = OmegaConf.structured(User)
assert OmegaConf.get_type(cfg) == User
cfg = OmegaConf.structured(User(name="bond"))
assert OmegaConf.get_type(cfg) == User
cfg = OmegaConf.create({"user": User, "inter": "${user}"})
assert OmegaConf.get_type(cfg.user) == User
assert OmegaConf.get_type(cfg.inter) == User
@mark.parametrize(
"cfg, expected_ref_type",
[
(
OmegaConf.create(
{"plugin": DictConfig(ref_type=Plugin, content=ConcretePlugin)}
),
Optional[Plugin],
),
(
OmegaConf.create(
{
"plugin": DictConfig(
ref_type=Plugin, content=ConcretePlugin, is_optional=False
)
}
),
Plugin,
),
],
)
def test_get_ref_type(cfg: Any, expected_ref_type: Any) -> None:
assert _utils.get_ref_type(cfg.plugin) == expected_ref_type
def test_get_ref_type_with_conflict() -> None:
cfg = OmegaConf.create(
{"user": User, "inter": DictConfig(ref_type=Plugin, content="${user}")}
)
assert OmegaConf.get_type(cfg.user) == User
assert _utils.get_ref_type(cfg.user) == Any
# Interpolation inherits both type and ref type from the target
assert OmegaConf.get_type(cfg.inter) == User
assert _utils.get_ref_type(cfg.inter) == Any
def test_is_missing() -> None:
cfg = OmegaConf.create(
{
"missing_node": DictConfig(content="???"),
"foo": "???",
"inter": "${foo}",
"str_inter": "zoo_${foo}",
"missing_node_inter": "${missing_node}",
}
)
assert cfg._get_node("foo")._is_missing() # type: ignore
assert not cfg._get_node("inter")._is_missing() # type: ignore
assert not cfg._get_node("str_inter")._is_missing() # type: ignore
assert cfg._get_node("missing_node")._is_missing() # type: ignore
assert not cfg._get_node("missing_node_inter")._is_missing() # type: ignore
@mark.parametrize("ref_type", [None, Any])
@mark.parametrize("assign", [None, {}, {"foo": "bar"}, [1, 2, 3]])
def test_assign_to_reftype_none_or_any(ref_type: Any, assign: Any) -> None:
cfg = OmegaConf.create({"foo": DictConfig(ref_type=ref_type, content={})})
cfg.foo = assign
assert cfg.foo == assign
@mark.parametrize(
"ref_type,assign",
[
(Plugin, None),
(Plugin, Plugin),
(Plugin, Plugin()),
(Plugin, ConcretePlugin),
(Plugin, ConcretePlugin()),
(ConcretePlugin, None),
param(ConcretePlugin, ConcretePlugin, id="subclass=subclass_obj"),
param(ConcretePlugin, ConcretePlugin(), id="subclass=subclass_obj"),
],
)
class TestAssignAndMergeIntoReftypePlugin:
def _test_assign(self, ref_type: Any, value: Any, assign: Any) -> None:
cfg = OmegaConf.create({"foo": DictConfig(ref_type=ref_type, content=value)})
assert _utils.get_ref_type(cfg, "foo") == Optional[ref_type]
cfg.foo = assign
assert cfg.foo == assign
assert _utils.get_ref_type(cfg, "foo") == Optional[ref_type]
def _test_merge(self, ref_type: Any, value: Any, assign: Any) -> None:
cfg = OmegaConf.create({"foo": DictConfig(ref_type=ref_type, content=value)})
cfg2 = OmegaConf.merge(cfg, {"foo": assign})
assert isinstance(cfg2, DictConfig)
assert cfg2.foo == assign
assert _utils.get_ref_type(cfg2, "foo") == Optional[ref_type]
def test_assign_to_reftype_plugin1(self, ref_type: Any, assign: Any) -> None:
self._test_assign(ref_type, ref_type, assign)
self._test_assign(ref_type, ref_type(), assign)
@mark.parametrize("value", [None, "???"])
def test_assign_to_reftype_plugin(
self, ref_type: Any, value: Any, assign: Any
) -> None:
self._test_assign(ref_type, value, assign)
def test_merge_into_reftype_plugin_(self, ref_type: Any, assign: Any) -> None:
self._test_merge(ref_type, ref_type, assign)
self._test_merge(ref_type, ref_type(), assign)
@mark.parametrize("value", [None, "???"])
def test_merge_into_reftype_plugin(
self, ref_type: Any, value: Any, assign: Any
) -> None:
self._test_merge(ref_type, value, assign)
@mark.parametrize(
"ref_type,assign,expectation",
[
param(
Plugin,
10,
raises(ValidationError),
id="assign_primitive_to_typed",
),
param(
ConcretePlugin,
Plugin,
raises(ValidationError),
id="assign_base_type_to_subclass",
),
param(
ConcretePlugin,
Plugin(),
raises(ValidationError),
id="assign_base_instance_to_subclass",
),
],
)
class TestAssignAndMergeIntoReftypePlugin_Errors:
def _test_assign(
self, ref_type: Any, value: Any, assign: Any, expectation: Any
) -> None:
cfg = OmegaConf.create({"foo": DictConfig(ref_type=ref_type, content=value)})
with expectation:
cfg.foo = assign
def _test_merge(
self, ref_type: Any, value: Any, assign: Any, expectation: Any
) -> None:
cfg = OmegaConf.create({"foo": DictConfig(ref_type=ref_type, content=value)})
with expectation:
OmegaConf.merge(cfg, {"foo": assign})
def test_assign_to_reftype_plugin_(
self, ref_type: Any, assign: Any, expectation: Any
) -> None:
self._test_assign(ref_type, ref_type, assign, expectation)
self._test_assign(ref_type, ref_type(), assign, expectation)
@mark.parametrize("value", [None, "???"])
def test_assign_to_reftype_plugin(
self, ref_type: Any, value: Any, assign: Any, expectation: Any
) -> None:
self._test_assign(ref_type, value, assign, expectation)
def test_merge_into_reftype_plugin1(
self, ref_type: Any, assign: Any, expectation: Any
) -> None:
self._test_merge(ref_type, ref_type, assign, expectation)
self._test_merge(ref_type, ref_type(), assign, expectation)
@mark.parametrize("value", [None, "???"])
def test_merge_into_reftype_plugin(
self, ref_type: Any, value: Any, assign: Any, expectation: Any
) -> None:
self._test_merge(ref_type, value, assign, expectation)
def test_setdefault() -> None:
cfg = OmegaConf.create({})
assert cfg.setdefault("foo", 10) == 10
assert cfg["foo"] == 10
assert cfg.setdefault("foo", 20) == 10
assert cfg["foo"] == 10
cfg = OmegaConf.create({})
OmegaConf.set_struct(cfg, True)
with raises(ConfigKeyError):
assert cfg.setdefault("foo", 10) == 10
assert cfg == {}
with open_dict(cfg):
assert cfg.setdefault("foo", 10) == 10
assert cfg.setdefault("foo", 20) == 10
assert cfg["foo"] == 10
assert cfg["foo"] == 10
@mark.parametrize(
"c",
[
param({"a": ListConfig([1, 2, 3], ref_type=list)}, id="list_value"),
param({"a": DictConfig({"b": 10}, ref_type=dict)}, id="dict_value"),
],
)
def test_self_assign_list_value_with_ref_type(c: Any) -> None:
cfg = OmegaConf.create(c)
cfg.a = cfg.a
assert cfg == c
def test_assign_to_sc_field_without_ref_type() -> None:
cfg = OmegaConf.create({"plugin": ConcretePlugin})
with raises(ValidationError):
cfg.plugin.params.foo = "bar"
cfg.plugin = 10
assert cfg.plugin == 10
def test_dict_getitem_not_found() -> None:
cfg = OmegaConf.create()
with raises(ConfigKeyError):
cfg["aaa"]
def test_dict_getitem_none_output() -> None:
cfg = OmegaConf.create({"a": None})
assert cfg["a"] is None
@mark.parametrize("data", [{"b": 0}, User])
@mark.parametrize("flag", ["struct", "readonly"])
def test_dictconfig_creation_with_parent_flag(flag: str, data: Any) -> None:
parent = OmegaConf.create({"a": 10})
parent._set_flag(flag, True)
cfg = DictConfig(data, parent=parent)
assert cfg == data
@mark.parametrize(
"node",
[
param(AnyNode("hello"), id="any"),
param(DictConfig({}), id="dict"),
param(ListConfig([]), id="list"),
],
)
def test_node_copy_on_set(node: Any) -> None:
cfg = OmegaConf.create({})
cfg.a = node
assert cfg.__dict__["_content"]["a"] is not node
|
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable, xrange
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
        widths. Values in `x` that are smaller than the lowest bin edge are
        assigned to bin number 0, and values beyond the highest bin are
        assigned to ``bins[-1]``. If the bin edges are specified, the number
        of bins will be ``nx = len(bins) - 1``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
        The bin edges, of length ``length(statistic) + 1``.
    binnumber : 1-D ndarray of ints
        Indices of the bins (corresponding to `bin_edges`) to which each value
        of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
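    As described in the Notes, a value equal to the rightmost edge is counted
    in the last bin rather than treated as an outlier (a small illustrative
    check):
    >>> stats.binned_statistic([1, 2, 4], [10.0, 20.0, 30.0], 'count',
    ...                        bins=[1, 2, 3, 4]).binnumber
    array([1, 2, 3])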
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
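    Any callable can also be used as the `statistic`; for instance, ``np.std``
    gives the per-bin standard deviation (a brief sketch, verified with
    ``np.allclose`` rather than exact printing):
    >>> res = stats.binned_statistic([1, 1, 2, 5, 7], [1.0, 1.0, 2.0, 1.5, 3.0],
    ...                              statistic=np.std, bins=2)
    >>> np.allclose(res.statistic, [0.47140452, 0.75])
    True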
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the same
shape as `sample`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
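Examples
--------
A minimal usage sketch: bin 1000 random points in two dimensions and
compute the per-bin mean of an associated random value array (only the
shapes of the outputs are shown).
>>> from scipy import stats
>>> import numpy as np
>>> sample = np.random.random((1000, 2))
>>> values = np.random.random(1000)
>>> stat, edges, binnumber = stats.binned_statistic_dd(sample, values,
...                                                    statistic='mean',
...                                                    bins=[5, 5])
>>> stat.shape
(5, 5)
>>> len(edges), len(binnumber)
(2, 1000)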
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = {}
for i in xrange(Ndim):
sampBin[i] = np.digitize(sample[:, i], edges[i])
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
# `binnumbers` is which bin (in linearized `Ndim` space) each sample goes
binnumbers = np.zeros(Dlen, int)
for i in xrange(0, Ndim - 1):
binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()
binnumbers += sampBin[ni[-1]]
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates warnings for mean/std/... with an empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, np.sort(nbin)))
for i in xrange(nbin.size):
j = ni.argsort()[i]
# Accommodate the extra `Vdim` dimension-zero with `+1`
result = result.swapaxes(i+1, j+1)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
# Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
|
|
import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from osipkd.models import (
DBSession,
Group
)
from osipkd.models.pemda import Urusan as UrusanModel
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Tambah urusan gagal'
SESS_EDIT_FAILED = 'Edit urusan gagal'
class AddSchema(colander.Schema):
kode = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=18))
nama = colander.SchemaNode(
colander.String())
disabled = colander.SchemaNode(
colander.Boolean())
class EditSchema(AddSchema):
id = colander.SchemaNode(colander.String(),
missing=colander.drop,
widget=widget.HiddenWidget(readonly=True))
class view_urusan(BaseViews):
########
# List #
########
@view_config(route_name='urusan', renderer='templates/urusan/list.pt',
permission='read')
def view_list(self):
return dict(a={})
##########
# Action #
##########
@view_config(route_name='urusan-act', renderer='json',
permission='read')
def gaji_urusan_act(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
if url_dict['act']=='grid':
columns = []
columns.append(ColumnDT('id'))
columns.append(ColumnDT('kode'))
columns.append(ColumnDT('nama'))
columns.append(ColumnDT('disabled'))
query = DBSession.query(UrusanModel)
rowTable = DataTables(req, UrusanModel, query, columns)
return rowTable.output_result()
elif url_dict['act']=='changeid':
row = UrusanModel.get_by_id('urusan_id' in params and params['urusan_id'] or 0)
if row:
ses['urusan_id']=row.id
ses['urusan_kd']=row.kode
ses['urusan_nm']=row.nama
return {'success':True}
#######
# Add #
#######
def form_validator(self, form, value):
if 'id' in form.request.matchdict:
uid = form.request.matchdict['id']
q = DBSession.query(UrusanModel).filter_by(id=uid)
urusan = q.first()
else:
urusan = None
def get_form(self, class_form, row=None):
schema = class_form(validator=self.form_validator)
schema = schema.bind()
schema.request = self.request
if row:
schema.deserialize(row)
return Form(schema, buttons=('simpan','batal'))
def save(self, values, user, row=None):
if not row:
row = UrusanModel()
row.created = datetime.now()
row.create_uid = user.id
row.from_dict(values)
row.updated = datetime.now()
row.update_uid = user.id
row.disabled = 'disabled' in values and values['disabled'] and 1 or 0
DBSession.add(row)
DBSession.flush()
return row
def save_request(self, values, row=None):
if 'id' in self.request.matchdict:
values['id'] = self.request.matchdict['id']
row = self.save(values, self.request.user, row)
self.request.session.flash('urusan sudah disimpan.')
def route_list(self):
return HTTPFound(location=self.request.route_url('urusan'))
def session_failed(self, session_name):
r = dict(form=self.session[session_name])
del self.session[session_name]
return r
@view_config(route_name='urusan-add', renderer='templates/urusan/add.pt',
permission='add')
def view_urusan_add(self):
req = self.request
ses = self.session
form = self.get_form(AddSchema)
if req.POST:
if 'simpan' in req.POST:
controls = req.POST.items()
try:
c = form.validate(controls)
except ValidationFailure, e:
req.session[SESS_ADD_FAILED] = e.render()
return HTTPFound(location=req.route_url('urusan-add'))
self.save_request(dict(controls))
return self.route_list()
elif SESS_ADD_FAILED in req.session:
return self.session_failed(SESS_ADD_FAILED)
return dict(form=form.render())
########
# Edit #
########
def query_id(self):
return DBSession.query(UrusanModel).filter_by(id=self.request.matchdict['id'])
def id_not_found(self):
msg = 'urusan ID %s Tidak Ditemukan.' % self.request.matchdict['id']
self.request.session.flash(msg, 'error')
return self.route_list()
@view_config(route_name='urusan-edit', renderer='templates/urusan/edit.pt',
permission='edit')
def view_urusan_edit(self):
request = self.request
row = self.query_id().first()
if not row:
return self.id_not_found()
form = self.get_form(EditSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
print controls
try:
c = form.validate(controls)
except ValidationFailure, e:
request.session[SESS_EDIT_FAILED] = e.render()
return HTTPFound(location=request.route_url('urusan-edit',
id=row.id))
self.save_request(dict(controls), row)
return self.route_list()
elif SESS_EDIT_FAILED in request.session:
return self.session_failed(SESS_EDIT_FAILED)
values = row.to_dict()
return dict(form=form.render(appstruct=values))
##########
# Delete #
##########
@view_config(route_name='urusan-delete', renderer='templates/urusan/delete.pt',
permission='delete')
def view_urusan_delete(self):
request = self.request
q = self.query_id()
row = q.first()
if not row:
return self.id_not_found()
form = Form(colander.Schema(), buttons=('hapus','batal'))
if request.POST:
if 'hapus' in request.POST:
msg = 'urusan ID %d %s sudah dihapus.' % (row.id, row.nama)
try:
q.delete()
DBSession.flush()
except:
msg = 'urusan ID %d %s tidak dapat dihapus.' % (row.id, row.nama)
request.session.flash(msg)
return self.route_list()
return dict(row=row,
form=form.render())
|
|
"""
These are experiments to compare the Betweenness results
from the python networkX library to some handwritten
notes and check if our expectations were correct.
In particular, we want to figure out how undirected edges
are handled in comparison to directed ones, how
end-point consideration changes the results, and whether
normalization makes more or less sense for our use
cases (also which normalization is used).
We therefore need to check each topology / configuration for:
-) directed / undirectedness (only for the first 2 graphs, as example)
-) endpoints to SP included or not (given as argument to _betweenness)
Normalization is set to 1/((n-1)(n-2)) for directed and
2/((n-1)(n-2)) for undirected graphs as formally described
and makes centralities more comparable between
graphs of different sizes; edge cases:
-) 1 node: no SP's, of course...
-) 2 nodes: apparently only (n-1) is considered, see results below
-) 1 direct SP, 1 indirect: direct SP influences (diminishes)
score on node on indirect SP
Also good points to implement:
-) strength of a node in a weighted graph
which follows a power-law distribution:
s(k)\approx k^{\beta }
-) strength / betweenness ratio
A study of the average value s(b) of the strength
for vertices with betweenness b shows that the
functional behavior can be approximated by a scaling form:
s(b)\approx b^{{\alpha }}
where strength = sum of the weight of the adjacent edges,
so a_ij * w_ij
Currently, in Graphinius a_ij = w_ij[w_ij < Infinity]
IDEA: since strength is given by a matrix multiplication
which might be vectorized, and we know there's a relation
(and thus approximate mapping) between strength and
betweenness, why not train a CNN on strength and see
what happens...
"""
import networkx as nx
from networkx import betweenness_centrality
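# Sketch for the "strength of a node" point from the notes above: strength is
# the sum of the weights of a node's adjacent edges (a_ij * w_ij). A minimal
# helper for experimentation only (unweighted edges count as weight 1); it is
# not used by the runs below.
def node_strength(G, weight="weight"):
    return dict((n, sum(d.get(weight, 1) for _, _, d in G.edges(n, data=True)))
                for n in G.nodes())
# e.g. node_strength(nx.Graph([('A', 'B')])) == {'A': 1, 'B': 1}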
print("========================================================")
print("1-node graph: A")
print("========================================================")
G_1n = nx.Graph()
G_1n.add_node('A')
print( "\ndefault:\n" + str( betweenness_centrality( G_1n ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_1n, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_1n, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_1n, endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("2-node graph with one connecting edge, UNdirected: A--B")
print("========================================================")
G_2n_1e = nx.Graph()
G_2n_1e.add_edge('A', 'B')
print( "\ndefault:\n" + str( betweenness_centrality( G_2n_1e ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_2n_1e, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_2n_1e, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_2n_1e, endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("2-node graph with one connecting edge, directed: A->B")
print("========================================================")
G_2n_1e_di = nx.DiGraph()
G_2n_1e_di.add_edge('A', 'B')
print( "\ndefault:\n" + str( betweenness_centrality( G_2n_1e_di ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_2n_1e_di, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_2n_1e_di, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_2n_1e_di, endpoints=True, normalized=False ) ) )
"""
Shortest paths:
A--B
B--A
A--C
C--A
B--C
C--B
without endpoints:
A, C := 0
B: lies on A--C as well as C--A && there are 2 SP's between A--C := 1
with endpoints (Normalization == 1, so no difference):
A: lies on A--B twice (score=1), lies on A--C twice (score=1) := 2
C: lies on A--C twice (score=1), lies on B--C twice (score=1) := 2
B: A--B (score=1), B--C (score=1), A--C (score=1) : = 3
"""
print("\n========================================================")
print("3-node graph with two connecting edges, UNdirected: A--B--C")
print("========================================================")
G_3n_path = nx.Graph()
G_3n_path.add_edge('A', 'B')
G_3n_path.add_edge('B', 'C')
print( "\ndefault:\n" + str( betweenness_centrality( G_3n_path ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_3n_path, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_3n_path, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_3n_path, endpoints=True, normalized=False ) ) )
"""
Shortest paths:
A->B
A->C
B->C
normalization factor: 1/2
without endpoints:
A, C := 0
B: lies on A->C & there is only one SP(A->C) := 1
without endpoints, normalized:
B := 1/2
with endpoints:
A: A->B (score=1), A->C (score=1) := 2
C: A->C (score=1), B->C (score=1) := 2
B: A->B (score=1), A->C (score=1), B->C (score=1) := 3
with endpoints, normalized:
A, C := 1
B := 1.5
"""
print("\n========================================================")
print("3-node graph with two connecting edges, directed: A->B->C")
print("========================================================")
G_3n_path_di = nx.DiGraph()
G_3n_path_di.add_edge('A', 'B')
G_3n_path_di.add_edge('B', 'C')
print( "\ndefault:\n" + str( betweenness_centrality( G_3n_path_di ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_3n_path_di, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_3n_path_di, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_3n_path_di, endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("5-node graph with 4 connecting edges, directed: A->B->C->D->E")
print("========================================================")
G_5n_path = nx.Graph()
G_5n_path.add_edge('A', 'B')
G_5n_path.add_edge('B', 'C')
G_5n_path.add_edge('C', 'D')
G_5n_path.add_edge('D', 'E')
print( "\ndefault:\n" + str( betweenness_centrality( G_5n_path ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_5n_path, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_5n_path, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_5n_path, endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("3-node cycle graph, directed: A->B->C->A")
print("========================================================")
G_3n_cycle = nx.DiGraph()
G_3n_cycle.add_cycle(['A', 'B', 'C'])
print( "\ndefault:\n" + str( betweenness_centrality( G_3n_cycle ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_3n_cycle, normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_3n_cycle, endpoints=True ) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_3n_cycle, endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("3-node graph, directed, weighted: A->B; A->C->B")
print("========================================================")
G_2waySP = nx.DiGraph()
G_2waySP.add_edge('A', 'B', weight=1)
G_2waySP.add_edge('A', 'C', weight=0)
G_2waySP.add_edge('C', 'B', weight=1)
print( "\ndefault:\n" + str( betweenness_centrality( G_2waySP, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_2waySP, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_2waySP, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_2waySP, weight="weight", endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("8-node graph, directed, one start split, one end merge: A->C->D->E->B; A->I->J->K->B")
print("========================================================")
G_8n_split_merge = nx.DiGraph()
G_8n_split_merge.add_edge('A', 'C')
G_8n_split_merge.add_edge('C', 'D')
G_8n_split_merge.add_edge('D', 'E')
G_8n_split_merge.add_edge('E', 'B')
G_8n_split_merge.add_edge('A', 'I')
G_8n_split_merge.add_edge('I', 'J')
G_8n_split_merge.add_edge('J', 'K')
G_8n_split_merge.add_edge('K', 'B')
print( "\ndefault:\n" + str( betweenness_centrality( G_8n_split_merge, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_8n_split_merge, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_8n_split_merge, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_8n_split_merge, weight="weight", endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("7-node graph, directed, start, one split, end merge: A->C->D->E->B; A->C->I->J->B")
print("========================================================")
G_7n_start_split_merge = nx.DiGraph()
G_7n_start_split_merge.add_edge('A', 'C')
G_7n_start_split_merge.add_edge('C', 'D')
G_7n_start_split_merge.add_edge('D', 'E')
G_7n_start_split_merge.add_edge('E', 'B')
G_7n_start_split_merge.add_edge('C', 'I')
G_7n_start_split_merge.add_edge('I', 'J')
G_7n_start_split_merge.add_edge('J', 'B')
print( "\ndefault:\n" + str( betweenness_centrality( G_7n_start_split_merge, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_7n_start_split_merge, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_7n_start_split_merge, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_7n_start_split_merge, weight="weight", endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("7-node graph, one split, one merge, end: A->C->D->E->B; A->I->J->E->B")
print("========================================================")
G_7n_split_merge_end = nx.DiGraph()
G_7n_split_merge_end.add_edge('A', 'C')
G_7n_split_merge_end.add_edge('C', 'D')
G_7n_split_merge_end.add_edge('D', 'E')
G_7n_split_merge_end.add_edge('E', 'B')
G_7n_split_merge_end.add_edge('A', 'I')
G_7n_split_merge_end.add_edge('I', 'J')
G_7n_split_merge_end.add_edge('J', 'E')
print( "\ndefault:\n" + str( betweenness_centrality( G_7n_split_merge_end, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_7n_split_merge_end, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_7n_split_merge_end, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_7n_split_merge_end, weight="weight", endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("8-node graph, start, split, merge, end: A->C->D->E->B; A->I->J->E->B")
print("========================================================")
G_8n_start_split_merge_end = nx.DiGraph()
G_8n_start_split_merge_end.add_edge('A', 'C')
G_8n_start_split_merge_end.add_edge('C', 'D')
G_8n_start_split_merge_end.add_edge('D', 'E')
G_8n_start_split_merge_end.add_edge('E', 'F')
G_8n_start_split_merge_end.add_edge('F', 'B')
G_8n_start_split_merge_end.add_edge('C', 'I')
G_8n_start_split_merge_end.add_edge('I', 'J')
G_8n_start_split_merge_end.add_edge('J', 'F')
print( "\ndefault:\n" + str( betweenness_centrality( G_8n_start_split_merge_end, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_8n_start_split_merge_end, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_8n_start_split_merge_end, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_8n_start_split_merge_end, weight="weight", endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("4-node quadrangle graph, directed: A->B; A->J->K->B")
print("========================================================")
G_4n_quadrangle = nx.DiGraph()
G_4n_quadrangle.add_edge('A', 'B')
G_4n_quadrangle.add_edge('A', 'J')
G_4n_quadrangle.add_edge('J', 'K')
G_4n_quadrangle.add_edge('K', 'B')
print( "\ndefault:\n" + str( betweenness_centrality( G_4n_quadrangle, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_4n_quadrangle, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_4n_quadrangle, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_4n_quadrangle, weight="weight", endpoints=True, normalized=False ) ) )
print("\n========================================================")
print("search_graph_multiple_SPs_no1DE")
print("========================================================")
G_search_graph_multiple_SPs_no1DE=nx.DiGraph()
G_search_graph_multiple_SPs_no1DE.add_edge('A','B',weight=3)
G_search_graph_multiple_SPs_no1DE.add_edge('A','C',weight=4)
G_search_graph_multiple_SPs_no1DE.add_edge('A','D',weight=1)
G_search_graph_multiple_SPs_no1DE.add_edge('B','A',weight=5)
G_search_graph_multiple_SPs_no1DE.add_edge('B','C',weight=1)
G_search_graph_multiple_SPs_no1DE.add_edge('B','E',weight=5)
G_search_graph_multiple_SPs_no1DE.add_edge('B','F',weight=1)
G_search_graph_multiple_SPs_no1DE.add_edge('C','A',weight=1)
G_search_graph_multiple_SPs_no1DE.add_edge('C','E',weight=1)
G_search_graph_multiple_SPs_no1DE.add_edge('D','C',weight=6)
G_search_graph_multiple_SPs_no1DE.add_edge('D','E',weight=17)
G_search_graph_multiple_SPs_no1DE.add_edge('E','B',weight=5)
G_search_graph_multiple_SPs_no1DE.add_edge('F','C',weight=3)
G_search_graph_multiple_SPs_no1DE.add_edge('F','E',weight=5)
print( "\ndefault:\n" + str( betweenness_centrality( G_search_graph_multiple_SPs_no1DE, weight="weight" ) ) )
print( "\nUNnormalized:\n" + str( betweenness_centrality( G_search_graph_multiple_SPs_no1DE, weight="weight", normalized=False ) ) )
print( "\nendpoints:\n" + str( betweenness_centrality( G_search_graph_multiple_SPs_no1DE, weight="weight", endpoints=True) ) )
print( "\nendpoints, UNnormalized:\n" + str( betweenness_centrality( G_search_graph_multiple_SPs_no1DE, weight="weight", endpoints=True, normalized=False ) ) )
|
|
# Copyright (c) 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import exception
from cloudbaseinit.tests import testutils
MODPATH = "cloudbaseinit.utils.windows.licensing"
class LicensingTest(unittest.TestCase):
def setUp(self):
self._wmi_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules', {
'wmi': self._wmi_mock})
self.snatcher = testutils.LogSnatcher(MODPATH)
self._module_patcher.start()
self.licensing = importlib.import_module(MODPATH)
self._licensing = self.licensing.LicensingManager()
self._licensing_v2 = self.licensing.LicensingManagerV2()
def tearDown(self):
self._module_patcher.stop()
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def _test_run_slmgr(self, mock_get_os_utils, ret_val=0,
sysnative=True):
mock_args = [mock.sentinel.args]
mock_outval = b"fake-out"
mock_cscriptdir = r"fake\cscript\dir"
mock_osutils = mock.Mock()
mock_osutils.get_sysnative_dir.return_value = mock_cscriptdir
mock_osutils.get_system32_dir.return_value = mock_cscriptdir
mock_get_os_utils.return_value = mock_osutils
mock_osutils.get_system32_dir.return_value = r"fakedir"
mock_osutils.check_sysnative_dir_exists.return_value = sysnative
mock_osutils.execute_process.return_value = (
mock_outval, mock.sentinel.err, ret_val)
if ret_val:
self.assertRaises(exception.CloudbaseInitException,
self._licensing._run_slmgr, mock_args)
else:
res_out = self._licensing._run_slmgr(mock_args)
self.assertEqual(res_out, "fake-out")
self.assertEqual(mock_osutils.execute_process.call_count, 1)
def test_run_slmgr_sys_native(self):
self._test_run_slmgr()
def test_run_slmgr_system32(self):
self._test_run_slmgr(sysnative=False)
def test_run_slmgr_fail(self):
self._test_run_slmgr(ret_val=1)
@mock.patch(MODPATH + ".LicensingManager._run_slmgr")
def test_get_licensing_info(self, mock_run_slmgr):
mock_out = mock.sentinel.out_val
mock_run_slmgr.return_value = mock_out
res = self._licensing.get_licensing_info()
mock_run_slmgr.assert_called_once_with(['/dlv'])
self.assertEqual(res, mock_out)
@mock.patch(MODPATH + ".LicensingManager._run_slmgr")
def test_activate_windows(self, mock_run_slmgr):
mock_out = mock.sentinel.out_val
mock_run_slmgr.return_value = mock_out
res = self._licensing.activate_windows()
mock_run_slmgr.assert_called_once_with(['/ato'])
self.assertEqual(res, mock_out)
@mock.patch(MODPATH + ".LicensingManager._run_slmgr")
def test_set_kms_host(self, mock_run_slmgr):
mock_out = mock.sentinel.out_val
mock_kms = mock.sentinel.kms_host
mock_run_slmgr.return_value = mock_out
res = self._licensing.set_kms_host(mock_kms)
expected_host = "%s:%s" % (mock_kms, self.licensing.DEFAULT_KMS_PORT)
mock_run_slmgr.assert_called_once_with(['/skms', expected_host])
self.assertEqual(res, mock_out)
@mock.patch(MODPATH + ".LicensingManager._run_slmgr")
def test_set_kms_auto_discovery(self, mock_run_slmgr):
mock_out = mock.sentinel.out_val
mock_run_slmgr.return_value = mock_out
res = self._licensing.set_kms_auto_discovery()
mock_run_slmgr.assert_called_once_with(['/ckms'])
self.assertEqual(res, mock_out)
@mock.patch(MODPATH + ".LicensingManager._run_slmgr")
def test_set_product_key(self, mock_run_slmgr):
mock_out = mock.sentinel.out_val
mock_product_key = mock.sentinel.product_key
mock_run_slmgr.return_value = mock_out
res = self._licensing.set_product_key(mock_product_key)
mock_run_slmgr.assert_called_once_with(['/ipk', mock_product_key])
self.assertEqual(res, mock_out)
def test_is_eval_v1(self):
with self.assertRaises(NotImplementedError):
self._licensing.is_eval()
def test_get_kms_product_v1(self):
with self.assertRaises(NotImplementedError):
self._licensing.get_kms_product()
def test_get_volume_activation_product_key_v1(self):
with self.assertRaises(NotImplementedError):
self._licensing.get_volume_activation_product_key('fake')
def test_get_service(self):
mock_result = mock.Mock()
conn = self._wmi_mock.WMI
conn.SoftwareLicensingService.return_value = [mock_result]
self._licensing_v2._get_service()
self.assertIsNotNone(self._licensing_v2._service)
@mock.patch(MODPATH + '.LicensingManagerV2._get_service')
def test_set_product_key_v2(self, mock_get_service):
mock_product_key = mock.Mock()
mock_service = mock.Mock()
mock_get_service.return_value = mock_service
self._licensing_v2.set_product_key(mock_product_key)
mock_get_service.assert_called_once_with()
mock_service.InstallProductKey.assert_called_once_with(
mock_product_key)
@mock.patch(MODPATH + '.LicensingManagerV2._get_service')
def test_set_kms_auto_discovery_v2(self, mock_get_service):
mock_service = mock.Mock()
mock_get_service.return_value = mock_service
self._licensing_v2.set_kms_auto_discovery()
mock_get_service.assert_called_once_with()
mock_service.ClearKeyManagementServiceMachine.assert_called_once_with()
mock_service.ClearKeyManagementServicePort.assert_called_once_with()
@mock.patch(MODPATH + '.LicensingManagerV2._get_service')
def test_set_kms_host_v2(self, mock_get_service):
mock_service = mock.Mock()
mock_host = mock.sentinel.host
mock_port = mock.sentinel.port
mock_get_service.return_value = mock_service
self._licensing_v2.set_kms_host(mock_host, mock_port)
mock_get_service.assert_called_once_with()
mock_service.SetKeyManagementServiceMachine.assert_called_once_with(
mock_host)
mock_service.SetKeyManagementServicePort.assert_called_once_with(
mock_port)
@mock.patch(MODPATH + '.LicensingManagerV2._get_service')
def test_refresh_status_v2(self, mock_get_service):
mock_service = mock.Mock()
mock_get_service.return_value = mock_service
self._licensing_v2.refresh_status()
mock_get_service.assert_called_once_with()
mock_service.RefreshLicenseStatus.assert_called_once_with()
def test_is_current_product(self):
mock_product = mock.Mock()
mock_product.PartialProductKey = "fake-key"
res = self._licensing_v2._is_current_product(mock_product)
self.assertTrue(res)
def test_get_products(self):
mock_result = mock.Mock()
conn = self._wmi_mock.WMI
conn.query.return_value = mock_result
res = self._licensing_v2._get_products()
self.assertEqual(res, self._licensing_v2._products)
@mock.patch(MODPATH + ".LicensingManagerV2._get_products")
def test_is_eval(self, mock_get_products):
mock_product = mock.Mock()
mock_product.ApplicationId = self.licensing.WINDOWS_APP_ID
mock_product.Description = u"TIMEBASED_EVAL"
mock_product.EvaluationEndDate = "fake"
mock_get_products.return_value = [mock_product]
res = self._licensing_v2.is_eval()
self.assertEqual(res, "fake")
@mock.patch(MODPATH + ".LicensingManagerV2._get_products")
def _test_get_kms_product(self, mock_get_products, products=()):
mock_get_products.return_value = products
if not products:
self.assertRaises(exception.ItemNotFoundException,
self._licensing_v2.get_kms_product)
return
res = self._licensing_v2.get_kms_product()
self.assertIsNotNone(res)
def test_get_kms_product_no_keys(self):
self._test_get_kms_product()
def test_get_kms_product(self):
mock_product = mock.Mock()
mock_product.ApplicationId = self.licensing.WINDOWS_APP_ID
mock_product.Description = u"VOLUME_KMSCLIENT"
self._test_get_kms_product(products=[mock_product])
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def test_get_volume_activation_product_key(self, mock_get_os_utils):
mock_os_version = {'major_version': 10, 'minor_version': 0}
expected_key = "WC2BQ-8NRM3-FDDYY-2BFGV-KHKQY"
mock_osutils = mock.Mock()
mock_get_os_utils.return_value = mock_osutils
mock_osutils.get_os_version.return_value = mock_os_version
res = self._licensing_v2.get_volume_activation_product_key(
license_family="ServerStandard")
self.assertEqual(res, expected_key)
|
|
Uproc : Unix Processes
Project overview
Five examples of using Unix processes are presented.
The examples are implemented in the C language.
At the end of each example, the equivalent Python
source is presented.
The first example illustrates the relationship between a parent
process and a son created by fork.
The second example shows different aspects of using the exec calls.
The last three examples present three specific problems solved
using combinations of the system calls fork, exec, wait, exit, and system.
Example 1, use fork.
--------------------
1. The source fork1.c:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
main() {
int p;
p=fork();
if (p == -1) {perror("fork imposibil!"); exit(1);}
if (p == 0) {
printf("Fiu: pid=%d, ppid=%d\n", getpid(), getppid());
} else {
printf("Parinte: pid=%d ppid=%d\n", getpid(), getppid());
//wait(0);
printf("Terminat fiul\n");
}
}
Run fork1 with wait commented out:
Parinte: pid=2704 ppid=2197
Terminat fiul
Fiu: pid=2705, ppid=2704
Run fork1 with wait:
Parinte: pid=2715 ppid=2197
Fiu: pid=2716, ppid=2715
Terminat fiul
Reason: the termination message is printed by the parent.
In the absence of wait, it is possible that the parent regains
control before its son and displays the message before
the son displays his own. If wait is used, the
father waits for the actual completion of his son before
displaying the final message.
The equivalent Python source:
fork1.py
--------
import os
p=os.fork()
if p == -1:
print "fork imposibil!"
exit(1)
if p == 0:
print "Fiu: pid="+`os.getpid()`+", ppid="+`os.getppid()`
else:
print "Parinte: pid="+`os.getpid()`+" ppid="+`os.getppid()`
#os.wait()
print "Terminat fiul"
2. The source fork2.c :
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
main() {
int p;
printf("Parinte: pid=%d, ppid=%d\n", getpid(), getppid());
p=fork();
if(p==-1){perror("fork imposibil!"); exit(1);}
if(p==0){
printf("Fiu: pid=%d, ppid=%d\n", getpid(), getppid());
//exit(2);
}
printf("pid=%d, ppid=%d\n", getpid(), getppid());
}
Run fork2 with exit(2) commented out:
Parinte: pid=2743, ppid=2197
pid=2743, ppid=2197
Fiu: pid=2744, ppid=2743
pid=2744, ppid=2743
Run fork2 with exit(2):
Parinte: pid=2755, ppid=2197
pid=2755, ppid=2197
Fiu: pid=2756, ppid=2755
Reason: the last printf is executed by both the parent and
the son. If exit(2) is present, the son does not execute the last printf.
The equivalent Python source:
fork2.py
--------
import os
print "Parinte: pid="+`os.getpid()`+", ppid="+`os.getppid()`
p = os.fork()
if p == -1:
print "fork imposibil!"
exit(1)
if p == 0:
print "Fiu: pid="+`os.getpid()`+", ppid="+`os.getppid()`
exit(2)
print "pid="+`os.getpid()`+", ppid="+`os.getppid()`
3. The source fork3.c:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
main() {
int p, i;
p=fork();
if (p == -1) {perror("fork imposibil!"); exit(1);}
if (p == 0) {
for (i=0; i<10; i++)
printf("%d. Fiu: pid=%d, ppid=%d\n", i, getpid(), getppid());
} else {
for (i=0; i<10; i++)
printf("%d. Parinte: pid=%d ppid=%d\n", i, getpid(), getppid());
}
}
Results:
0. Parinte: pid=2708 ppid=1768
1. Parinte: pid=2708 ppid=1768
2. Parinte: pid=2708 ppid=1768
0. Fiu: pid=2715, ppid=2708
3. Parinte: pid=2708 ppid=1768
1. Fiu: pid=2715, ppid=2708
4. Parinte: pid=2708 ppid=1768
2. Fiu: pid=2715, ppid=2708
5. Parinte: pid=2708 ppid=1768
3. Fiu: pid=2715, ppid=2708
6. Parinte: pid=2708 ppid=1768
4. Fiu: pid=2715, ppid=2708
7. Parinte: pid=2708 ppid=1768
5. Fiu: pid=2715, ppid=2708
8. Parinte: pid=2708 ppid=1768
6. Fiu: pid=2715, ppid=2708
9. Parinte: pid=2708 ppid=1768
7. Fiu: pid=2715, ppid=2708
8. Fiu: pid=2715, ppid=2708
9. Fiu: pid=2715, ppid=2708
Reason: the output of the parent is interleaved with that of the son;
the run above is just one of many possible interleavings.
Running the program several times, a different (possible)
interleaving is observed each time. Everything depends on which
of the two processes gets access to the processor first.
The equivalent Python source:
fork3.py
--------
import os
p = os.fork()
if p == -1:
print "fork imposibil!"
exit(1)
if p == 0:
for i in range(10):
print `i`+". Fiu: pid="+`os.getpid()`+", ppid="+`os.getppid()`
else:
for i in range(10):
print `i`+". Parinte: pid="+`os.getpid()`+" ppid="+`os.getppid()`
Example 2: simple use of execl, execlp, execv:
--------------------------------------------------
These three programs, though different, have the same effect.
All three use a call from the exec family to launch the command:
ls -l
The three sources (ignore the comments for the moment) are:
----------------------------------------------
Source execl.c:
#include<stdio.h>
#include<unistd.h>
main() {
printf("Urmeaza rezultatul executiei comenzii ls:\n");
execl("/bin/ls", "/bin/ls", "-l", NULL);
//execl("/bin/ls","/bin/ls","-l","execl.c", "fork1.c", "xx", NULL);
//execl("/bin/ls","/bin/ls","-l","*.c", NULL);
printf("Aici nu se ajunge decat in urma unei erori exec\n");
}
Source execlp.c:
#include<stdio.h>
#include<unistd.h>
main() {
printf("Urmeaza rezultatul executiei comenzii ls:\n");
execlp("ls", "ls", "-l", NULL) == -1;
printf("Aici nu se ajunge decat in urma unei erori exec\n");
// if (execlp("ls","ls","-l",NULL) == -1) {
// printf("Aici nu se ajunge decat in urma unei erori exec\n");
// perror("Aici se impune un mesaj explicativ; sistemul raspunde");
// }
}
Source execv.c:
#include<stdio.h>
#include<unistd.h>
main() {
char* argv[3];
argv[0] = "/bin/ls";
argv[1] = "-l";
argv[2] = NULL;
printf("Urmeaza rezultatul executiei comenzii ls:\n");
execv("/bin/ls",argv);
printf("Aici nu se ajunge decat in urma unei erori exec\n");
}
The effect of each of the programs is:
Urmeaza rezultatul executiei comenzii ls:
total 184
-rwxr-xr-x 1 florin florin 7176 2011-03-17 16:47 a.out
-rwxrwxrwx 1 florin florin 340 2011-03-17 16:43 execl.c
-rwxrwxrwx 1 florin florin 404 2011-03-17 16:43 execlp.c
-rwxrwxrwx 1 florin florin 370 2011-03-17 16:43 execv.c
-rwxrwxrwx 1 florin florin 364 2011-03-17 15:45 fork1.c
-rw-r--r-- 1 florin florin 353 2011-03-17 16:06 fork2.c
-rw-r--r-- 1 florin florin 386 2011-03-17 16:10 fork3.c
1.
The first program uses execl. Therefore the command must be specified
with its absolute path, /bin/ls. The command-line argument list follows
(hence the first argument appears twice).
The second uses execlp, so the command is searched for in the
directories listed in PATH, so you just specify ls.
The third uses execv. Like the first, it specifies the
absolute path. This program prepares in advance an
array of three pointers holding the command-line arguments
and the NULL pointer that marks the end of the array.
The compiler automatically allocates space for each string
constant. After the assignments argv[0] = --- and argv[1] = ---
the addresses of those strings are stored.
If necessary, such an array can be dynamically allocated
on the heap (via malloc) and then initialized with the
corresponding values using the usual C mechanisms.
2.
Studying the runs, it can be seen that none of the launched
programs displays the text of the second printf.
Simply change the first argument of exec, say xxxx instead of
ls for example, and you will receive:
Urmeaza rezultatul executiei comenzii ls:
Aici nu se ajunge decat in urma unei erori exec
3.
In the spirit of point 2, in the desire to present
programs as simple as possible, we "broke" a golden rule
of C programming:
"Always test the result returned by a
C function or system call!"
Consequently, each call to exec should be written
as shown in the execlp.c comments:
if (execlp("ls","ls","-l",NULL) == -1) {
printf("Aici nu se ajunge decat in urma unei erori exec\n");
perror("Aici se impune un mesaj explicativ; sistemul raspunde");
}
See man exec.
If we replace an exec call with a sequence like the one above, and replace ls with
xxxx, we obtain:
Aici nu se ajunge decat in urma unei erori exec
Aici se impune un mesaj explicativ; sistemul raspunde: No such file or directory
4.
In the source execl.c, two commented-out execl lines appear. Replacing
the active execl with each of them in turn, we obtain:
Urmeaza rezultatul executiei comenzii ls:
/bin/ls: cannot access xx: No such file or directory
-rwxrwxrwx 1 florin florin 340 2011-03-17 17:39 execl.c
-rwxrwxrwx 1 florin florin 364 2011-03-17 15:45 fork1.c
Urmeaza rezultatul executiei comenzii ls:
/bin/ls: cannot access *.c: No such file or directory
In the first case the effect is that of:
ls -l execl.c fork1.c xx
and the file xx does not exist in the current directory.
In the second case the command is:
ls -l *.c
but it is not performed as we expected!
-----------------------------------------
Why? Because the specification *.c is a generic (wildcard)
file specification, and only the shell "knows" to
replace it with all the .c files in the directory.
The same thing happens with the evaluation of environment variables,
${---}, the backquote construction ( `---` ),
standard I/O redirection, etc.
These processings are specific to the shell; they are not handled by exec.
If necessary, the processing must be done by the
C program before the call to exec!
By contrast, the system call does handle these substitutions: ("ls -l *.c")
The equivalent Python sources:
execl.py
--------
import os
print "Urmeaza rezultatul executiei comenzii ls:"
os.execl("/bin/ls", "/bin/ls", "-l");
#os.execl("/bin/ls","/bin/ls","-l","execl.c", "fork1.c", "xx")
#os.execl("/bin/ls","/bin/ls","-l","*.c")
print "Aici nu se ajunge decat in urma unei erori exec"
execlp.py
---------
import os
print "Urmeaza rezultatul executiei comenzii ls:"
os.execlp("ls", "ls", "-l")
print "Aici nu se ajunge decat in urma unei erori exec"
#if os.execlp("ls","ls","-l") == -1:
# print "Aici nu se ajunge decat in urma unei erori exec"
# print "Aici se impune un mesaj explicativ; sistemul raspunde"
execv.py
--------
import os
argv = ["/bin/ls", "-l"]
print "Urmeaza rezultatul executiei comenzii ls:"
os.execv("/bin/ls",argv)
print "Aici nu se ajunge decat in urma unei erori exec"
Example 3: How many pairs of nonzero numbers have an even sum?
-----------------------------------------------------------
The problem is trivially simple, but suitable to
exemplify the use of fork, wait and exit.
The statement of the problem: some pairs of integers are given
on the command line. For each two consecutive arguments from
the command line, the program creates a son process.
Each son process returns the exit code:
0 if the sum is even,
1 if the sum is odd,
2 if one of the arguments is 0 or not an integer.
The parent waits for the sons to finish and displays the result.
The source paritate.c is:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>
main(int argc, char* argv[]) {
int pare = 0, impare = 0, nenum = 0, i, n1, n2;
for (i = 1; i < argc-1; i += 2) {
if (fork() == 0) {
n1 = atoi(argv[i]); // atoi intoarce 0
n2 = atoi(argv[i+1]); // si la nenumeric
if (n1 == 0 || n2 == 0) exit(2);
if ((n1 + n2) % 2 == 0) exit(0);
else exit(1);
// Aici se termina fiecare fiu
}
}
// Parintele asteapta terminarile fiilor
for (i = 1; i < argc-1; i += 2) {
wait(&n1);
switch (WEXITSTATUS(n1)) {
case 0: pare++;break;
case 1: impare++;break;
default: nenum++;
}
}
printf("Pare %d, impare %d, nenumerice %d\n",pare, impare, nenum);
}
At the end, every son returns an appropriate exit code.
The father receives in the integer n1 a configuration of bits,
among which is the exit code value.
The function (in reality a macro) WEXITSTATUS extracts the code value.
The equivalent Python source:
paritate.py
-----------
import sys
import os
import string
pare, impare, nenum = 0, 0, 0
for i in range(1,len(sys.argv)-1,2):
if os.fork() == 0:
try : n1 = string.atoi(sys.argv[i])
except: n1 = 0
try : n2 = string.atoi(sys.argv[i+1])
except: n2 = 0
if n1 == 0 or n2 == 0: exit(2)
if (n1 + n2) % 2 == 0: exit(0)
else : exit(1)
# Aici se termina fiecare fiu
# Parintele asteapta terminarile fiilor
for i in range(1,len(sys.argv)-1,2):
n1 = os.wait()
if os.WEXITSTATUS(n1[1]) == 0 : pare += 1
elif os.WEXITSTATUS(n1[1]) == 1: impare += 1
else : nenum += 1
print "Pare "+`pare`+", impare "+`impare`+", nenumerice "+`nenum`
Example 4: A program for compiling and running another program.
--------------------------------------------------------------
The example has the same effect as the following shell script:
#!/bin/sh
if gcc -o ceva $1
then ./ceva
else echo "erori de compilare"
fi
We will not implement it in sh; instead we use the following C program,
the source compilerun.c:
#include<stdio.h>
#include<unistd.h>
#include<string.h>
#include<stdlib.h>
#include <sys/wait.h>
main(int argc, char* argv[]) {
char com[200];
strcpy(com, "gcc -o ceva "); // fabricat comanda
strcat(com, argv[1]);
if (WEXITSTATUS(system(com)) == 0)
execl("./ceva","./ceva", NULL);
printf("Erori de compilare\n");
}
To compile we use:
gcc -o comprun compilerun.c
To run we use, for example:
./comprun fork1.c
As a result, when the source given as argument (fork1.c) compiles
correctly, the gcc compiler creates the executable ceva and returns
a zero exit code, and then ceva is launched by execl.
If the compilation fails, only the message is printed.
The equivalent Python source:
compilerun.py
-------------
import sys
import os
com = "gcc -o ceva "+sys.argv[1]
if os.WEXITSTATUS(os.system(com)) == 0:
os.execl("./ceva","./ceva")
print "Erori de compilare"
Example 5: Simultaneous processing of multiple text files.
-------------------------------------------------- ----------
We want to turn a text file into another text file with the
same content, but in which all words begin with a capital letter.
Such a program will be called:
capitalizare inputfile outputfile
We aim to process multiple such files.
Therefore we will create a master process, which gets from the
command line the file names whose contents will be capitalized:
master file1 file2 - - - filen
The result will consist of files:
file1.CAPIT, file2.CAPIT - - - filen.CAPIT
The master process creates n son processes, and every son launches,
through execl, the program:
capitalizare filei filei.CAPIT
The source capitalizare.c is:
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<ctype.h>
#define MAXLINIE 100
main(int argc, char* argv[]) {
FILE *fi, *fo;
char linie[MAXLINIE], *p;
fi = fopen(argv[1], "r");
fo = fopen(argv[2], "w");
if (fi == NULL || fo == NULL) exit(1);
for ( ; ; ) {
p = fgets(linie, MAXLINIE, fi);
linie[MAXLINIE-1] = '\0';
if (p == NULL) break;
for (p = linie; ; ) {
p = strstr(p, " ");
if (p == NULL) break;
p++;
if (*p == '\n') break;
*p = toupper(*p);
}
fprintf(fo, "%s", linie);
}
fclose(fo);
fclose(fi);
}
The program receives the names of the two files from the
command line. These files are opened, and the
first file is read line by line.
With the pointer p, the current line is scanned,
looking for a space that is not the
last character on the line. The next character is
then converted to uppercase (toupper does this
transformation only if the character is actually a
lowercase letter).
The source master.c is:
#include<stdio.h>
#include<unistd.h>
#include<string.h>
main(int argc, char* argv[]) {
int i;
char nume[200];
for (i=1; argv[i]; i++) {
if (fork() == 0) { // un fiu pentru un fisier de capitalizat
strcpy(nume, argv[i]);
strcat(nume, ".CAPIT"); // fabricat numele iesirii
// incarcat programul de capitalizare
execl("capitalizare","capitalizare",argv[i], nume, 0);
}
}
printf("Lansat simultan %d procese de capitalizare\n",i-1);
}
The command line arguments are iterated, creating one
child process for each of them.
In the array nume the output file name is built.
Then the program capitalizare is loaded, with the two file
names passed on its command line.
The programs are compiled:
gcc -o capitalizare capitalizare.c
gcc -o master master.c
To start use the following command:
./master fis1 fis2 - - - fisn
The equivalent Python sources:
capitalizare.py
---------------
import sys
fi = open(sys.argv[1], "r")
fo = open(sys.argv[2], "w")
if fi == None or fo == None: exit(1)
while True:
linie = fi.readline()
if not linie: break
p = 0
while True:
p = linie.find(" ", p)
if p < 0: break
p += 1
if p == len(linie): break
linie = linie[:p]+linie[p].upper()+linie[p+1:]
    fo.write(linie) # readline already keeps the trailing newline
fo.close()
fi.close()
master.py
---------
import sys
import os
for i in range(1, len(sys.argv)):
    if os.fork() == 0: # one child for each file to capitalize
        nume = sys.argv[i]+".CAPIT" # build the output file name
        # load the capitalization program
os.execl("./capitalizare","./capitalizare",sys.argv[i], nume)
print "Lansat simultan "+`i`+" procese de capitalizare"
The statement of each homework will be of the form:
"It requires a server and one or more clients"
For pipe and FIFO communication we will use only
one client, and the statement will be changed to:
"It requires a server and one client"
We are making this change because:
1. A pipe or a FIFO channel is used to establish
communication between two processes. It is difficult
to maintain multiple clients.
2. In order to use a pipe or FIFO between multiple
clients we should use at least one of the following:
- the server should manage a separate channel for
each client.
- using some locking logic and a single channel.
- using popen calls.
3. We will use the same statement for other assignments,
where we will need to support multiple clients. Because
of this we advise our students to code their solutions
in a modular manner in order to reuse the business logic.
Only the communication logic will need changes for the following
assignment.
We will present the implementation details of using pipe,
FIFO and popen through an example.
The implementation is in the C language.
At the end of the presentation, the equivalent Python sources
will be presented.
The problem is:
Example: a limited list of file names that match a certain pattern.
------------------------------------------------------------
The problem statement is:
It requires a server and a client. The client will send an integer l
and a string s. The server will respond with a message that has at
most l entries. Each entry will represent a file, from the
current directory, whose name ends with the string s.
We will present the implementation using multiple functions.
At first we will build two functions that will have the same role,
to present the list of the file names:
Mesaj *dir(int l, char *s)
---
Mesaj *dirPopen(int l, char *s)
--------
The 'Mesaj' data type will differ based on the channel used,
and we will use the following format:
/--PLUS--\ /------------- MAXS --------------\
|lung| |s |
|----|----------|xxxxxxxxxxxxxxxxxxxxxxxxxxxx|------|
| |
\ -------------- lung ----------------/
lung - is an integer that represents the length of the message body.
PLUS zone - contains a couple of integers, depending on the channel used.
s - is an array of MAXS characters.
Each student will customize the message structure based on the problem requirements.
Source dir.c contains dir and dirPopen functions. These functions are independent
----- --- --------
of the communication channel that is used:
Mesaj *dir(int l, char *s) {
static Mesaj resp;
DIR *dirp;
struct dirent *d;
char *p, *q, *n;
int i;
p = resp.s;
resp.lung = 0;
dirp = opendir (".");
for (i=0; i<l; ) {
d = readdir (dirp);
if (d == NULL) break;
n = d->d_name;
if (strlen(n) < strlen(s)) continue;
if (strcmp(n+strlen(n)-strlen(s), s) != 0) continue;
if (resp.lung+strlen(n)+1 > MAXS) break;
i++;
strcpy(p, n);
resp.lung += strlen(n)+1;
p += strlen(n)+1;
}
closedir (dirp);
resp.lung += PLUS;
return &resp;
}
Mesaj *dirPopen(int l, char *s) {
static Mesaj resp;
FILE *fin;
char n[MAXL], *p, *q, comanda[MAXL];
int i;
strcpy(comanda, "ls -1 *");
strcat(comanda, s);
fin = popen(comanda, "r");
p = resp.s;
resp.lung = 0;
for (i=0; i<l; ) {
q = fgets(n, MAXL, fin);
if (q == NULL) break;
n[strlen(n)-1] = '\0';
if (resp.lung+strlen(n)+1 > MAXS) break;
i++;
strcpy(p, n);
resp.lung += strlen(n)+1;
p += strlen(n)+1;
}
pclose (fin);
resp.lung += PLUS;
return &resp;
}
The above functions receive an integer named l and a string named s,
and return a pointer to a Mesaj structure that will
contain the list of file names.
In the message body, in the s section, we will have a sequence
of null-terminated strings (C convention) with the requested file
names. The 'lung' value will represent the total length
of the sequence from s plus the PLUS zone.
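For example (our own illustration, not part of the original sources; it
assumes PLUS == sizeof(int) == 4, as defined in mesaj.h below): for the
names "pipe.c" and "fiu.c" the s section holds the bytes pipe.c\0fiu.c\0,
so in Python:
names = ["pipe.c", "fiu.c"]
body = "".join(n + "\0" for n in names)   # 7 + 6 = 13 bytes, terminators included
lung = len(body) + 4                      # 13 + 4 = 17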
The dir function builds the response using system calls such as
opendir, readdir, closedir and structures such as DIR* and struct dirent.
The dirPopen function uses the popen library call, for example
popen("ls -1 *.c", "r") when the string s is ".c". In dirPopen we use the
standard output of the popen call to extract the first l entries. (Note
that the dirPopen output may be ordered differently than the dir output!)
For pipe and FIFO the message body will be defined in mesaj.h file:
-------
#define MAXS 10000
#define MAXL 1000
typedef struct {
int lung;
int i;
char s[MAXS];
} Mesaj;
#define PLUS (sizeof(int))
The read / write of a Mesaj message using pipe or FIFO is done
in two steps:
1. read / write the 'lung' field.
2. read / write the message body, possibly in several pieces.
Why do we need such a convention?
We will use the read and write system calls. These system calls are atomic,
but they do not guarantee that all of the requested bytes (the third
argument) are transferred at once. Each call returns the number of bytes
it was able to transfer.
Also, because these calls return nothing that could identify the
client, it is difficult (as we said previously) to maintain communication
with multiple clients this way.
For exchanging messages we will use the following
functions: Read, Write, ReadMes, WriteMes.
---- ----- ------- --------
The first two call the read and write system calls repeatedly
until all n bytes are exchanged, where n is the input parameter.
ReadMes and WriteMes exchange first the length of the body
and then the body content.
All of the above functions are defined in ReadWrite.c source.
-----------
void Read(int f, char *t, int n) {
char *p;
int i, c;
for (p=t, c=n; ; ) {
i = read(f, p, c);
if (i == c) return;
c -= i;
p += i;
}
}
void Write(int f, char *t, int n) {
char *p;
int i, c;
for (p=t, c=n; c; ) {
i = write(f, p, c);
if (i == c) return;
c -= i;
p += i;
}
}
Mesaj *ReadMes(int canal) {
static Mesaj mesaj;
read(canal, (char*)&mesaj.lung, sizeof(int));
Read(canal, (char*)&mesaj+sizeof(int), mesaj.lung);
return &mesaj;
}
void WriteMes(int canal, Mesaj *pm) {
write(canal, (char*)pm, sizeof(int));
Write(canal, (char*)pm+sizeof(int), pm->lung);
}
The server main activity will be defined by the following function:
void parinte(int in, int out)
-------
The input parameters are channel handlers that will be used to
read/write the message from/to the client.
Depending on the case, the handlers will be for pipe, FIFO or, as we will
see in the next assignments, handlers for shared memory or message queues.
The logic of this function is: read one message from the client,
call the dir (or dirPopen) function, write the response to the client.
Source code for parinte.c:
---------
void parinte(int in, int out) {
Mesaj *pm;
for ( ; ; ) {
pm = ReadMes(in);
//pm = dirPopen(pm->i, pm->s);
pm = dir(pm->i, pm->s);
WriteMes(out, pm);
}
}
The main client logic is defined by the following function:
void fiu(int in, int out)
---
Like on the server side, the arguments represent channel handlers,
in this case used to exchange messages with the server part.
The client will read from standard input an integer l and
a string s, after which it will build a message to be sent to the server.
After sending the message it will wait for a response and, once received,
it will print the content on the standard output.
The fiu.c source presents the client logic:
-----
void fiu(int in, int out) {
Mesaj *pm, mesaj;
char *pc,linie[MAXL];
int i;
for ( ; ; ) {
printf("Dati: numar|sufix: ");
pc = (char*)fgets(linie, MAXL, stdin);
if (pc == NULL) break;
linie[strlen(linie)-1] = '\0';
pc = strstr(linie, "|");
if (pc == NULL) continue;
mesaj.i = atoi(linie);
strcpy(mesaj.s, pc+1);
mesaj.lung = PLUS + strlen(mesaj.s) + 1;
WriteMes(out, &mesaj);
pm = ReadMes(in);
pc = pm->s;
printf("%d\n",pm->lung);
for (i = PLUS; i < pm->lung; ) {
printf("%d %s\n", i, pc);
i += strlen(pc) + 1;
pc += strlen(pc) + 1;
}
}
}
Finally we will present the main sources.
The first implementation contains pipe communication logic and
is presented in pipe.c source file:
------
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "mesaj.h"
#include "ReadWrite.c"
#include "dir.c"
#include "parinte.c"
#include "fiu.c"
main() {
int fp[2], pf[2], pid;
// Two pipes, fp: fiu->parinte (child to parent), pf: parinte->fiu (parent to child)
if (pipe(fp) < 0 || pipe(pf) < 0) exit(1);
pid = fork();
if (pid < 0) exit(2);
if (pid > 0) { // Server (parent) code
fclose(stdin);
fclose(stdout);
close(fp[1]);
close(pf[0]);
parinte(fp[0], pf[1]);
} else { // Client (child) code
close(fp[0]);
close(pf[1]);
fiu(pf[0], fp[1]);
close(pf[0]);
close(fp[1]);
}
}
We should notice that we are closing the unused channel ends on each side.
By doing this we establish the communication direction and free the unused
resources.
In the second implementation we present FIFO.
We first need to create two FIFO channels in our current directory:
$ mkfifo fifo1
$ mkfifo fifo2
If the fifo files already exist, we should first remove them using:
$ rm fifo1
$ rm fifo2
Because the client and the server are run by the same user,
the fifo files can reside in the current directory.
If the processes are created by separate users, we would place
the fifo files in a common location, like /tmp/fifo1 and /tmp/fifo2.
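As an optional sketch of our own (not part of the original sources), the two
channels can also be created directly from Python with os.mkfifo, removing any
stale files left over from a previous run:
import os
for nume in ("fifo1", "fifo2"):
    if os.path.exists(nume):
        os.remove(nume)       # remove a stale FIFO left from a previous run
    os.mkfifo(nume, 0666)     # same access mode as used when opening the FIFOs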
The server source code is defined in fifos.c:
--------
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "mesaj.h"
#include "ReadWrite.c"
#include "dir.c"
#include "parinte.c"
main() {
int f1, f2;
fclose(stdin);
fclose(stdout);
f1 = open("fifo1", O_WRONLY);
f2 = open("fifo2", O_RDONLY);
parinte(f2, f1);
}
The client source code is defined in fifoc.c:
-------
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include "mesaj.h"
#include "ReadWrite.c"
#include "fiu.c"
main() {
int f1, f2;
f1 = open("fifo1", O_RDONLY);
f2 = open("fifo2", O_WRONLY);
fiu(f1, f2);
close(f1);
close(f2);
}
Below are presented some executions with and without popen (i.e. using dirPopen or dir), over pipe and FIFO.
---------------
florin@florin-laptop:~/probleme/UPipe-H$ #dir
florin@florin-laptop:~/probleme/UPipe-H$ gcc pipe.c
florin@florin-laptop:~/probleme/UPipe-H$ ./a.out
Dati: numar|sufix: 5|.c
47
4 fiu.c
10 ReadWrite.c
22 parinte.c
32 fifos.c
40 pipe.c
Dati: numar|sufix: ^C
florin@florin-laptop:~/probleme/UPipe-H$ #dirPopen
florin@florin-laptop:~/probleme/UPipe-H$ gcc pipe.c
florin@florin-laptop:~/probleme/UPipe-H$ ./a.out
Dati: numar|sufix: 5|.c
43
4 fifoc.c
12 fifos.c
20 fiu.c
26 parinte.c
36 pipe.c
Dati: numar|sufix: ^C
florin@florin-laptop:~/probleme/UPipe-H$ mkfifo fifo1
florin@florin-laptop:~/probleme/UPipe-H$ mkfifo fifo2
florin@florin-laptop:~/probleme/UPipe-H$ #dirPopen
florin@florin-laptop:~/probleme/UPipe-H$ gcc -o s fifos.c
florin@florin-laptop:~/probleme/UPipe-H$ gcc -o c fifoc.c
florin@florin-laptop:~/probleme/UPipe-H$ ./s&
[1] 2066
florin@florin-laptop:~/probleme/UPipe-H$ ./c
Dati: numar|sufix: 5|.c
43
4 fifoc.c
12 fifos.c
20 fiu.c
26 parinte.c
36 pipe.c
Dati: numar|sufix: ^C
florin@florin-laptop:~/probleme/UPipe-H$ #dir
florin@florin-laptop:~/probleme/UPipe-H$ kill 2066
florin@florin-laptop:~/probleme/UPipe-H$ rm fifo1
[1]+ Terminated ./s
florin@florin-laptop:~/probleme/UPipe-H$ rm fifo2
florin@florin-laptop:~/probleme/UPipe-H$ mkfifo fifo1
florin@florin-laptop:~/probleme/UPipe-H$ mkfifo fifo2
florin@florin-laptop:~/probleme/UPipe-H$ gcc -o s fifos.c
florin@florin-laptop:~/probleme/UPipe-H$ gcc -o c fifoc.c
florin@florin-laptop:~/probleme/UPipe-H$ ./s&
[1] 2142
florin@florin-laptop:~/probleme/UPipe-H$ ./c
Dati: numar|sufix: 5|.c
47
4 fiu.c
10 ReadWrite.c
22 parinte.c
32 fifos.c
40 pipe.c
Dati: numar|sufix:
The equivalent Python sources:
Mesaj.py
--------
class Mesaj:
MAXS = 10000
    SIZEOFINT = 10 # stored as text
PLUS = SIZEOFINT
lung = 0
i = 0
s = [""]
def __init__(self, ser):
if ser == None: return
self.lung = int(ser[:self.SIZEOFINT])
self.i = int(ser[self.SIZEOFINT:2*self.SIZEOFINT])
self.s = ser[2*self.SIZEOFINT:self.SIZEOFINT+self.lung].split("|")
def __str__(self):
ser = ""
for l in self.s:
ser += l+"|"
ser = ser[:-1]
ser = self.i2s(self.lung)+self.i2s(self.i)+ser
return ser
def i2s(self, i):
sir = "000000000000000000"+`i`
if sir.endswith("L"): sir = sir[:-1]
return sir[-self.SIZEOFINT:]
dir.py
------
import os
import Mesaj
def dir(l, s):
mesaj = Mesaj.Mesaj(None)
mesaj.s = []
lung = 0
i = 1
for linie in os.listdir("."):
if i > l: break
if lung + len(linie) + len(mesaj.s) > mesaj.MAXS: break
if len(linie) < len(s): continue
if linie[len(linie)- len(s):] != s: continue
mesaj.s += [linie]
i += 1
lung += len(linie)
mesaj.lung = mesaj.PLUS + lung + len(mesaj.s) - 1
if len(mesaj.s) == 0: mesaj.lung += 1
return mesaj
def dirPopen(l, s):
mesaj = Mesaj.Mesaj(None)
mesaj.s = []
lung = 0
i = 1
for linie in os.popen("ls -1", "r").readlines():
linie = linie[:-1]
if i > l: break
if lung + len(linie) + len(mesaj.s) > mesaj.MAXS: break
if len(linie) < len(s): continue
if linie[len(linie)- len(s):] != s: continue
mesaj.s += [linie]
i += 1
lung += len(linie)
mesaj.lung = mesaj.PLUS + lung + len(mesaj.s) - 1
if len(mesaj.s) == 0: mesaj.lung += 1
return mesaj
ReadWrite.py
------------
import Mesaj
import os
def Read(f, n):
c = n
sir = ""
while c > 0:
s = os.read(f, c);
sir += s
c -= len(s)
return sir
def Write(f, sir):
c = len(sir)
p = 0
while c > 0:
i = os.write(f, sir[p:])
c -= i
p += i
def ReadMes(canal):
lung = os.read(canal, Mesaj.Mesaj.SIZEOFINT)
ser = Read(canal, int(lung))
return Mesaj.Mesaj(lung+ser)
def WriteMes(canal, mesaj):
lung = mesaj.SIZEOFINT+mesaj.lung
Write(canal, str(mesaj)[:lung])
parinte.py
----------
import ReadWrite
import dir
def parinte(iN, out):
while True:
mesaj = ReadWrite.ReadMes(iN)
mesaj = dir.dir(mesaj.i, mesaj.s[0])
#mesaj = dir.dirPopen(mesaj.i, mesaj.s[0])
ReadWrite.WriteMes(out, mesaj)
fiu.py
------
import sys
import Mesaj
import ReadWrite
def fiu(iN, out):
while True:
print "Dati: numar|sufix: ",
linie = sys.stdin.readline()
if not linie: break
linie = linie[:-1]
pc = linie.find("|")
if pc < 0: continue
mesaj = Mesaj.Mesaj(None)
mesaj.s = []
mesaj.i = int(linie[:pc])
mesaj.s += [linie[pc+1:]]
mesaj.lung = mesaj.PLUS + len(mesaj.s[0])
ReadWrite.WriteMes(out, mesaj)
mesaj = ReadWrite.ReadMes(iN)
for l in mesaj.s:
print l
pipe.py
-------
import sys
import os
import parinte
import fiu
def main():
fp = os.pipe()
pf = os.pipe()
pid = os.fork()
if pid < 0: exit(2)
    if pid > 0: # Server (parent) code
sys.stdin.close()
sys.stdout.close()
os.close(fp[1])
os.close(pf[0])
parinte.parinte(fp[0], pf[1])
    else: # Client (child) code
os.close(fp[0])
os.close(pf[1])
fiu.fiu(pf[0], fp[1])
os.close(pf[0])
os.close(fp[1])
main()
fifos.py
--------
import sys
import os
import parinte
def main():
sys.stdin.close()
    sys.stdout.close()
f1 = os.open("fifo1", os.O_WRONLY, 0666)
f2 = os.open("fifo2", os.O_RDONLY, 0666)
parinte.parinte(f2, f1)
main()
fifoc.py
--------
import os
import fiu
def main():
f1 = os.open("fifo1", os.O_RDONLY, 0666)
f2 = os.open("fifo2", os.O_WRONLY, 0666)
fiu.fiu(f1, f2)
os.close(f1)
os.close(f2)
main()
Problem statement
For each command line argument the main process will launch a subprocess (type A).
Each A process will try to execute the received argument in a new subprocess (type B), using one of the exec functions.
If the exec call fails, the type A process will send the error code (the errno value) generated by the call to its parent, over a pipe channel.
On success, the A process will wait for the type B process and, once it finishes, will transmit the zero code to its parent over the same pipe channel.
The main process will print, for each argument, whether the execution was successful or not. Only for failed executions will the received error code be printed as well.
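Below is a minimal Python sketch for this statement (our own illustration, not a
reference solution). It assumes that a single pipe is shared by all type A
processes, that each A process writes one short record of the form "index:code"
(well under PIPE_BUF, so the write is atomic), and that a type B process whose
exec fails terminates with the errno value as its exit status; this simple scheme
cannot distinguish an exec failure from a program that itself exits with a
non-zero code.
import sys
import os
r, w = os.pipe()
for i in range(1, len(sys.argv)):
    if os.fork() == 0:                          # process of type A
        os.close(r)
        pid = os.fork()
        if pid == 0:                            # process of type B
            try:
                os.execlp(sys.argv[i], sys.argv[i])
            except OSError, e:
                os._exit(e.errno)               # exec failed: report errno as exit status
        cod = os.WEXITSTATUS(os.waitpid(pid, 0)[1])
        os.write(w, "%d:%d\n" % (i, cod))       # one short record per argument
        os._exit(0)
os.close(w)
f = os.fdopen(r)
for _ in range(1, len(sys.argv)):               # collect one record per argument
    i, cod = [int(x) for x in f.readline().split(":")]
    if cod == 0:
        print "Argument "+sys.argv[i]+": executed successfully"
    else:
        print "Argument "+sys.argv[i]+": exec failed, errno="+`cod`
for _ in range(1, len(sys.argv)):               # wait for the type A processes
    os.wait()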
|
|
import unittest
import mock
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
if cuda.available:
cuda.init()
class TestFunction(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def setUp(self):
y1 = numpy.arange(4).astype(numpy.float32)
y2 = numpy.arange(4).astype(numpy.float32) + 1
gx1 = numpy.arange(3).astype(numpy.float32)
gx2 = None
gy1 = numpy.arange(4).astype(numpy.float32)
gy2 = numpy.arange(4).astype(numpy.float32)
f = chainer.Function()
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
f.forward_gpu = mock.MagicMock()
f.backward_cpu = mock.MagicMock(return_value=(gx1, gx2))
f.backward_gpu = mock.MagicMock()
self.f = f
self.x1 = numpy.arange(3).astype(numpy.float32)
self.x2 = numpy.arange(3).astype(numpy.int32)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_gpu(self):
self.x1 = cuda.to_gpu(self.x1)
self.x2 = cuda.to_gpu(self.x2)
self.y1 = cuda.to_gpu(self.y1)
self.y2 = cuda.to_gpu(self.y2)
self.gx1 = cuda.to_gpu(self.gx1)
self.gx2 = None
self.gy1 = cuda.to_gpu(self.gy1)
self.gy2 = cuda.to_gpu(self.gy2)
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
self.f.backward_gpu = mock.MagicMock(return_value=(self.gx1, self.gx2))
def check_forward(self, gpu):
y1, y2 = self.f.forward((self.x1, self.x2))
self.assertEqual(self.f.check_type_forward.call_count, 0)
self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
self._get_method('forward', gpu).assert_called_once_with(
(self.x1, self.x2))
self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
def test_forward_cpu(self):
self.check_forward(False)
@attr.gpu
def test_forward_gpu(self):
self.setup_gpu()
self.check_forward(True)
def check_backward(self, gpu):
gx1, gx2 = self.f.backward((self.x1, self.x2), (self.gy1, self.gy2))
self.assertEqual(self._get_method('backward', not gpu).call_count, 0)
self._get_method('backward', gpu).assert_called_once_with(
(self.x1, self.x2), (self.gy1, self.gy2))
self.assertTrue((cuda.to_cpu(gx1) == cuda.to_cpu(self.gx1)).all())
self.assertIsNone(gx2)
def test_backward_cpu(self):
self.check_backward(False)
@attr.gpu
def test_backward_gpu(self):
self.setup_gpu()
self.check_backward(True)
def check_check_type_forward(self):
self.assertEqual(self.f.check_type_forward.call_count, 1)
ts = self.f.check_type_forward.call_args[0][0]
self.assertIsInstance(ts, type_check.TypeInfoTuple)
self.assertEqual(len(ts), 2)
self.assertEqual(ts[0].name, 'in_types[0]')
t1 = ts[0].eval()
self.assertEqual(t1.shape, (3,))
self.assertEqual(t1.dtype, numpy.float32)
self.assertEqual(ts[1].name, 'in_types[1]')
t2 = ts[1].eval()
self.assertEqual(t2.shape, (3,))
self.assertEqual(t2.dtype, numpy.int32)
def check_call(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
x1.rank = 1
x2.rank = 3
ys = self.f(x1, x2)
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 2, since Function call
# automatically inserts Split function.
self.assertEqual(y.rank, 5)
self.assertFalse(y.volatile)
# __call__ method makes a copy
self.assertIsNot(y.creator, self.f)
self.assertIsNone(self.f.outputs)
self.assertIsInstance(y.creator.outputs, tuple)
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
self.setup_gpu()
self.check_call()
def check_call_volatile(self):
x1 = chainer.Variable(self.x1, volatile=True)
x2 = chainer.Variable(self.x2, volatile=True)
x1.rank = 1
x2.rank = 3
ys = self.f(x1, x2)
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertEqual(y.rank, 0)
self.assertTrue(y.volatile)
self.assertIsNone(y.creator)
self.assertIsNone(self.f.outputs)
def test_call_volatile_cpu(self):
self.check_call_volatile()
@attr.gpu
def test_call_volatile_gpu(self):
self.setup_gpu()
self.check_call_volatile()
def check_call_single_return_value(self, volatile):
x1 = chainer.Variable(self.x1, volatile=volatile)
x2 = chainer.Variable(self.x2, volatile=volatile)
ret = self.f(x1, x2)
self.assertIsInstance(ret, chainer.Variable)
def test_call_sigle_return_value_cpu(self):
self.f.forward_cpu.return_value = (cuda.to_cpu(self.y1),)
self.check_call_single_return_value(False)
@attr.gpu
def test_call_sigle_return_value_gpu(self):
self.setup_gpu()
self.f.forward_gpu.return_value = (cuda.to_gpu(self.y1),)
self.check_call_single_return_value(False)
def test_call_sigle_return_value_volatile_cpu(self):
self.f.forward_cpu.return_value = (cuda.to_cpu(self.y1),)
self.check_call_single_return_value(True)
@attr.gpu
def test_call_sigle_return_value_volatile_gpu(self):
self.setup_gpu()
self.f.forward_gpu.return_value = (cuda.to_gpu(self.y1),)
self.check_call_single_return_value(True)
def check_call_mixed_volatile(self):
x1 = chainer.Variable(self.x1, volatile=True)
x2 = chainer.Variable(self.x2, volatile=False)
with self.assertRaises(AssertionError):
self.f(x1, x2)
def test_call_mixed_volatile_cpu(self):
self.check_call_mixed_volatile()
@attr.gpu
def test_call_mixed_volatile_gpu(self):
self.setup_gpu()
self.check_call_mixed_volatile()
def _get_f(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
y1, y2 = self.f(x1, x2)
f = y1.creator
        # To test weak reference, return only x1 and y1.
# x2 and y2 are deleted by the garbage collector
return f, x1, y1
def test_unchain(self):
f, _x1, _y1 = self._get_f()
f.unchain()
y1, y2 = f.outputs
# As _y1 is alive, this weak ref is also alive
y1_ref = y1()
self.assertTrue(y1_ref is not None and y1_ref.creator is None)
# This weak ref is dead by unchain
y2_ref = y2()
self.assertTrue(y2_ref is None)
self.assertIsNone(f.inputs)
def test_parameters_getter(self):
self.assertEqual(self.f.parameters, ())
def test_gradients_getter(self):
self.assertEqual(self.f.gradients, ())
def test_label(self):
self.assertEqual(self.f.label, 'Function')
class TestParameterizedFunction(unittest.TestCase):
def setUp(self):
f = chainer.Function()
f.p1 = numpy.arange(10)
f.p2 = numpy.arange(5)
f.g1 = numpy.arange(8)
f.g2 = numpy.arange(3)
f.parameter_names = ('p1', 'p2')
f.gradient_names = ('g1', 'g2')
self.f = f
@attr.gpu
def test_to_gpu(self):
self.f.to_gpu()
self.assertIsInstance(self.f.p1, cuda.GPUArray)
self.assertIsInstance(self.f.p2, cuda.GPUArray)
@attr.gpu
def test_to_cpu(self):
self.f.to_gpu()
self.f.to_cpu()
self.assertIsInstance(self.f.p1, numpy.ndarray)
self.assertIsInstance(self.f.p2, numpy.ndarray)
def test_parameters_getter(self):
ps = self.f.parameters
self.assertIsInstance(ps, tuple)
self.assertEqual(len(ps), 2)
def test_parameters_setter(self):
p1 = numpy.arange(10) + 1
p2 = numpy.arange(5) + 1
self.f.parameters = (p1, p2)
q1, q2 = self.f.parameters
self.assertIs(p1, q1)
self.assertIs(p2, q2)
def test_parameters_setter_invalid_size(self):
p1 = numpy.arange(10) + 1
with self.assertRaises(AssertionError):
self.f.parameters = (p1,)
def test_gradients_getter(self):
gs = self.f.gradients
self.assertIsInstance(gs, tuple)
self.assertEqual(len(gs), 2)
def test_gradients_setter(self):
g1 = numpy.arange(8) + 1
g2 = numpy.arange(4) + 1
self.f.gradients = (g1, g2)
h1, h2 = self.f.gradients
self.assertIs(g1, h1)
self.assertIs(g2, h2)
def test_gradients_setter_invalid_size(self):
g1 = numpy.arange(8) + 1
with self.assertRaises(AssertionError):
self.f.gradients = (g1,)
class TestFunctionBackwardIntegration(unittest.TestCase):
def test_backward(self):
x = chainer.Variable(numpy.array([1]))
y1 = F.identity(x)
y2 = F.identity(x)
z = y1 + y2
z.grad = numpy.array([1])
z.backward(retain_grad=True)
self.assertEqual(y1.grad[0], 1)
self.assertEqual(y2.grad[0], 1)
self.assertEqual(x.grad[0], 2)
class TestSplit(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.g1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.g2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def _make_split(self, x):
v = chainer.Variable(x)
v.rank = 1
return chainer.function.Split(v)
def check_init(self, x):
split = self._make_split(x)
self.assertEqual(split.rank, 1)
def test_init_cpu(self):
self.check_init(self.x)
@attr.gpu
def test_init_gpu(self):
self.check_init(cuda.to_gpu(self.x))
def check_add_branch(self, x):
split = self._make_split(x)
out = split.add_branch()
self.assertIsInstance(out, chainer.Variable)
self.assertIs(out.creator, split)
self.assertEqual(len(split.outputs), 1)
def test_add_branch_cpu(self):
self.check_add_branch(self.x)
@attr.gpu
def test_add_branch_gpu(self):
self.check_add_branch(cuda.to_gpu(self.x))
def check_backward(self, x, g1, g2):
split = self._make_split(x)
grads = (g1, g2, None)
gx, = split.backward((x,), grads)
gradient_check.assert_allclose(g1 + g2, gx)
def test_backward_cpu(self):
self.check_backward(self.x, self.g1, self.g2)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.g1),
cuda.to_gpu(self.g2))
def check_backward_one(self, x, g1):
split = self._make_split(x)
grads = (g1,)
gx, = split.backward((x,), grads)
# Note that when only one argument is given, its return value
# is a grad itself, and not a copy of it.
self.assertIs(g1, gx)
def test_backward_one_cpu(self):
self.check_backward_one(self.x, self.g1)
@attr.gpu
def test_backward_one_gpu(self):
self.check_backward_one(cuda.to_gpu(self.x),
                                cuda.to_gpu(self.g1))
testing.run_module(__name__, __file__)
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
PXE Boot Interface
"""
import os
from ironic_lib import metrics_utils
from ironic_lib import utils as ironic_utils
from oslo_log import log as logging
from oslo_utils import fileutils
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _, _LW
from ironic.common import image_service as service
from ironic.common import images
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
REQUIRED_PROPERTIES = {
'deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
"Required."),
'deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
"mounted at boot time. Required."),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
def _parse_driver_info(node):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the driver_info values.
:raises: MissingParameterValue
"""
info = node.driver_info
d_info = {k: info.get(k) for k in ('deploy_kernel', 'deploy_ramdisk')}
error_msg = _("Cannot validate PXE bootloader. Some parameters were"
" missing in node's driver_info")
deploy_utils.check_for_missing_params(d_info, error_msg)
return d_info
def _get_instance_image_info(node, ctx):
"""Generate the paths for TFTP files for instance related images.
This method generates the paths for instance kernel and
instance ramdisk. This method also updates the node, so caller should
already have a non-shared lock on the node.
:param node: a node object
:param ctx: context
:returns: a dictionary whose keys are the names of the images (kernel,
ramdisk) and values are the absolute paths of them. If it's a whole
disk image or node is configured for localboot,
it returns an empty dictionary.
"""
image_info = {}
# NOTE(pas-ha) do not report image kernel and ramdisk for
# local boot or whole disk images so that they are not cached
if (node.driver_internal_info.get('is_whole_disk_image') or
deploy_utils.get_boot_option(node) == 'local'):
return image_info
root_dir = pxe_utils.get_root_dir()
i_info = node.instance_info
labels = ('kernel', 'ramdisk')
d_info = deploy_utils.get_image_instance_info(node)
if not (i_info.get('kernel') and i_info.get('ramdisk')):
glance_service = service.GlanceImageService(
version=CONF.glance.glance_api_version, context=ctx)
iproperties = glance_service.show(d_info['image_source'])['properties']
for label in labels:
i_info[label] = str(iproperties[label + '_id'])
node.instance_info = i_info
node.save()
for label in labels:
image_info[label] = (
i_info[label],
os.path.join(root_dir, node.uuid, label)
)
return image_info
def _get_deploy_image_info(node):
"""Generate the paths for TFTP files for deploy images.
This method generates the paths for the deploy kernel and
deploy ramdisk.
:param node: a node object
:returns: a dictionary whose keys are the names of the images (
deploy_kernel, deploy_ramdisk) and values are the absolute
paths of them.
:raises: MissingParameterValue, if deploy_kernel/deploy_ramdisk is
missing in node's driver_info.
"""
d_info = _parse_driver_info(node)
return pxe_utils.get_deploy_kr_info(node.uuid, d_info)
def _build_deploy_pxe_options(task, pxe_info):
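    """Build PXE options pointing at the deploy kernel and ramdisk.

    When iPXE is enabled the options are HTTP URLs (or Swift temp URLs
    for Glance images when ipxe_use_swift is set); otherwise they are
    paths relative to the TFTP root.
    """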
pxe_opts = {}
node = task.node
for label, option in (('deploy_kernel', 'deployment_aki_path'),
('deploy_ramdisk', 'deployment_ari_path')):
if CONF.pxe.ipxe_enabled:
image_href = pxe_info[label][0]
if (CONF.pxe.ipxe_use_swift and
service_utils.is_glance_image(image_href)):
pxe_opts[option] = images.get_temp_url_for_glance_image(
task.context, image_href)
else:
pxe_opts[option] = '/'.join([CONF.deploy.http_url, node.uuid,
label])
else:
pxe_opts[option] = pxe_utils.get_path_relative_to_tftp_root(
pxe_info[label][1])
return pxe_opts
def _build_instance_pxe_options(task, pxe_info):
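    """Build PXE options pointing at the instance kernel and ramdisk.

    Swift temp URLs are intentionally not used here, so that netboot
    keeps working after the temp URLs time out. Dummy aki/ari values
    are set to satisfy elilo when no kernel/ramdisk is available.
    """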
pxe_opts = {}
node = task.node
for label, option in (('kernel', 'aki_path'),
('ramdisk', 'ari_path')):
if label in pxe_info:
if CONF.pxe.ipxe_enabled:
# NOTE(pas-ha) do not use Swift TempURLs for kernel and
# ramdisk of user image when boot_option is not local,
# as this breaks instance reboot later when temp urls
# have timed out.
pxe_opts[option] = '/'.join(
[CONF.deploy.http_url, node.uuid, label])
else:
# It is possible that we don't have kernel/ramdisk or even
# image_source to determine if it's a whole disk image or not.
# For example, when transitioning to 'available' state
# for first time from 'manage' state.
pxe_opts[option] = pxe_utils.get_path_relative_to_tftp_root(
pxe_info[label][1])
# These are dummy values to satisfy elilo.
# image and initrd fields in elilo config cannot be blank.
pxe_opts.setdefault('aki_path', 'no_kernel')
pxe_opts.setdefault('ari_path', 'no_ramdisk')
return pxe_opts
def _build_extra_pxe_options():
# Enable debug in IPA according to CONF.debug if it was not
# specified yet
pxe_append_params = CONF.pxe.pxe_append_params
if CONF.debug and 'ipa-debug' not in pxe_append_params:
pxe_append_params += ' ipa-debug=1'
return {'pxe_append_params': pxe_append_params,
'tftp_server': CONF.pxe.tftp_server,
'ipxe_timeout': CONF.pxe.ipxe_timeout * 1000}
def _build_pxe_config_options(task, pxe_info, service=False):
"""Build the PXE config options for a node
This method builds the PXE boot options for a node,
given all the required parameters.
The options should then be passed to pxe_utils.create_pxe_config to
create the actual config files.
:param task: A TaskManager object
:param pxe_info: a dict of values to set on the configuration file
:param service: if True, build "service mode" pxe config for netboot-ed
user image and skip adding deployment image kernel and ramdisk info
to PXE options.
:returns: A dictionary of pxe options to be used in the pxe bootfile
template.
"""
if service:
pxe_options = {}
else:
pxe_options = _build_deploy_pxe_options(task, pxe_info)
# NOTE(pas-ha) we still must always add user image kernel and ramdisk info
# as later during switching PXE config to service mode the template
# will not be regenerated anew, but instead edited as-is.
# This can be changed later if/when switching PXE config will also use
# proper templating instead of editing existing files on disk.
pxe_options.update(_build_instance_pxe_options(task, pxe_info))
pxe_options.update(_build_extra_pxe_options())
return pxe_options
def _build_service_pxe_config(task, instance_image_info,
root_uuid_or_disk_id):
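    """Build the PXE config used to boot the deployed user image.

    On takeover of an ACTIVE node, the basic PXE config and links are
    created first if missing; the config is then switched from
    deployment mode to service (boot) mode for the given root UUID or
    disk id.
    """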
node = task.node
pxe_config_path = pxe_utils.get_pxe_config_file_path(node.uuid)
# NOTE(pas-ha) if it is takeover of ACTIVE node,
# first ensure that basic PXE configs and links
# are in place before switching pxe config
if (node.provision_state == states.ACTIVE and
not os.path.isfile(pxe_config_path)):
pxe_options = _build_pxe_config_options(task, instance_image_info,
service=True)
pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options, pxe_config_template)
iwdi = node.driver_internal_info.get('is_whole_disk_image')
deploy_utils.switch_pxe_config(
pxe_config_path, root_uuid_or_disk_id,
deploy_utils.get_boot_mode_for_deploy(node),
iwdi, deploy_utils.is_trusted_boot_requested(node))
@METRICS.timer('validate_boot_option_for_trusted_boot')
def validate_boot_parameters_for_trusted_boot(node):
"""Check if boot parameters are valid for trusted boot."""
boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
boot_option = deploy_utils.get_boot_option(node)
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
# 'is_whole_disk_image' is not supported by trusted boot, because there is
# no Kernel/Ramdisk to measure at all.
if (boot_mode != 'bios' or
is_whole_disk_image or
boot_option != 'netboot'):
msg = (_("Trusted boot is only supported in BIOS boot mode with "
"netboot and without whole_disk_image, but Node "
"%(node_uuid)s was configured with boot_mode: %(boot_mode)s, "
"boot_option: %(boot_option)s, is_whole_disk_image: "
"%(is_whole_disk_image)s: at least one of them is wrong, and "
"this can be caused by enable secure boot.") %
{'node_uuid': node.uuid, 'boot_mode': boot_mode,
'boot_option': boot_option,
'is_whole_disk_image': is_whole_disk_image})
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
@image_cache.cleanup(priority=25)
class TFTPImageCache(image_cache.ImageCache):
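    """Image cache for master copies of the deploy/instance kernels and ramdisks."""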
def __init__(self):
super(TFTPImageCache, self).__init__(
CONF.pxe.tftp_master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60)
def _cache_ramdisk_kernel(ctx, node, pxe_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(pxe_utils.get_root_dir(), node.uuid))
LOG.debug("Fetching necessary kernel and ramdisk for node %s",
node.uuid)
deploy_utils.fetch_images(ctx, TFTPImageCache(), list(pxe_info.values()),
CONF.force_raw_images)
def _clean_up_pxe_env(task, images_info):
"""Cleanup PXE environment of all the images in images_info.
Cleans up the PXE environment for the mentioned images in
images_info.
:param task: a TaskManager object
:param images_info: A dictionary of images whose keys are the image names
to be cleaned up (kernel, ramdisk, etc) and values are a tuple of
identifier and absolute path.
"""
for label in images_info:
path = images_info[label][1]
ironic_utils.unlink_without_raise(path)
pxe_utils.clean_up_pxe_config(task)
TFTPImageCache().clean_up()
class PXEBoot(base.BootInterface):
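    """PXE/iPXE boot interface for booting the deploy ramdisk and instances."""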
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return COMMON_PROPERTIES
@METRICS.timer('PXEBoot.validate')
def validate(self, task):
"""Validate the PXE-specific info for booting deploy/instance images.
This method validates the PXE-specific info for booting the
ramdisk and instance on the node. If invalid, raises an
exception; otherwise returns None.
:param task: a task from TaskManager.
:returns: None
:raises: InvalidParameterValue, if some parameters are invalid.
:raises: MissingParameterValue, if some required parameters are
missing.
"""
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(
_("Node %s does not have any port associated with it.")
% node.uuid)
if CONF.pxe.ipxe_enabled:
if (not CONF.deploy.http_url or
not CONF.deploy.http_root):
raise exception.MissingParameterValue(_(
"iPXE boot is enabled but no HTTP URL or HTTP "
"root was specified."))
# Check the trusted_boot capabilities value.
deploy_utils.validate_capabilities(node)
if deploy_utils.is_trusted_boot_requested(node):
# Check if 'boot_option' and boot mode is compatible with
# trusted boot.
validate_boot_parameters_for_trusted_boot(node)
_parse_driver_info(node)
d_info = deploy_utils.get_image_instance_info(node)
if (node.driver_internal_info.get('is_whole_disk_image') or
deploy_utils.get_boot_option(node) == 'local'):
props = []
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
@METRICS.timer('PXEBoot.prepare_ramdisk')
def prepare_ramdisk(self, task, ramdisk_params):
"""Prepares the boot of Ironic ramdisk using PXE.
This method prepares the boot of the deploy kernel/ramdisk after
reading relevant information from the node's driver_info and
instance_info.
:param task: a task from TaskManager.
:param ramdisk_params: the parameters to be passed to the ramdisk.
pxe driver passes these parameters as kernel command-line
arguments.
:returns: None
:raises: MissingParameterValue, if some information is missing in
node's driver_info or instance_info.
:raises: InvalidParameterValue, if some information provided is
invalid.
        :raises: IronicException, if some power or set boot device
operation failed on the node.
"""
node = task.node
if CONF.pxe.ipxe_enabled:
# Render the iPXE boot script template and save it
# to HTTP root directory
boot_script = utils.render_template(
CONF.pxe.ipxe_boot_script,
{'ipxe_for_mac_uri': pxe_utils.PXE_CFG_DIR_NAME + '/'})
bootfile_path = os.path.join(
CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script))
# NOTE(pas-ha) to prevent unneeded writes,
# only write to file if its content is different from required,
# which should be rather rare
if (not os.path.isfile(bootfile_path) or
not utils.file_has_content(bootfile_path, boot_script)):
utils.write_to_file(bootfile_path, boot_script)
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
pxe_info = _get_deploy_image_info(node)
        # NOTE: Try to validate and fetch instance images only
# if we are in DEPLOYING state.
if node.provision_state == states.DEPLOYING:
pxe_info.update(_get_instance_image_info(node, task.context))
pxe_options = _build_pxe_config_options(task, pxe_info)
pxe_options.update(ramdisk_params)
pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template)
deploy_utils.try_set_boot_device(task, boot_devices.PXE)
if CONF.pxe.ipxe_enabled and CONF.pxe.ipxe_use_swift:
pxe_info.pop('deploy_kernel', None)
pxe_info.pop('deploy_ramdisk', None)
if pxe_info:
_cache_ramdisk_kernel(task.context, node, pxe_info)
@METRICS.timer('PXEBoot.clean_up_ramdisk')
def clean_up_ramdisk(self, task):
"""Cleans up the boot of ironic ramdisk.
        This method cleans up the PXE environment that was set up for booting
        the deploy ramdisk. It unlinks the deploy kernel/ramdisk in the node's
        directory in tftproot and removes its PXE config.
:param task: a task from TaskManager.
:returns: None
"""
node = task.node
try:
images_info = _get_deploy_image_info(node)
except exception.MissingParameterValue as e:
LOG.warning(_LW('Could not get deploy image info '
'to clean up images for node %(node)s: %(err)s'),
{'node': node.uuid, 'err': e})
else:
_clean_up_pxe_env(task, images_info)
@METRICS.timer('PXEBoot.prepare_instance')
def prepare_instance(self, task):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
relevant information from the node's instance_info. In case of netboot,
it updates the dhcp entries and switches the PXE config. In case of
localboot, it cleans up the PXE config.
:param task: a task from TaskManager.
:returns: None
"""
node = task.node
boot_option = deploy_utils.get_boot_option(node)
boot_device = None
if boot_option != "local":
# Make sure that the instance kernel/ramdisk is cached.
# This is for the takeover scenario for active nodes.
instance_image_info = _get_instance_image_info(
task.node, task.context)
_cache_ramdisk_kernel(task.context, task.node, instance_image_info)
# If it's going to PXE boot we need to update the DHCP server
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
try:
root_uuid_or_disk_id = task.node.driver_internal_info[
'root_uuid_or_disk_id'
]
except KeyError:
if not iwdi:
LOG.warning(
_LW("The UUID for the root partition can't be "
"found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for "
"node %(node)s"), {"node": task.node.uuid})
else:
LOG.warning(
_LW("The disk id for the whole disk image can't "
"be found, unable to switch the pxe config "
"from deployment mode to service (boot) mode "
"for node %(node)s"),
{"node": task.node.uuid})
else:
_build_service_pxe_config(task, instance_image_info,
root_uuid_or_disk_id)
boot_device = boot_devices.PXE
else:
# If it's going to boot from the local disk, we don't need
# PXE config files. They still need to be generated as part
# of the prepare() because the deployment does PXE boot the
# deploy ramdisk
pxe_utils.clean_up_pxe_config(task)
boot_device = boot_devices.DISK
# NOTE(pas-ha) do not re-set boot device on ACTIVE nodes
# during takeover
if boot_device and task.node.provision_state != states.ACTIVE:
deploy_utils.try_set_boot_device(task, boot_device)
@METRICS.timer('PXEBoot.clean_up_instance')
def clean_up_instance(self, task):
"""Cleans up the boot of instance.
        This method cleans up the environment that was set up for booting
        the instance. It unlinks the instance kernel/ramdisk in the node's
        directory in tftproot and removes the PXE config.
:param task: a task from TaskManager.
:returns: None
"""
node = task.node
try:
images_info = _get_instance_image_info(node, task.context)
except exception.MissingParameterValue as e:
LOG.warning(_LW('Could not get instance image info '
'to clean up images for node %(node)s: %(err)s'),
{'node': node.uuid, 'err': e})
else:
_clean_up_pxe_env(task, images_info)
|
|
import pytest
import mock
from api.base.settings.defaults import API_BASE
from api.preprint_providers.permissions import GroupHelper
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory,
PreprintProviderFactory,
)
from osf.utils import permissions as osf_permissions
from api_tests.reviews.mixins.filter_mixins import ReviewActionFilterMixin
class TestReviewActionFilters(ReviewActionFilterMixin):
@pytest.fixture()
def url(self):
return '/{}actions/reviews/'.format(API_BASE)
@pytest.fixture()
def expected_actions(self, all_actions, allowed_providers):
actions = super(
TestReviewActionFilters, self
).expected_actions(all_actions, allowed_providers)
node = actions[0].target.node
node.is_public = False
node.save()
return [a for a in actions if a.target.node.is_public]
def test_no_permission(self, app, url, expected_actions):
res = app.get(url, expect_errors=True)
assert res.status_code == 401
some_rando = AuthUserFactory()
res = app.get(url, auth=some_rando.auth)
assert not res.json['data']
@pytest.mark.django_db
class TestReviewActionCreateRelated(object):
def create_payload(self, reviewable_id=None, **attrs):
payload = {
'data': {
'attributes': attrs,
'relationships': {},
'type': 'actions'
}
}
if reviewable_id:
payload['data']['relationships']['target'] = {
'data': {
'type': 'preprints',
'id': reviewable_id
}
}
return payload
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/review_actions/'.format(API_BASE, preprint._id)
@pytest.fixture()
def provider(self):
return PreprintProviderFactory(reviews_workflow='pre-moderation')
@pytest.fixture()
def node_admin(self):
return AuthUserFactory()
@pytest.fixture()
def preprint(self, node_admin, provider):
preprint = PreprintFactory(
provider=provider,
node__creator=node_admin,
is_published=False)
preprint.node.add_contributor(
node_admin, permissions=[osf_permissions.ADMIN])
return preprint
@pytest.fixture()
def moderator(self, provider):
moderator = AuthUserFactory()
moderator.groups.add(GroupHelper(provider).get_group('moderator'))
return moderator
@mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si')
def test_create_permissions(
self, mock_ezid, app, url, preprint, node_admin, moderator):
assert preprint.machine_state == 'initial'
submit_payload = self.create_payload(preprint._id, trigger='submit')
# Unauthorized user can't submit
res = app.post_json_api(url, submit_payload, expect_errors=True)
assert res.status_code == 401
# A random user can't submit
some_rando = AuthUserFactory()
res = app.post_json_api(
url, submit_payload,
auth=some_rando.auth,
expect_errors=True)
assert res.status_code == 403
# Node admin can submit
res = app.post_json_api(url, submit_payload, auth=node_admin.auth)
assert res.status_code == 201
preprint.refresh_from_db()
assert preprint.machine_state == 'pending'
assert not preprint.is_published
accept_payload = self.create_payload(
preprint._id, trigger='accept', comment='This is good.')
# Unauthorized user can't accept
res = app.post_json_api(url, accept_payload, expect_errors=True)
assert res.status_code == 401
# A random user can't accept
res = app.post_json_api(
url, accept_payload,
auth=some_rando.auth,
expect_errors=True)
assert res.status_code == 403
# Moderator from another provider can't accept
another_moderator = AuthUserFactory()
another_moderator.groups.add(GroupHelper(
PreprintProviderFactory()).get_group('moderator'))
res = app.post_json_api(
url, accept_payload,
auth=another_moderator.auth,
expect_errors=True)
assert res.status_code == 403
# Node admin can't accept
res = app.post_json_api(
url, accept_payload,
auth=node_admin.auth,
expect_errors=True)
assert res.status_code == 403
# Still unchanged after all those tries
preprint.refresh_from_db()
assert preprint.machine_state == 'pending'
assert not preprint.is_published
# Moderator can accept
res = app.post_json_api(url, accept_payload, auth=moderator.auth)
assert res.status_code == 201
preprint.refresh_from_db()
assert preprint.machine_state == 'accepted'
assert preprint.is_published
# Check if "get_and_set_preprint_identifiers" is called once.
assert mock_ezid.call_count == 1
def test_cannot_create_actions_for_unmoderated_provider(
self, app, url, preprint, provider, node_admin):
provider.reviews_workflow = None
provider.save()
submit_payload = self.create_payload(preprint._id, trigger='submit')
res = app.post_json_api(
url, submit_payload,
auth=node_admin.auth,
expect_errors=True)
assert res.status_code == 409
def test_bad_requests(self, app, url, preprint, provider, moderator):
invalid_transitions = {
'post-moderation': [
('accepted', 'accept'),
('accepted', 'submit'),
('initial', 'accept'),
('initial', 'edit_comment'),
('initial', 'reject'),
('pending', 'submit'),
('rejected', 'reject'),
('rejected', 'submit'),
],
'pre-moderation': [
('accepted', 'accept'),
('accepted', 'submit'),
('initial', 'accept'),
('initial', 'edit_comment'),
('initial', 'reject'),
('rejected', 'reject'),
]
}
for workflow, transitions in invalid_transitions.items():
provider.reviews_workflow = workflow
provider.save()
for state, trigger in transitions:
preprint.machine_state = state
preprint.save()
bad_payload = self.create_payload(
preprint._id, trigger=trigger)
res = app.post_json_api(
url, bad_payload, auth=moderator.auth, expect_errors=True)
assert res.status_code == 409
# test invalid trigger
bad_payload = self.create_payload(
preprint._id, trigger='badtriggerbad')
res = app.post_json_api(
url, bad_payload,
auth=moderator.auth,
expect_errors=True)
assert res.status_code == 400
# test target is required
bad_payload = self.create_payload(trigger='accept')
res = app.post_json_api(
url, bad_payload,
auth=moderator.auth,
expect_errors=True)
assert res.status_code == 400
@mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si')
def test_valid_transitions(
self, mock_ezid, app, url, preprint, provider, moderator):
valid_transitions = {
'post-moderation': [
('accepted', 'edit_comment', 'accepted'),
('accepted', 'reject', 'rejected'),
('initial', 'submit', 'pending'),
('pending', 'accept', 'accepted'),
('pending', 'edit_comment', 'pending'),
('pending', 'reject', 'rejected'),
('rejected', 'accept', 'accepted'),
('rejected', 'edit_comment', 'rejected'),
],
'pre-moderation': [
('accepted', 'edit_comment', 'accepted'),
('accepted', 'reject', 'rejected'),
('initial', 'submit', 'pending'),
('pending', 'accept', 'accepted'),
('pending', 'edit_comment', 'pending'),
('pending', 'reject', 'rejected'),
('pending', 'submit', 'pending'),
('rejected', 'accept', 'accepted'),
('rejected', 'edit_comment', 'rejected'),
('rejected', 'submit', 'pending'),
],
}
for workflow, transitions in valid_transitions.items():
provider.reviews_workflow = workflow
provider.save()
for from_state, trigger, to_state in transitions:
preprint.machine_state = from_state
preprint.is_published = False
preprint.date_published = None
preprint.date_last_transitioned = None
preprint.save()
payload = self.create_payload(preprint._id, trigger=trigger)
res = app.post_json_api(url, payload, auth=moderator.auth)
assert res.status_code == 201
action = preprint.actions.order_by('-created').first()
assert action.trigger == trigger
preprint.refresh_from_db()
assert preprint.machine_state == to_state
if preprint.in_public_reviews_state:
assert preprint.is_published
assert preprint.date_published == action.created
assert mock_ezid.called
mock_ezid.reset_mock()
else:
assert not preprint.is_published
assert preprint.date_published is None
assert not mock_ezid.called
if trigger == 'edit_comment':
assert preprint.date_last_transitioned is None
else:
assert preprint.date_last_transitioned == action.created
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This auth module is intended to allow OpenStack client-tools to select from a
variety of authentication strategies, including NoAuth (the default), and
Keystone (an identity management system).
> auth_plugin = AuthPlugin(creds)
> auth_plugin.authenticate()
> auth_plugin.auth_token
abcdefg
> auth_plugin.management_url
http://service_endpoint/
"""
import httplib2
from oslo.serialization import jsonutils
from oslo_log import log as logging
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
from daisy.common import exception
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
class BaseStrategy(object):
def __init__(self):
self.auth_token = None
# TODO(sirp): Should expose selecting public/internal/admin URL.
self.management_url = None
def authenticate(self):
raise NotImplementedError
@property
def is_authenticated(self):
raise NotImplementedError
@property
def strategy(self):
raise NotImplementedError
class NoAuthStrategy(BaseStrategy):
def authenticate(self):
pass
@property
def is_authenticated(self):
return True
@property
def strategy(self):
return 'noauth'
class KeystoneStrategy(BaseStrategy):
MAX_REDIRECTS = 10
def __init__(self, creds, insecure=False, configure_via_auth=True):
self.creds = creds
self.insecure = insecure
self.configure_via_auth = configure_via_auth
super(KeystoneStrategy, self).__init__()
def check_auth_params(self):
# Ensure that supplied credential parameters are as required
for required in ('username', 'password', 'auth_url',
'strategy'):
if self.creds.get(required) is None:
raise exception.MissingCredentialError(required=required)
if self.creds['strategy'] != 'keystone':
raise exception.BadAuthStrategy(expected='keystone',
received=self.creds['strategy'])
# For v2.0 also check tenant is present
if self.creds['auth_url'].rstrip('/').endswith('v2.0'):
if self.creds.get("tenant") is None:
raise exception.MissingCredentialError(required='tenant')
def authenticate(self):
"""Authenticate with the Keystone service.
There are a few scenarios to consider here:
1. Which version of Keystone are we using? v1 which uses headers to
pass the credentials, or v2 which uses a JSON encoded request body?
2. Keystone may respond back with a redirection using a 305 status
code.
3. We may attempt a v1 auth when v2 is what's called for. In this
case, we rewrite the url to contain /v2.0/ and retry using the v2
protocol.
"""
def _authenticate(auth_url):
# If OS_AUTH_URL is missing a trailing slash add one
if not auth_url.endswith('/'):
auth_url += '/'
token_url = urlparse.urljoin(auth_url, "tokens")
# 1. Check Keystone version
is_v2 = auth_url.rstrip('/').endswith('v2.0')
if is_v2:
self._v2_auth(token_url)
else:
self._v1_auth(token_url)
self.check_auth_params()
auth_url = self.creds['auth_url']
for _ in range(self.MAX_REDIRECTS):
try:
_authenticate(auth_url)
except exception.AuthorizationRedirect as e:
# 2. Keystone may redirect us
auth_url = e.url
except exception.AuthorizationFailure:
# 3. In some configurations nova makes redirection to
# v2.0 keystone endpoint. Also, new location does not
# contain real endpoint, only hostname and port.
if 'v2.0' not in auth_url:
auth_url = urlparse.urljoin(auth_url, 'v2.0/')
else:
# If we successfully auth'd, then memorize the correct auth_url
# for future use.
self.creds['auth_url'] = auth_url
break
else:
# Guard against a redirection loop
raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS)
def _v1_auth(self, token_url):
creds = self.creds
headers = {}
headers['X-Auth-User'] = creds['username']
headers['X-Auth-Key'] = creds['password']
tenant = creds.get('tenant')
if tenant:
headers['X-Auth-Tenant'] = tenant
resp, resp_body = self._do_request(token_url, 'GET', headers=headers)
def _management_url(self, resp):
for url_header in ('x-image-management-url',
'x-server-management-url',
'x-glance'):
try:
return resp[url_header]
except KeyError as e:
not_found = e
raise not_found
if resp.status in (200, 204):
try:
if self.configure_via_auth:
self.management_url = _management_url(self, resp)
self.auth_token = resp['x-auth-token']
except KeyError:
raise exception.AuthorizationFailure()
elif resp.status == 305:
raise exception.AuthorizationRedirect(uri=resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthenticated()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
raise Exception(_('Unexpected response: %s') % resp.status)
def _v2_auth(self, token_url):
creds = self.creds
creds = {
"auth": {
"tenantName": creds['tenant'],
"passwordCredentials": {
"username": creds['username'],
"password": creds['password']
}
}
}
headers = {}
headers['Content-Type'] = 'application/json'
req_body = jsonutils.dumps(creds)
resp, resp_body = self._do_request(
token_url, 'POST', headers=headers, body=req_body)
if resp.status == 200:
resp_auth = jsonutils.loads(resp_body)['access']
creds_region = self.creds.get('region')
if self.configure_via_auth:
endpoint = get_endpoint(resp_auth['serviceCatalog'],
endpoint_region=creds_region)
self.management_url = endpoint
self.auth_token = resp_auth['token']['id']
elif resp.status == 305:
raise exception.RedirectException(resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthenticated()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
raise Exception(_('Unexpected response: %s') % resp.status)
@property
def is_authenticated(self):
return self.auth_token is not None
@property
def strategy(self):
return 'keystone'
def _do_request(self, url, method, headers=None, body=None):
headers = headers or {}
conn = httplib2.Http()
conn.force_exception_to_status_code = True
conn.disable_ssl_certificate_validation = self.insecure
headers['User-Agent'] = 'glance-client'
resp, resp_body = conn.request(url, method, headers=headers, body=body)
return resp, resp_body
def get_plugin_from_strategy(strategy, creds=None, insecure=False,
configure_via_auth=True):
if strategy == 'noauth':
return NoAuthStrategy()
elif strategy == 'keystone':
return KeystoneStrategy(creds, insecure,
configure_via_auth=configure_via_auth)
else:
raise Exception(_("Unknown auth strategy '%s'") % strategy)
def get_endpoint(service_catalog, service_type='image', endpoint_region=None,
endpoint_type='publicURL'):
"""
Select an endpoint from the service catalog
We search the full service catalog for services
matching both type and region. If the client
supplied no region then any 'image' endpoint
is considered a match. There must be one -- and
only one -- successful match in the catalog,
otherwise we will raise an exception.
"""
endpoint = None
for service in service_catalog:
s_type = None
try:
s_type = service['type']
except KeyError:
msg = _('Encountered service with no "type": %s') % s_type
LOG.warn(msg)
continue
if s_type == service_type:
for ep in service['endpoints']:
if endpoint_region is None or endpoint_region == ep['region']:
if endpoint is not None:
# This is a second match, abort
raise exception.RegionAmbiguity(region=endpoint_region)
endpoint = ep
if endpoint and endpoint.get(endpoint_type):
return endpoint[endpoint_type]
else:
raise exception.NoServiceEndpoint()
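# A minimal usage sketch: build a strategy via get_plugin_from_strategy() and
# authenticate against Keystone. The credential values and the auth_url are
# placeholders; the required keys mirror what check_auth_params() expects.
def _example_keystone_auth():
    creds = {
        'username': 'demo',                          # placeholder
        'password': 'secret',                        # placeholder
        'tenant': 'demo',                            # required because auth_url ends in v2.0
        'auth_url': 'http://127.0.0.1:5000/v2.0/',   # placeholder endpoint
        'strategy': 'keystone',
    }
    plugin = get_plugin_from_strategy('keystone', creds=creds)
    plugin.authenticate()
    # On success the token and image endpoint are memorized on the strategy.
    return plugin.auth_token, plugin.management_url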
|
|
# white_signals.py
"""Contains class factories for white noise signals. White noise signals are
defined as the class of signals that only modifies the white noise matrix `N`.
"""
import numpy as np
import scipy.sparse
from enterprise.signals import parameter, selections, signal_base, utils
from enterprise.signals.parameter import function
from enterprise.signals.selections import Selection
def WhiteNoise(varianceFunction, selection=Selection(selections.no_selection), name=""):
""" Class factory for generic white noise signals."""
class WhiteNoise(signal_base.Signal):
signal_type = "white noise"
signal_name = name
signal_id = name
def __init__(self, psr):
super(WhiteNoise, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
self._do_selection(psr, varianceFunction, selection)
def _do_selection(self, psr, vfn, selection):
sel = selection(psr)
self._keys = sorted(sel.masks.keys())
self._masks = [sel.masks[key] for key in self._keys]
self._ndiag, self._params = {}, {}
for key, mask in zip(self._keys, self._masks):
pnames = [psr.name, name, key]
pname = "_".join([n for n in pnames if n])
self._ndiag[key] = vfn(pname, psr=psr)
for param in self._ndiag[key]._params.values():
self._params[param.name] = param
@property
def ndiag_params(self):
"""Get any varying ndiag parameters."""
return [pp.name for pp in self.params]
@signal_base.cache_call("ndiag_params")
def get_ndiag(self, params):
ret = 0
for key, mask in zip(self._keys, self._masks):
ret += self._ndiag[key](params=params) * mask
return signal_base.ndarray_alt(ret)
return WhiteNoise
@function
def efac_ndiag(toaerrs, efac=1.0):
return efac ** 2 * toaerrs ** 2
def MeasurementNoise(efac=parameter.Uniform(0.5, 1.5), selection=Selection(selections.no_selection), name=""):
"""Class factory for EFAC type measurement noise."""
varianceFunction = efac_ndiag(efac=efac)
BaseClass = WhiteNoise(varianceFunction, selection=selection, name=name)
class MeasurementNoise(BaseClass):
signal_name = "efac"
signal_id = "efac_" + name if name else "efac"
return MeasurementNoise
@function
def equad_ndiag(toas, log10_equad=-8):
return np.ones_like(toas) * 10 ** (2 * log10_equad)
def EquadNoise(log10_equad=parameter.Uniform(-10, -5), selection=Selection(selections.no_selection), name=""):
"""Class factory for EQUAD type measurement noise."""
varianceFunction = equad_ndiag(log10_equad=log10_equad)
BaseClass = WhiteNoise(varianceFunction, selection=selection, name=name)
class EquadNoise(BaseClass):
signal_name = "equad"
signal_id = "equad_" + name if name else "equad"
return EquadNoise
def EcorrKernelNoise(
log10_ecorr=parameter.Uniform(-10, -5),
selection=Selection(selections.no_selection),
method="sherman-morrison",
name="",
):
r"""Class factory for ECORR type noise.
:param log10_ecorr: ``Parameter`` type for the log10 of the ecorr parameter.
:param selection:
``Selection`` object specifying masks for backends, time segments, etc.
:param method: Method for computing noise covariance matrix.
Options include `sherman-morrison`, `sparse`, and `block`
:return: ``EcorrKernelNoise`` class.
ECORR is a noise signal that is used for data with multi-channel TOAs
that are nearly simultaneous in time. It is a white noise signal that
is uncorrelated epoch to epoch but completely correlated for TOAs in a
given observing epoch.
For this implementation we use this covariance matrix as part of the
white noise covariance matrix :math:`N`. It can be seen from above that
this covariance is block diagonal, thus allowing us to exploit special
methods to make matrix manipulations easier.
In this signal implementation we offer three methods of performing these
matrix operations:
sherman-morrison
Uses the `Sherman-Morrison`_ formula to compute the matrix
inverse and other matrix operations. **Note:** This method can only
be used for covariances that can be constructed by the outer product
of two vectors, :math:`uv^T`.
sparse
Uses `Scipy Sparse`_ matrices to construct the block diagonal
covariance matrix and perform matrix operations.
block
Uses a custom scheme that uses the individual blocks from the block
diagonal matrix to perform fast matrix inverse and other solve
operations.
.. note:: The sherman-morrison method is the fastest, followed by the block
and then sparse methods; however, the block and sparse methods are more
general and should be used if sub-classing this signal for more
complicated blocks.
.. _Sherman-Morrison: https://en.wikipedia.org/wiki/Sherman-Morrison_formula
.. _Scipy Sparse: https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
.. # noqa E501
"""
if method not in ["sherman-morrison", "block", "sparse"]:
msg = "EcorrKernelNoise does not support method: {}".format(method)
raise TypeError(msg)
class EcorrKernelNoise(signal_base.Signal):
signal_type = "white noise"
signal_name = "ecorr_" + method
signal_id = "_".join(["ecorr", name, method]) if name else "_".join(["ecorr", method])
def __init__(self, psr):
super(EcorrKernelNoise, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
sel = selection(psr)
self._params, self._masks = sel("log10_ecorr", log10_ecorr)
keys = sorted(self._masks.keys())
masks = [self._masks[key] for key in keys]
Umats = []
for key, mask in zip(keys, masks):
Umats.append(utils.create_quantization_matrix(psr.toas[mask], nmin=2)[0])
nepoch = sum(U.shape[1] for U in Umats)
U = np.zeros((len(psr.toas), nepoch))
self._slices = {}
netot = 0
for ct, (key, mask) in enumerate(zip(keys, masks)):
nn = Umats[ct].shape[1]
U[mask, netot : nn + netot] = Umats[ct]
self._slices.update({key: utils.quant2ind(U[:, netot : nn + netot])})
netot += nn
# initialize sparse matrix
self._setup(psr)
@property
def ndiag_params(self):
"""Get any varying ndiag parameters."""
return [pp.name for pp in self.params]
@signal_base.cache_call("ndiag_params")
def get_ndiag(self, params):
if method == "sherman-morrison":
return self._get_ndiag_sherman_morrison(params)
elif method == "sparse":
return self._get_ndiag_sparse(params)
elif method == "block":
return self._get_ndiag_block(params)
def _setup(self, psr):
if method == "sparse":
self._setup_sparse(psr)
def _setup_sparse(self, psr):
Ns = scipy.sparse.csc_matrix((len(psr.toas), len(psr.toas)))
for key, slices in self._slices.items():
for slc in slices:
if slc.stop - slc.start > 1:
Ns[slc, slc] = 1.0
self._Ns = signal_base.csc_matrix_alt(Ns)
def _get_ndiag_sparse(self, params):
for p in self._params:
for slc in self._slices[p]:
if slc.stop - slc.start > 1:
self._Ns[slc, slc] = 10 ** (2 * self.get(p, params))
return self._Ns
def _get_ndiag_sherman_morrison(self, params):
slices, jvec = self._get_jvecs(params)
return signal_base.ShermanMorrison(jvec, slices)
def _get_ndiag_block(self, params):
slices, jvec = self._get_jvecs(params)
blocks = []
for jv, slc in zip(jvec, slices):
nb = slc.stop - slc.start
blocks.append(np.ones((nb, nb)) * jv)
return signal_base.BlockMatrix(blocks, slices)
def _get_jvecs(self, params):
slices = sum([self._slices[key] for key in sorted(self._slices.keys())], [])
jvec = np.concatenate(
[
np.ones(len(self._slices[key])) * 10 ** (2 * self.get(key, params))
for key in sorted(self._slices.keys())
]
)
return (slices, jvec)
return EcorrKernelNoise
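# A short usage sketch, assuming the standard enterprise workflow: the par/tim
# file paths are placeholders, and the signal factories combine with "+" into a
# single model before building a PTA.
def _example_white_noise_model(parfile, timfile):
    from enterprise.pulsar import Pulsar
    psr = Pulsar(parfile, timfile)
    ef = MeasurementNoise(efac=parameter.Uniform(0.5, 1.5))
    eq = EquadNoise(log10_equad=parameter.Uniform(-10, -5))
    ec = EcorrKernelNoise(log10_ecorr=parameter.Uniform(-10, -5),
                          method="sherman-morrison")
    model = ef + eq + ec
    pta = signal_base.PTA([model(psr)])
    return pta.param_names, pta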
|
|
""" Cisco_IOS_XR_ipv4_telnet_mgmt_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-telnet\-mgmt package configuration.
This module contains definitions
for the following management objects\:
telnet\: Global Telnet configuration commands
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Telnet(object):
"""
Global Telnet configuration commands
.. attribute:: vrfs
VRF name for telnet service
**type**\: :py:class:`Vrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_telnet_mgmt_cfg.Telnet.Vrfs>`
"""
_prefix = 'ipv4-telnet-mgmt-cfg'
_revision = '2015-11-09'
def __init__(self):
self.vrfs = Telnet.Vrfs()
self.vrfs.parent = self
class Vrfs(object):
"""
VRF name for telnet service
.. attribute:: vrf
VRF name for telnet service
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_telnet_mgmt_cfg.Telnet.Vrfs.Vrf>`
"""
_prefix = 'ipv4-telnet-mgmt-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vrf = YList()
self.vrf.parent = self
self.vrf.name = 'vrf'
class Vrf(object):
"""
VRF name for telnet service
.. attribute:: vrf_name <key>
VRF name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: ipv4
IPv4 configuration
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_telnet_mgmt_cfg.Telnet.Vrfs.Vrf.Ipv4>`
"""
_prefix = 'ipv4-telnet-mgmt-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vrf_name = None
self.ipv4 = Telnet.Vrfs.Vrf.Ipv4()
self.ipv4.parent = self
class Ipv4(object):
"""
IPv4 configuration
.. attribute:: dscp
Specify the DSCP value
**type**\: int
**range:** 0..63
"""
_prefix = 'ipv4-telnet-mgmt-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dscp = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.dscp is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
return meta._meta_table['Telnet.Vrfs.Vrf.Ipv4']['meta_info']
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:telnet/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrfs/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrf[Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf_name is not None:
return True
if self.ipv4 is not None and self.ipv4._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
return meta._meta_table['Telnet.Vrfs.Vrf']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:telnet/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrfs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf is not None:
for child_ref in self.vrf:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
return meta._meta_table['Telnet.Vrfs']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:telnet'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrfs is not None and self.vrfs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
return meta._meta_table['Telnet']['meta_info']
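# A minimal configuration sketch, assuming the classic ydk-py
# CRUDService/NetconfServiceProvider API; the device address and credentials
# are placeholders.
def _example_configure_telnet_dscp():
    from ydk.providers import NetconfServiceProvider
    from ydk.services import CRUDService
    provider = NetconfServiceProvider(address='10.0.0.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    telnet = Telnet()
    vrf = Telnet.Vrfs.Vrf()
    vrf.vrf_name = 'default'
    vrf.ipv4.dscp = 16  # DSCP must be within the modelled range 0..63
    telnet.vrfs.vrf.append(vrf)
    CRUDService().create(provider, telnet)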
|
|
import json
import re
from ctypes import c_void_p, c_wchar_p
from decimal import Decimal
from logging import DEBUG, NullHandler, getLogger
from subprocess import CalledProcessError, check_output
from pymediainfo import MediaInfo
from pymediainfo import __version__ as pymediainfo_version
from knowit import VIDEO_EXTENSIONS
from knowit.core import MultiValue, Property
from knowit.properties import (
AudioChannels,
AudioCodec,
AudioCompression,
AudioProfile,
Basic,
BitRateMode,
Duration,
Language,
Quantity,
ScanType,
SubtitleFormat,
VideoCodec,
VideoEncoder,
VideoHdrFormat,
VideoProfile,
VideoProfileTier,
YesNo,
)
from knowit.provider import (
MalformedFileError,
Provider,
)
from knowit.rules import (
AtmosRule,
AudioChannelsRule,
ClosedCaptionRule,
DtsHdRule,
HearingImpairedRule,
LanguageRule,
ResolutionRule,
)
from knowit.units import units
from knowit.utils import (
define_candidate,
detect_os, round_decimal,
)
logger = getLogger(__name__)
logger.addHandler(NullHandler())
WARN_MSG = r'''
=========================================================================================
MediaInfo not found on your system or could not be loaded.
Visit https://mediaarea.net/ to download it.
If you still have problems, please check if the downloaded version matches your system.
To load MediaInfo from a specific location, please define the location as follows:
knowit --mediainfo /usr/local/mediainfo/lib <video_path>
knowit --mediainfo /usr/local/mediainfo/bin <video_path>
knowit --mediainfo "C:\Program Files\MediaInfo" <video_path>
knowit --mediainfo C:\Software\MediaInfo.dll <video_path>
knowit --mediainfo C:\Software\MediaInfo.exe <video_path>
knowit --mediainfo /opt/mediainfo/libmediainfo.so <video_path>
knowit --mediainfo /opt/mediainfo/libmediainfo.dylib <video_path>
=========================================================================================
'''
class MediaInfoExecutor:
"""Media info executable knows how to execute media info: using ctypes or cli."""
version_re = re.compile(r'\bv(?P<version>\d+(?:\.\d+)+)\b')
locations = {
'unix': ('/usr/local/mediainfo/lib', '/usr/local/mediainfo/bin', '__PATH__'),
'windows': ('__PATH__', ),
'macos': ('__PATH__', ),
}
def __init__(self, location, version):
"""Initialize the object."""
self.location = location
self.version = version
def extract_info(self, filename):
"""Extract media info."""
return self._execute(filename)
def _execute(self, filename):
raise NotImplementedError
@classmethod
def _get_version(cls, output):
match = cls.version_re.search(output)
if match:
version = tuple([int(v) for v in match.groupdict()['version'].split('.')])
return version
@classmethod
def get_executor_instance(cls, suggested_path=None):
"""Return the executor instance."""
os_family = detect_os()
logger.debug('Detected os: %s', os_family)
for exec_cls in (MediaInfoCTypesExecutor, MediaInfoCliExecutor):
executor = exec_cls.create(os_family, suggested_path)
if executor:
return executor
class MediaInfoCliExecutor(MediaInfoExecutor):
"""Media info using cli."""
names = {
'unix': ('mediainfo', ),
'windows': ('MediaInfo.exe', ),
'macos': ('mediainfo', ),
}
def _execute(self, filename):
return json.loads(check_output([self.location, '--Output=JSON', '--Full', filename]).decode())
@classmethod
def create(cls, os_family=None, suggested_path=None):
"""Create the executor instance."""
for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
try:
output = check_output([candidate, '--version']).decode()
version = cls._get_version(output)
if version:
logger.debug('MediaInfo cli detected: %s', candidate)
return MediaInfoCliExecutor(candidate, version)
except CalledProcessError as e:
# old mediainfo returns non-zero exit code for mediainfo --version
version = cls._get_version(e.output.decode())
if version:
logger.debug('MediaInfo cli detected: %s', candidate)
return MediaInfoCliExecutor(candidate, version)
except OSError:
pass
class MediaInfoCTypesExecutor(MediaInfoExecutor):
"""Media info ctypes."""
names = {
'unix': ('libmediainfo.so.0', ),
'windows': ('MediaInfo.dll', ),
'macos': ('libmediainfo.0.dylib', 'libmediainfo.dylib'),
}
def _execute(self, filename):
# Create a MediaInfo handle
return json.loads(MediaInfo.parse(filename, library_file=self.location, output='JSON'))
@classmethod
def create(cls, os_family=None, suggested_path=None):
"""Create the executor instance."""
for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
if MediaInfo.can_parse(candidate):
lib, handle, lib_version_str, lib_version = MediaInfo._get_library(candidate)
lib.MediaInfo_Option.argtypes = [c_void_p, c_wchar_p, c_wchar_p]
lib.MediaInfo_Option.restype = c_wchar_p
version = MediaInfoExecutor._get_version(lib.MediaInfo_Option(None, "Info_Version", ""))
logger.debug('MediaInfo library detected: %s (v%s)', candidate, '.'.join(map(str, version)))
return MediaInfoCTypesExecutor(candidate, version)
class MediaInfoProvider(Provider):
"""Media Info provider."""
executor = None
def __init__(self, config, suggested_path):
"""Init method."""
super().__init__(config, {
'general': {
'title': Property('Title', description='media title'),
'path': Property('CompleteName', description='media path'),
'duration': Duration('Duration', resolution=1000, description='media duration'),
'size': Quantity('FileSize', unit=units.byte, description='media size'),
'bit_rate': Quantity('OverallBitRate', unit=units.bps, description='media bit rate'),
},
'video': {
'id': Basic('ID', data_type=int, allow_fallback=True, description='video track number'),
'name': Property('Title', description='video track name'),
'language': Language('Language', description='video language'),
'duration': Duration('Duration', resolution=1000, description='video duration'),
'size': Quantity('StreamSize', unit=units.byte, description='video stream size'),
'width': Quantity('Width', unit=units.pixel),
'height': Quantity('Height', unit=units.pixel),
'scan_type': ScanType(config, 'ScanType', default='Progressive', description='video scan type'),
'aspect_ratio': Basic('DisplayAspectRatio', data_type=Decimal,
processor=lambda x: round_decimal(x, min_digits=1, max_digits=3),
description='display aspect ratio'),
'pixel_aspect_ratio': Basic('PixelAspectRatio', data_type=Decimal,
processor=lambda x: round_decimal(x, min_digits=1, max_digits=3),
description='pixel aspect ratio'),
'resolution': None, # populated with ResolutionRule
'frame_rate': Quantity('FrameRate', unit=units.FPS, data_type=Decimal, description='video frame rate'),
# frame_rate_mode
'bit_rate': Quantity('BitRate', unit=units.bps, description='video bit rate'),
'bit_depth': Quantity('BitDepth', unit=units.bit, description='video bit depth'),
'codec': VideoCodec(config, 'CodecID', description='video codec'),
'profile': VideoProfile(config, 'Format_Profile', description='video codec profile'),
'profile_level': Property('Format_Level', description='video codec profile level'),
'profile_tier': VideoProfileTier(config, 'Format_Tier', description='video codec profile tier'),
'encoder': VideoEncoder(config, 'Encoded_Library_Name', description='video encoder'),
'hdr_format': MultiValue(VideoHdrFormat(config, 'HDR_Format', description='video hdr format'),
delimiter=' / '),
'media_type': Property('InternetMediaType', description='video media type'),
'forced': YesNo('Forced', hide_value=False, description='video track forced'),
'default': YesNo('Default', hide_value=False, description='video track default'),
},
'audio': {
'id': Basic('ID', data_type=int, allow_fallback=True, description='audio track number'),
'name': Property('Title', description='audio track name'),
'language': Language('Language', description='audio language'),
'duration': Duration('Duration', resolution=1000, description='audio duration'),
'size': Quantity('StreamSize', unit=units.byte, description='audio stream size'),
'codec': MultiValue(AudioCodec(config, 'CodecID', description='audio codec')),
'format_commercial': Property('Format_Commercial', private=True),
'profile': MultiValue(AudioProfile(config, 'Format_Profile', 'Format_AdditionalFeatures',
description='audio codec profile'),
delimiter=' / '),
'channels_count': MultiValue(AudioChannels('Channels_Original', 'Channels',
description='audio channels count')),
'channel_positions': MultiValue(name='ChannelPositions_String2', handler=(lambda x, *args: x),
delimiter=' / ', private=True, description='audio channels position'),
'channels': None, # populated with AudioChannelsRule
'bit_depth': Quantity('BitDepth', unit=units.bit, description='audio bit depth'),
'bit_rate': MultiValue(Quantity('BitRate', unit=units.bps, description='audio bit rate')),
'bit_rate_mode': MultiValue(BitRateMode(config, 'BitRate_Mode', description='audio bit rate mode')),
'sampling_rate': MultiValue(Quantity('SamplingRate', unit=units.Hz, description='audio sampling rate')),
'compression': MultiValue(AudioCompression(config, 'Compression_Mode',
description='audio compression')),
'forced': YesNo('Forced', hide_value=False, description='audio track forced'),
'default': YesNo('Default', hide_value=False, description='audio track default'),
},
'subtitle': {
'id': Basic('ID', data_type=int, allow_fallback=True, description='subtitle track number'),
'name': Property('Title', description='subtitle track name'),
'language': Language('Language', description='subtitle language'),
'hearing_impaired': None, # populated with HearingImpairedRule
'_closed_caption': Property('ClosedCaptionsPresent', private=True),
'closed_caption': None, # populated with ClosedCaptionRule
'format': SubtitleFormat(config, 'CodecID', description='subtitle format'),
'forced': YesNo('Forced', hide_value=False, description='subtitle track forced'),
'default': YesNo('Default', hide_value=False, description='subtitle track default'),
},
}, {
'video': {
'language': LanguageRule('video language'),
'resolution': ResolutionRule('video resolution'),
},
'audio': {
'language': LanguageRule('audio language'),
'channels': AudioChannelsRule('audio channels'),
'_atmosrule': AtmosRule(config, 'atmos rule'),
'_dtshdrule': DtsHdRule(config, 'dts-hd rule'),
},
'subtitle': {
'language': LanguageRule('subtitle language'),
'hearing_impaired': HearingImpairedRule('subtitle hearing impaired'),
'closed_caption': ClosedCaptionRule('closed caption'),
}
})
self.executor = MediaInfoExecutor.get_executor_instance(suggested_path)
def accepts(self, video_path):
"""Accept any video when MediaInfo is available."""
if self.executor is None:
logger.warning(WARN_MSG)
self.executor = False
return self.executor and video_path.lower().endswith(VIDEO_EXTENSIONS)
def describe(self, video_path, context):
"""Return video metadata."""
data = self.executor.extract_info(video_path)
def debug_data():
"""Debug data."""
return json.dumps(data, indent=4)
context['debug_data'] = debug_data
if logger.isEnabledFor(DEBUG):
logger.debug('Video %r scanned using mediainfo %r has raw data:\n%s',
video_path, self.executor.location, debug_data())
result = {}
tracks = data.get('media', {}).get('track', [])
if tracks:
general_tracks = []
video_tracks = []
audio_tracks = []
subtitle_tracks = []
for track in tracks:
track_type = track.get('@type')
if track_type == 'General':
general_tracks.append(track)
elif track_type == 'Video':
video_tracks.append(track)
elif track_type == 'Audio':
audio_tracks.append(track)
elif track_type == 'Text':
subtitle_tracks.append(track)
result = self._describe_tracks(video_path, general_tracks[0] if general_tracks else {},
video_tracks, audio_tracks, subtitle_tracks, context)
if not result:
raise MalformedFileError
result['provider'] = {
'name': 'mediainfo',
'version': self.version
}
return result
@property
def version(self):
"""Return mediainfo version information."""
versions = {'pymediainfo': pymediainfo_version}
if self.executor:
executor_version = '.'.join(map(str, self.executor.version))
versions[self.executor.location] = f'v{executor_version}'
return versions
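# A small usage sketch: pick whichever MediaInfo executor is available (ctypes
# first, then the cli) and return the raw JSON for a file; the video path is a
# placeholder.
def _example_extract_raw_mediainfo(video_path='movie.mkv'):
    executor = MediaInfoExecutor.get_executor_instance()
    if executor is None:
        raise RuntimeError('MediaInfo is not available on this system')
    return executor.extract_info(video_path)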
|
|
class IntersectionError(Exception):
def __init__(self):
Exception.__init__(self)
class SetPartitionPartStructureError(Exception):
def __init__(self, arg):
Exception.__init__(self, arg)
class SetPartitionPart(tuple):
def __new__(_cls, arg):
seen = []
if len(arg) == 0:
message = repr(arg) + ' is empty; a SetPartitionPart needs at least one element'
raise SetPartitionPartStructureError(message)
for item in arg:
if type(item) is not int or item < 1: # make assert_natural
message = repr(item) + ' cannot be in a SetPartitionPart'
raise SetPartitionPartStructureError(message)
if item in seen:
message = repr(item) + ' degenerate in ' + repr(arg)
raise SetPartitionPartStructureError(message)
seen.append(item)
return tuple.__new__(_cls, sorted(arg))
def __lt__(self, other):
self._checkcompat(other)
return self[0] < other[0]
def __le__(self, other):
self._checkcompat(other)
return self[0] <= other[0]
def __gt__(self, other):
self._checkcompat(other)
return self[0] > other[0]
def __ge__(self, other):
self._checkcompat(other)
return self[0] >= other[0]
def __eq__(self, other):
return tuple.__eq__(self, other)
def __ne__(self, other):
return tuple.__ne__(self, other)
def _checkcompat(self, other):
if set(self) & set(other) != set():
message = repr(self) + ' and ' + repr(other) + ' not disjoint'
raise SetPartitionPartStructureError(message)
class SetPartition(tuple):
def __new__(_cls, arg):
parts = []
for item in arg:
if type(item) == SetPartitionPart:
parts.append(item)
else:
parts.append(SetPartitionPart(item))
return tuple.__new__(_cls, tuple(sorted(parts)))
def __init__(self, arg):
if len(self) >= 2:
for i, a in enumerate(self[:-1]):
for b in self[i+1:]:
a._checkcompat(b)
rep = self._flatten()
self.n = max(rep)
if rep != range(1, self.n + 1):
message = 'Sub-tuples do not span 1-' + str(self.n)
raise TypeError(message)
def __int__(self):
return self.n
def _flatten(self):
return sorted([i for part in self for i in part])
def _checkother(self, other):
if type(other) is not SetPartition:
message = 'A SetPartition cannot be compared with a(n) ' + type(other).__name__
raise TypeError(message)
if self.n != other.n:
message = 'Partitions are not of the same set'
raise TypeError(message)
def __lt__(self, other):
self._checkother(other)
for a in self:
if not any([set(a).issubset(b) for b in other]):
return False
return True
def __le__(self, other):
return self == other or self < other
def __gt__(self, other):
self._checkother(other)
return other < self
def __ge__(self, other):
return self == other or self > other
def __eq__(self, other):
self._checkother(other)
return tuple.__eq__(self, other)
def __ne__(self, other):
return not self == other
def __floordiv__(self, other):
return not self <= other or self > other
def get_setpartitions(n):
# assert_natural(0)
if n == 0:
return [tuple(tuple())]
places = [1 for _ in xrange(n)]
partitions = [range(1, n + 1)]
maxes = [0 for _ in xrange(n)]
while places != range(1, n + 1):
for i in xrange(1, n):
maxes[i] = max(places[i - 1], maxes[i - 1])
partition = []
j = list(reversed([places[i] <= maxes[i] for i in xrange(n)])).index(True)
places[n - 1 - j] += 1
for i in xrange(n - j, n):
places[i] = 1
for i in xrange(n):
if places[i] <= len(partition):
partition[places[i] - 1].append(i + 1)
else:
partition.append([i + 1])
partitions.append(SetPartition(partition))
return partitions
class IntPartitionPart(int):
def __new__(_cls, arg):
#assert_natural
if type(arg) is not int or arg < 1:
message = repr(arg) + ' is not a valid IntPartitionPart'
raise TypeError(message)
return int.__new__(_cls, arg)
class IntPartition(tuple):
def __new__(_cls, arg):
parts = []
for k in arg:
# sometimes not container
if type(k) == IntPartitionPart:
parts.append(k)
else:
parts.append(IntPartitionPart(k))
return tuple.__new__(_cls, reversed(sorted(parts)))
def __init__(self, arg):
self.n = sum(self)
def __int__(self):
return self.n
def _checkother(self, other):
if type(other) is not IntPartition:
raise TypeError
def __lt__(self, other):
self._checkother(other)
if len(self) > len(other):
return False
for i in xrange(len(self)):
if self[i] > other[i]:
return False
return True
def __le__(self, other):
return self == other or self < other
def __gt__(self, other):
self._checkother(other)
return other < self
def __ge__(self, other):
return self == other or self > other
def __eq__(self, other):
self._checkother(other)
return tuple.__eq__(self, other)
def __ne__(self, other):
return not self == other
def young(self, char='*'):
for part in self:
print char*part
def conj(self):
w = list(self)
c = []
while w != []:
c.append(len(w))
w = [part - 1 for part in w if part > 1]
print c
return IntPartition(c)
def rank(self):
if len(self) == 0:
return 0
if self[-1] == len(self):
return len(self)
for k in xrange(2, len(self) + 1):
if self[k - 1] < k:
break
return k - 1
"""
x = SetPartitionPart([1, 2, 4])
print 'x: ' + `x`
y = SetPartitionPart([5, 6])
z = SetPartitionPart([3])
P = SetPartition((x, y, z))
x1 = [1,2,4]
y1 = [5,6]
z1 = [3]
P1 = SetPartition((x1, y1, z1))
print P == P1
"""
for i in xrange(0, 7):
print len(get_setpartitions(i))
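# The counts printed above are the Bell numbers B_0..B_6 = 1, 1, 2, 5, 15, 52, 203,
# which count the set partitions of an n-element set, so this loop doubles as a
# sanity check on get_setpartitions().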
x = IntPartition((2, 6, 2, 1, 1, 1))
x = IntPartition((4, 3, 3))
print x
print len(x)
x.young()
y = x.conj()
print y.rank()
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tensorflow dataset for fever evidence matching."""
import collections
import json
import os
import random
from typing import Dict, List, Optional, Set, Text
from absl import logging
import apache_beam as beam
import dataclasses
from language.serene import constants
from language.serene import retrieval_pb2
from language.serene import scrape_db
from language.serene import text_matcher
from language.serene import types
from language.serene import util
from language.serene import wiki_db
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
@dataclasses.dataclass(frozen=True)
class SentenceLocation:
"""Dataclass to hold the location of an evidence sentence."""
__slots__ = ['claim_id', 'wikipedia_url', 'sentence_id']
claim_id: int
wikipedia_url: Text
sentence_id: int
def underscore_to_whitespace(text):
return text.replace('_', ' ')
def merge_validation_gold_and_candidate(examples,
candidates,
n_candidates):
"""Merge gold and candidate examples for the validation fold.
Note: This mutates the members of examples, specifically adds the
tfidf_candidate field.
Args:
examples: A dictionary of examples to merge with, typically containing only
gold examples
candidates: Candidates to possibly include
n_candidates: The max number of candidates to return
Returns:
A list of examples.
"""
for candidate in candidates:
key = (candidate['wikipedia_url'], candidate['sentence_id'])
# Prevent duplicates, just mark them as retrieved and attach doc score
if key in examples:
examples[key]['retrieved'] = True
examples[key]['doc_score'] = candidate['doc_score']
else:
examples[key] = candidate
sorted_candidates = sorted(
examples.values(), key=lambda x: x['sentence_score'], reverse=True)
final_candidates = sorted_candidates[:n_candidates]
final_examples = {}
for candidate in final_candidates:
key = (candidate['wikipedia_url'], candidate['sentence_id'])
candidate['tfidf_candidate'] = True
final_examples[key] = candidate
bottom_candidates = sorted_candidates[n_candidates:]
for candidate in bottom_candidates:
key = (candidate['wikipedia_url'], candidate['sentence_id'])
# Force include gold examples, but mark them with tfidf_candidate=False
# to indicate they should be excluded in validation evaluations.
if candidate['gold']:
candidate['tfidf_candidate'] = False
final_examples[key] = candidate
return list(final_examples.values())
class ExtractEvidenceForClaim(beam.DoFn):
"""Create evidence examples for each claim."""
# pyformat: disable
def __init__(
self,
*,
fever_train_path,
fever_dev_path,
fever_test_path,
train_scrape_type,
n_similar_negatives,
n_background_negatives,
n_inference_candidates,
n_inference_documents,
wiki_db_path,
text_matcher_params_path,
title_in_scoring,
drqa_db_path,
lucene_db_path,
ukp_docs_train,
ukp_docs_dev,
ukp_docs_test,
max_inference_sentence_id):
super().__init__()
self._fever_train_path = fever_train_path
self._fever_dev_path = fever_dev_path
self._fever_test_path = fever_test_path
self._train_scrape_type = train_scrape_type
self._n_similar_negatives = n_similar_negatives
self._n_background_negatives = n_background_negatives
self._n_inference_candidates = n_inference_candidates
self._n_inference_documents = n_inference_documents
self._wiki_db_path = wiki_db_path
self._text_matcher_params_path = text_matcher_params_path
self._title_in_scoring = title_in_scoring
self._drqa_db_path = drqa_db_path
self._lucene_db_path = lucene_db_path
self._ukp_docs_train_path = ukp_docs_train
self._ukp_docs_dev_path = ukp_docs_dev
self._ukp_docs_test_path = ukp_docs_test
self._max_inference_sentence_id = max_inference_sentence_id
self._wiki_db: Optional[wiki_db.WikiDatabase] = None
self._wiki_titles: Optional[Set[Text]] = None
self._matcher: Optional[text_matcher.TextMatcher] = None
self._drqa_scrape_table: Optional[collections.ChainMap] = None
self._lucene_scrape_table: Optional[collections.ChainMap] = None
self._name_to_scrape: Optional[Text, collections.ChainMap] = None # pytype: disable=invalid-annotation # attribute-variable-annotations
self._ukp_docs: Optional[Dict[int, types.Json]] = None
self._claim_to_fold: Optional[Dict[int, Text]] = None
self._n_missing_pages = beam.metrics.Metrics.counter(
self.__class__, 'n_missing_pages')
# pyformat: enable
def setup(self):
self._claim_to_fold = {}
train = util.read_jsonlines(self._fever_train_path)
for claim in train:
self._claim_to_fold[claim['id']] = 'train'
dev = util.read_jsonlines(self._fever_dev_path)
for claim in dev:
self._claim_to_fold[claim['id']] = 'dev'
test = util.read_jsonlines(self._fever_test_path)
for claim in test:
self._claim_to_fold[claim['id']] = 'test'
self._wiki_db = wiki_db.WikiDatabase.from_local(self._wiki_db_path)
self._wiki_titles = set(self._wiki_db.get_wikipedia_urls())
self._matcher = text_matcher.TextMatcher()
self._matcher.load(self._text_matcher_params_path)
drqa_scrape_table = scrape_db.ScrapeDatabase.from_local(self._drqa_db_path) # pylint: disable=unused-variable
self._drqa_scrape_table = drqa_scrape_table
lucene_scrape_table = scrape_db.ScrapeDatabase.from_local(
self._lucene_db_path) # pylint: disable=unused-variable
self._lucene_scrape_table = lucene_scrape_table
self._name_to_scrape = {
constants.DRQA:
self._drqa_scrape_table,
constants.LUCENE:
self.
_lucene_scrape_table # pytype: disable=annotation-type-mismatch # attribute-variable-annotations
}
ukp_claim_docs = (
util.read_jsonlines(self._ukp_docs_train_path) +
util.read_jsonlines(self._ukp_docs_dev_path) +
util.read_jsonlines(self._ukp_docs_test_path))
self._ukp_docs = {claim['id']: claim for claim in ukp_claim_docs}
def _get_retrieved_documents(self, claim_id,
scrape_type):
"""Retrieve the appropriate set of documents depending on settings.
Args:
claim_id: The claim to get documents for
scrape_type: The scrape type to use when fetching documents
Returns:
A list of documents to generate examples from
"""
if scrape_type in (constants.DRQA, constants.LUCENE):
claim_scrape = self._name_to_scrape[scrape_type][str(claim_id)]
documents = [(doc.doc_id, doc.ir_score) for doc in claim_scrape.documents]
elif scrape_type in constants.UKP_TYPES:
if scrape_type == constants.UKP_WIKI:
claim_scrape = self._ukp_docs[claim_id]['wiki_results']
elif scrape_type == constants.UKP_PRED:
claim_scrape = self._ukp_docs[claim_id]['predicted_pages']
else:
raise ValueError(f'Invalid scrape type: {scrape_type}')
documents = [
# UKP Does not have document scores.
(doc_id.replace(' ', '_'), -1) for doc_id in claim_scrape
]
else:
raise ValueError(f'Invalid scrape type: {scrape_type}')
return documents
def _get_gold_examples(
self,
*,
claim_json,
scrape_type,
):
"""Create gold examples and seed the example dictionary with them.
Args:
claim_json: The json of the claim from fever dataset
scrape_type: What scrape type to label the gold examples with. Technically
they do not come from that scrape, but labeling them this way makes grouping
by scrape type easier to do later on.
Returns:
A dictionary of examples keyed by (wikipedia_url, sentence_id)
"""
examples = {}
used_wikipedia_urls = set()
claim_label = claim_json['label']
# For NOT ENOUGH INFO, there are no gold examples
if claim_label == constants.NOT_ENOUGH_INFO:
return examples, used_wikipedia_urls
for evidence_set in claim_json['evidence']:
for evidence in evidence_set:
wikipedia_url = util.normalize(evidence[2])
used_wikipedia_urls.add(wikipedia_url)
sentence_id = evidence[3]
page = self._wiki_db.get_page(wikipedia_url)
if page is None:
raise ValueError(f'Missing page: {wikipedia_url}')
if sentence_id in page.sentences:
sentence = page.sentences[sentence_id].text
key = (wikipedia_url, sentence_id)
# FEVER sometimes has duplicated evidence if it was picked by
# multiple raters.
if key not in examples:
sentence_with_title = underscore_to_whitespace(
wikipedia_url) + ' ' + sentence
examples[key] = {
'evidence_text': sentence,
'evidence_text_with_title': sentence_with_title,
'evidence_label': constants.MATCHING,
'claim_label': claim_label,
'gold': True,
'retrieved': False,
'background': False,
'doc_score': -1,
'wikipedia_url': wikipedia_url,
'sentence_id': sentence_id,
'scrape_type': scrape_type,
}
return examples, used_wikipedia_urls
def _get_similar_candidates(
self,
*,
claim_label,
documents,
used_wikipedia_urls,
scrape_type,
):
"""Return negative examples that are similar to the claim.
Args:
claim_label: The label of the fever claim
documents: The documents to use
used_wikipedia_urls: The urls used so far
scrape_type: The scrape type to use to find candidates
Returns:
A list of similar evidence candidates and updated wikipedia url set
"""
used_wikipedia_urls = set(used_wikipedia_urls)
candidates: List[types.Json] = []
for wikipedia_url, ir_score in documents:
used_wikipedia_urls.add(wikipedia_url)
parsed_page = self._wiki_db.get_page(wikipedia_url)
if parsed_page is None:
if scrape_type in constants.UKP_TYPES:
self._n_missing_pages.inc()
continue
else:
raise ValueError(f'Missing page: {wikipedia_url}')
for sentence_id, sentence_struct in parsed_page.sentences.items():
sentence_with_title = underscore_to_whitespace(
wikipedia_url) + ' ' + sentence_struct.text
example = {
'evidence_text': sentence_struct.text,
'evidence_text_with_title': sentence_with_title,
'evidence_label': constants.NOT_MATCHING,
'claim_label': claim_label,
'gold': False,
'retrieved': True,
'background': False,
'wikipedia_url': wikipedia_url,
'sentence_id': sentence_id,
'doc_score': ir_score,
'scrape_type': scrape_type,
}
candidates.append(example)
# We want to score and sort the retrieved candidates that are not also gold
return candidates, used_wikipedia_urls
def _get_background_candidates(
self, *, claim_label, used_wikipedia_urls,
scrape_type):
"""Return background negatives (ie random from wikipedia).
During inference, we should not get these, hence the shortcut.
Args:
claim_label: The label of the fever claim
used_wikipedia_urls: The wikipedia urls used so far
scrape_type: What scrape type to label the background examples with.
Technically they do not come from that scrape, but labeling them this way
makes grouping by scrape type easier to do later on.
Returns:
A list of background candidates and updated wikipedia urls used.
Does not mutate the original
"""
used_wikipedia_urls = set(used_wikipedia_urls)
background_candidates = []
while True:
if len(background_candidates) >= self._n_background_negatives:
break
# sample works on sets, choice does not
wikipedia_url = random.sample(self._wiki_titles, 1)[0]
if wikipedia_url in used_wikipedia_urls:
continue
used_wikipedia_urls.add(wikipedia_url)
page = self._wiki_db.get_page(wikipedia_url)
if page is None:
raise ValueError(f'Missing page: {wikipedia_url}')
sentence_candidates = list(page.sentences.keys())
if not sentence_candidates: # len(sentence_candidates) == 0
continue
sentence_id = random.choice(list(page.sentences.keys()))
sentence = page.sentences[sentence_id].text
sentence_with_title = underscore_to_whitespace(
wikipedia_url) + ' ' + sentence
background_candidates.append({
'evidence_text': sentence,
'evidence_text_with_title': sentence_with_title,
'evidence_label': constants.NOT_MATCHING,
'claim_label': claim_label,
'gold': False,
'retrieved': False,
'background': True,
'tfidf_candidate': False,
'wikipedia_url': wikipedia_url,
'sentence_id': sentence_id,
'doc_score': -1,
'scrape_type': scrape_type,
})
return background_candidates, used_wikipedia_urls
def _create_train_examples(self, claim_json):
used_wikipedia_urls = set()
claim_id = claim_json['id']
claim_text = claim_json['claim']
claim_label = claim_json['label']
# Seed examples with gold documents as positives, negs will be added
examples, gold_used_wikipedia_urls = self._get_gold_examples(
claim_json=claim_json,
scrape_type=self._train_scrape_type,
)
used_wikipedia_urls = used_wikipedia_urls.union(gold_used_wikipedia_urls)
# Add retrieved documents as negatives
documents = self._get_retrieved_documents(
claim_id,
scrape_type=self._train_scrape_type,
)
candidates, used_wikipedia_urls = self._get_similar_candidates(
claim_label=claim_label,
documents=documents,
used_wikipedia_urls=used_wikipedia_urls,
scrape_type=self._train_scrape_type,
)
for candidate in candidates:
key = (candidate['wikipedia_url'], candidate['sentence_id'])
# Prevent duplicates, just mark them as retrieved and attach doc score
if key in examples:
examples[key]['retrieved'] = True
examples[key]['doc_score'] = candidate['doc_score']
else:
examples[key] = candidate
# Score gold and retrieved evidence on the sentence level
examples_to_scores = list(examples.values())
# .predict() returns candidates sorted by score
if self._title_in_scoring:
text_key = 'evidence_text_with_title'
else:
text_key = 'evidence_text'
scored_examples = self._matcher.predict(
claim_text, examples_to_scores, text_key=text_key)
max_candidates = self._n_similar_negatives
final_candidates = scored_examples[:max_candidates]
final_examples = {}
for score, candidate in final_candidates:
key = (candidate['wikipedia_url'], candidate['sentence_id'])
candidate['sentence_score'] = score
candidate['tfidf_candidate'] = True
final_examples[key] = candidate
bottom_candidates = scored_examples[max_candidates:]
for score, candidate in bottom_candidates:
key = (candidate['wikipedia_url'], candidate['sentence_id'])
# Force include gold examples, but notate them with false tfidf candidate
if candidate['gold']:
candidate['sentence_score'] = score
candidate['tfidf_candidate'] = False
final_examples[key] = candidate
# During inference, we don't want background candidates; they're primarily
# useful for training.
background_candidates, used_wikipedia_urls = self._get_background_candidates(
claim_label=claim_label,
used_wikipedia_urls=used_wikipedia_urls,
scrape_type=self._train_scrape_type,
)
scored_background_candidates = self._matcher.score(
claim_text, background_candidates, text_key=text_key)
for score, candidate in zip(scored_background_candidates,
background_candidates):
candidate['sentence_score'] = score
key = (candidate['wikipedia_url'], candidate['sentence_id'])
# Since the wikipedia page is never seen, and only one sentence is drawn
# from each, it is impossible to accidentally duplicate evidence here.
final_examples[key] = candidate
return list(final_examples.values())
def _create_validation_examples(self, *, claim_json,
scrape_type,
n_inference_candidates):
"""Create validation examples for fever task.
This function follows these steps/guidelines:
1. Get up to the top n_inference_documents rated documents
2. Return up to the first max_inference_sentence_id sentences in each document
3. In total, return n_inference_candidates, obtaining the max by iteratively
getting the 1st sentence of each doc, then second etc.
4. For debugging, include gold examples not retrieved with these, but mark
tfidf_candidate False so that they can be filtered out
Args:
claim_json: The fever claim to get examples for
scrape_type: The scrape type to use
n_inference_candidates: Number of candidates to return
Returns:
Examples for validation on fever
"""
used_wikipedia_urls = set()
claim_id = claim_json['id']
claim_label = claim_json['label']
# Seed examples with gold documents as positives, negs will be added
examples, gold_used_wikipedia_urls = self._get_gold_examples(
claim_json=claim_json,
scrape_type=scrape_type,
)
for key in examples:
examples[key]['sentence_score'] = -examples[key]['sentence_id']
used_wikipedia_urls = used_wikipedia_urls.union(gold_used_wikipedia_urls)
# Add retrieved documents as negatives
# For inference, we generate the input documents for each type of way
# to get them, the scoring script handles separating this out to create
# metrics for each method so we can compare
documents = self._get_retrieved_documents(
claim_id,
scrape_type=scrape_type,
)
documents = documents[:self._n_inference_documents]
candidates: List[types.Json] = []
for wikipedia_url, ir_score in documents:
parsed_page = self._wiki_db.get_page(wikipedia_url)
used_wikipedia_urls.add(wikipedia_url)
if parsed_page is None:
self._n_missing_pages.inc()
else:
for sentence_id in range(self._max_inference_sentence_id):
if sentence_id in parsed_page.sentences:
sentence_struct = parsed_page.sentences[sentence_id]
sentence_with_title = underscore_to_whitespace(
wikipedia_url) + ' ' + sentence_struct.text
example = {
'evidence_text': sentence_struct.text,
'evidence_text_with_title': sentence_with_title,
'evidence_label': constants.NOT_MATCHING,
'claim_label': claim_label,
'gold': False,
'retrieved': True,
'background': False,
'wikipedia_url': wikipedia_url,
'sentence_id': sentence_id,
'doc_score': ir_score,
'scrape_type': scrape_type,
# This sorts examples with smallest sentence_id to the top
'sentence_score': -sentence_id
}
candidates.append(example)
return merge_validation_gold_and_candidate(examples, candidates,
n_inference_candidates)
def _create_test_examples(self, *, claim_json, scrape_type,
n_inference_candidates):
"""Create test examples for fever task.
This function is similar to create_validation_examples, but handles the
fact that: (1) there are no gold examples and (2) examples only have fields
"id" and "claim"
This function follows these steps/guidelines:
1. Get up to the top n_inference_documents rated documents
2. Return up to the first max_inference_sentence_id sentences in each document
3. In total, return n_inference_candidates, obtaining the max by iteratively
getting the 1st sentence of each doc, then second etc.
Args:
claim_json: The fever claim to get examples for
scrape_type: The scrape type to use
n_inference_candidates: Number of candidates to return
Returns:
Examples for test on fever
"""
claim_id = claim_json['id']
# Add retrieved documents as negatives
# For inference, we generate the input documents for each type of way
# to get them, the scoring script handles separating this out to create
# metrics for each method so we can compare
documents = self._get_retrieved_documents(
claim_id,
scrape_type=scrape_type,
)
documents = documents[:self._n_inference_documents]
candidates: List[types.Json] = []
for wikipedia_url, ir_score in documents:
parsed_page = self._wiki_db.get_page(wikipedia_url)
if parsed_page is None:
self._n_missing_pages.inc()
continue
for sentence_id in range(self._max_inference_sentence_id):
if sentence_id in parsed_page.sentences:
sentence_struct = parsed_page.sentences[sentence_id]
sentence_with_title = underscore_to_whitespace(
wikipedia_url) + ' ' + sentence_struct.text
example = {
'evidence_text': sentence_struct.text,
'evidence_text_with_title': sentence_with_title,
'evidence_label': constants.NOT_MATCHING,
# This label does not mean anything since test examples are not
# labeled, but it must exist and be valid for TFDS to work
# correctly.
'claim_label': constants.REFUTES,
'gold': False,
'retrieved': True,
'background': False,
'tfidf_candidate': True,
'wikipedia_url': wikipedia_url,
'sentence_id': sentence_id,
'doc_score': ir_score,
'scrape_type': scrape_type,
# This sorts examples with smallest sentence_id to the top
'sentence_score': -sentence_id
}
candidates.append(example)
return sorted(
candidates, reverse=True,
key=lambda c: c['sentence_score'])[:n_inference_candidates]
def process(self, claim_json, *args,
**kwargs):
"""Convert a json claim to a list of claim-evidence example pairs.
Sketch of this method:
1. Get the gold examples for the claim
2. Get the retrieved examples for the claim
3. Get the background examples for the claim
Then:
4. Deduplicate the gold and retrieved examples, maintaining track of where
they came from
5. Score the (large) list of gold/retrieved examples with a sentence matcher
6. Sort by this, and cut to the top evidence
7. Find any gold evidence not in the top evidence, mark that it was excluded,
and re-add it if in the whole-wiki scenario
Args:
claim_json: The claim json from fever
*args: API Compat
**kwargs: API Compat
Returns:
A list of json formatted examples for the tensorflow dataset
"""
claim_id = claim_json['id']
fold = self._claim_to_fold[claim_id]
if fold == 'train':
fold_examples = self._create_train_examples(claim_json)
elif fold == 'test':
fold_examples = []
fold_examples.extend(
self._create_test_examples(
claim_json=claim_json,
scrape_type=constants.DRQA,
n_inference_candidates=self._n_inference_candidates))
fold_examples.extend(
self._create_test_examples(
claim_json=claim_json,
scrape_type=constants.LUCENE,
n_inference_candidates=self._n_inference_candidates))
fold_examples.extend(
self._create_test_examples(
claim_json=claim_json,
scrape_type=constants.UKP_PRED,
n_inference_candidates=self._n_inference_candidates))
fold_examples.extend(
self._create_test_examples(
claim_json=claim_json,
scrape_type=constants.UKP_WIKI,
n_inference_candidates=self._n_inference_candidates))
elif fold == 'dev':
fold_examples = []
fold_examples.extend(
self._create_validation_examples(
claim_json=claim_json,
scrape_type=constants.DRQA,
n_inference_candidates=self._n_inference_candidates))
fold_examples.extend(
self._create_validation_examples(
claim_json=claim_json,
scrape_type=constants.LUCENE,
n_inference_candidates=self._n_inference_candidates))
fold_examples.extend(
self._create_validation_examples(
claim_json=claim_json,
scrape_type=constants.UKP_PRED,
n_inference_candidates=self._n_inference_candidates))
fold_examples.extend(
self._create_validation_examples(
claim_json=claim_json,
scrape_type=constants.UKP_WIKI,
n_inference_candidates=self._n_inference_candidates))
else:
raise ValueError(f'Invalid fold: {fold} for\n{claim_json}')
serialized_examples = []
for idx, example in enumerate(fold_examples):
scrape_type = example['scrape_type']
metadata = {
'claim_id': claim_id,
'claim_label': example['claim_label'],
'evidence_label': example['evidence_label'],
'doc_score': example.get('doc_score', -1),
'sentence_score': example['sentence_score'],
'scrape_type': scrape_type,
'gold': example['gold'],
'retrieved': example['retrieved'],
'background': example['background'],
'tfidf_candidate': example['tfidf_candidate'],
'wikipedia_url': example['wikipedia_url'],
'sentence_id': example['sentence_id'],
}
serialized_examples.append(
dict(
example_id=f'{claim_id}-{idx}-{scrape_type}',
claim_text=claim_json['claim'],
evidence_text=example['evidence_text'],
wikipedia_url=example['wikipedia_url'],
sentence_id=str(example['sentence_id']),
evidence_label=example['evidence_label'],
claim_label=example['claim_label'],
scrape_type=scrape_type,
metadata=json.dumps(metadata),
))
return serialized_examples
def dataset_path(*, data_dir, scrape_type,
include_not_enough_info, title_in_scoring,
n_similar_negatives, n_background_negatives):
"""Return the dataset path based on its configuration.
For example, {data_dir}/include_nei=True,n_background=10,n_similar=5,score_title=True,train_type=drqa
Args:
data_dir: The parent directory to use
scrape_type: The scrape type (e.g., drqa, lucene)
include_not_enough_info: Whether to include not enough information claims
title_in_scoring: Whether to include title in evidence for tfidf scoring
n_similar_negatives: How many similar negatives are used
n_background_negatives: How many background negatives are used
Returns:
Path for FeverEvidence TFDS to write to
"""
parts = [
f'train_type={scrape_type}', f'n_similar={n_similar_negatives}',
f'n_background={n_background_negatives}',
f'include_nei={include_not_enough_info}',
f'score_title={title_in_scoring}'
]
parts = sorted(parts)
directory = ','.join(parts)
return os.path.join(data_dir, directory)
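# Illustrative call with placeholder arguments; with these settings the sorted
# parts join to
# "/data/fever/include_nei=True,n_background=10,n_similar=5,score_title=True,train_type=drqa".
def _example_dataset_path():
  return dataset_path(
      data_dir='/data/fever',
      scrape_type='drqa',
      include_not_enough_info=True,
      title_in_scoring=True,
      n_similar_negatives=5,
      n_background_negatives=10)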
class FeverEvidence(tfds.core.BeamBasedBuilder):
"""TFDS for Fever Evidence Matching."""
VERSION = tfds.core.Version('0.1.0')
def __init__(
    self,
    *,
    # Next params optional if loading from data_dir, required for generation.
    title_in_scoring,
    n_similar_negatives,
    n_background_negatives,
    include_not_enough_info,
    train_scrape_type,
    n_inference_documents=None,
    n_inference_candidates=None,
    max_inference_sentence_id=None,
    wiki_db_path=None,
    text_matcher_params_path=None,
    fever_train_path=None,
    fever_dev_path=None,
    fever_test_path=None,
    ukp_docs_train=None, ukp_docs_dev=None, ukp_docs_test=None,
    drqa_db_path=None, lucene_db_path=None,
    **kwargs):
  super().__init__(**kwargs)
  self._title_in_scoring = title_in_scoring
  self._n_similar_negatives = n_similar_negatives
  self._n_background_negatives = n_background_negatives
  self._include_not_enough_info = include_not_enough_info
  self._train_scrape_type = train_scrape_type
  self._n_inference_documents = n_inference_documents
  self._n_inference_candidates = n_inference_candidates
  self._max_inference_sentence_id = max_inference_sentence_id
  self._wiki_db_path = wiki_db_path
  self._text_matcher_params_path = text_matcher_params_path
  self._fever_train = fever_train_path
  self._fever_dev = fever_dev_path
  self._fever_test = fever_test_path
  self._drqa_db_path = drqa_db_path
  self._lucene_db_path = lucene_db_path
  self._ukp_docs_train_path = ukp_docs_train
  self._ukp_docs_dev_path = ukp_docs_dev
  self._ukp_docs_test_path = ukp_docs_test
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
'example_id':
tf.string,
'metadata':
tf.string,
'claim_text':
tfds.features.Text(),
'evidence_text':
tfds.features.Text(),
'wikipedia_url':
tfds.features.Text(),
'sentence_id':
tfds.features.Text(),
'scrape_type':
tfds.features.Text(),
'evidence_label':
tfds.features.ClassLabel(
names=constants.EVIDENCE_MATCHING_CLASSES),
'claim_label':
tfds.features.ClassLabel(names=constants.FEVER_CLASSES)
}),
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'claim_filepath': self._fever_train,
},
num_shards=100,
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'claim_filepath': self._fever_dev,
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'claim_filepath': self._fever_test,
})
]
def _build_pcollection(self, pipeline, claim_filepath):
"""Build a beam pipeline to generate training examples.
Args:
pipeline: The pipeline configured with a runner
claim_filepath: The path to read claims from
Returns:
Beam pipeline to compute examples
"""
claims = util.read_jsonlines(claim_filepath)
if not self._include_not_enough_info:
claims = [c for c in claims if c['label'] != constants.NOT_ENOUGH_INFO]
logging.info('Reading claims from: %s', claim_filepath)
logging.info('n_similar_negatives=%s', self._n_similar_negatives)
logging.info('n_background_negatives=%s', self._n_background_negatives)
return (pipeline
| 'LoadClaims' >> beam.Create(claims)
| 'ReshuffleClaims' >> beam.Reshuffle()
| 'ExtractEvidenceForEachClaim' >> beam.ParDo(
ExtractEvidenceForClaim(
n_similar_negatives=self._n_similar_negatives,
n_background_negatives=self._n_background_negatives,
n_inference_candidates=self._n_inference_candidates,
n_inference_documents=self._n_inference_documents,
max_inference_sentence_id=self._max_inference_sentence_id,
wiki_db_path=self._wiki_db_path,
text_matcher_params_path=self._text_matcher_params_path,
title_in_scoring=self._title_in_scoring,
train_scrape_type=self._train_scrape_type,
ukp_docs_train=self._ukp_docs_train_path,
ukp_docs_dev=self._ukp_docs_dev_path,
ukp_docs_test=self._ukp_docs_test_path,
fever_train_path=self._fever_train,
fever_dev_path=self._fever_dev,
fever_test_path=self._fever_test,
drqa_db_path=self._drqa_db_path,
lucene_db_path=self._lucene_db_path,
))
| 'ExampleWithId' >> beam.Map(lambda x: (x['example_id'], x))
| 'ShuffleExamples' >> beam.Reshuffle())
|
|
from collections import defaultdict, namedtuple
import re
import inspect
BOOLEAN_FIELDS = (
"search.regex", "searchable", "orderable", "regex"
)
DataColumn = namedtuple("DataColumn", ("name", "model_name", "filter"))
class DataTablesError(ValueError):
pass
class DataTable(object):
def __init__(self, params, model, query, columns):
self.params = params
self.model = model
self.query = query
self.data = {}
self.columns = []
self.columns_dict = {}
self.search_func = lambda qs, s: qs
self.column_search_func = lambda mc, qs, s: qs
for col in columns:
name, model_name, filter_func = None, None, None
if isinstance(col, DataColumn):
self.columns.append(col)
continue
elif isinstance(col, tuple):
# col is either 1. (name, model_name), 2. (name, filter) or 3. (name, model_name, filter)
if len(col) == 3:
name, model_name, filter_func = col
elif len(col) == 2:
# Work out the second argument. If it is a function then it's type 2, else it is type 1.
if callable(col[1]):
name, filter_func = col
model_name = name
else:
name, model_name = col
else:
raise ValueError("Columns must be a tuple of 2 to 3 elements")
else:
# It's just a string
name, model_name = col, col
d = DataColumn(name=name, model_name=model_name, filter=filter_func)
self.columns.append(d)
self.columns_dict[d.name] = d
for column in (col for col in self.columns if "." in col.model_name):
self.query = self.query.join(column.model_name.split(".")[0], aliased=True)
def query_into_dict(self, key_start):
returner = defaultdict(dict)
# Matches columns[number][key] with an [optional_value] on the end
pattern = r"{}(?:\[(\d+)\])?\[(\w+)\](?:\[(\w+)\])?".format(key_start)
columns = (param for param in self.params if re.match(pattern, param))
for param in columns:
column_id, key, optional_subkey = re.search(pattern, param).groups()
if column_id is None:
returner[key] = self.coerce_value(key, self.params[param])
elif optional_subkey is None:
returner[int(column_id)][key] = self.coerce_value(key, self.params[param])
else:
# Oh baby a triple
subdict = returner[int(column_id)].setdefault(key, {})
subdict[optional_subkey] = self.coerce_value("{}.{}".format(key, optional_subkey),
self.params[param])
return dict(returner)
@staticmethod
def coerce_value(key, value):
try:
return int(value)
except ValueError:
if key in BOOLEAN_FIELDS:
return value == "true"
return value
def get_integer_param(self, param_name):
if param_name not in self.params:
raise DataTablesError("Parameter {} is missing".format(param_name))
try:
return int(self.params[param_name])
except ValueError:
raise DataTablesError("Parameter {} is invalid".format(param_name))
def add_data(self, **kwargs):
self.data.update(**kwargs)
def json(self):
try:
return self._json()
except DataTablesError as e:
return {
"error": str(e)
}
def get_column(self, column):
if "." in column.model_name:
column_path = column.model_name.split(".")
relationship = getattr(self.model, column_path[0])
model_column = getattr(relationship.property.mapper.entity, column_path[1])
else:
model_column = getattr(self.model, column.model_name)
return model_column
def searchable(self, func):
self.search_func = func
def searchable_column(self, func):
self.column_search_func = func
def _json(self):
draw = self.get_integer_param("draw")
start = self.get_integer_param("start")
length = self.get_integer_param("length")
columns = self.query_into_dict("columns")
ordering = self.query_into_dict("order")
search = self.query_into_dict("search")
query = self.query
total_records = query.count()
if callable(self.search_func) and search.get("value", None):
query = self.search_func(query, search["value"])
for column_data in columns.values():
search_value = column_data["search"]["value"]
if (
not column_data["searchable"]
or not search_value
or not callable(self.column_search_func)
):
continue
column_name = column_data["data"]
column = self.columns_dict[column_name]
model_column = self.get_column(column)
query = self.column_search_func(model_column, query, str(search_value))
for order in ordering.values():
direction, column = order["dir"], order["column"]
if column not in columns:
raise DataTablesError("Cannot order {}: column not found".format(column))
if not columns[column]["orderable"]:
continue
column_name = columns[column]["data"]
column = self.columns_dict[column_name]
model_column = self.get_column(column)
if isinstance(model_column, property):
raise DataTablesError("Cannot order by column {} as it is a property".format(column.model_name))
query = query.order_by(model_column.desc() if direction == "desc" else model_column.asc())
filtered_records = query.count()
if length > 0:
query = query.slice(start, start + length)
return {
"draw": draw,
"recordsTotal": total_records,
"recordsFiltered": filtered_records,
"data": [
self.output_instance(instance) for instance in query.all()
]
}
def output_instance(self, instance):
returner = {
key.name: self.get_value(key, instance) for key in self.columns
}
if self.data:
returner["DT_RowData"] = {
k: v(instance) for k, v in self.data.items()
}
return returner
def get_value(self, key, instance):
attr = key.model_name
if "." in attr:
tmp_list = attr.split(".")
attr = tmp_list[-1]
for sub in tmp_list[:-1]:
instance = getattr(instance, sub)
if key.filter is not None:
r = key.filter(instance)
else:
r = getattr(instance, attr)
return r() if inspect.isroutine(r) else r
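# Illustrative sketch of wiring DataTable into a view. The request parameters,
# SQLAlchemy session and User/Group models below are hypothetical stand-ins,
# not part of this module; only the DataTable API usage is taken from the
# class above.
def example_datatable_view(request_params, db_session, User):
    """Build a DataTables response dict for a hypothetical User model.

    `request_params` is the flat DataTables request mapping, with keys such as
    'draw', 'start', 'length', 'columns[0][data]', 'columns[0][search][value]',
    'order[0][dir]' and 'search[value]'.
    """
    table = DataTable(
        params=request_params,
        model=User,
        query=db_session.query(User),
        columns=[
            "id",                                        # plain string column
            ("name", "full_name"),                       # (name, model_name)
            ("email", lambda user: user.email.lower()),  # (name, filter)
            ("group", "group.name", lambda group: group.name),  # (name, model_name, filter)
        ])
    # Optional global search hook: receives the query and the search term.
    table.searchable(lambda qs, term: qs.filter(User.full_name.ilike("%%%s%%" % term)))
    # Extra per-row values exposed to the client as DT_RowData.
    table.add_data(profile_url=lambda user: "/users/%d" % user.id)
    return table.json()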
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import iso8601
import netaddr
import six
from solum.openstack.common.gettextutils import _
from solum.openstack.common import timeutils
class KeyTypeError(TypeError):
def __init__(self, expected, value):
super(KeyTypeError, self).__init__(
_('Key %(key)s must be of type %(expected)s not %(actual)s'
) % {'key': repr(value),
'expected': expected.__name__,
'actual': value.__class__.__name__,
})
class ElementTypeError(TypeError):
def __init__(self, expected, key, value):
super(ElementTypeError, self).__init__(
_('Element %(key)s:%(val)s must be of type %(expected)s'
' not %(actual)s'
) % {'key': key,
'val': repr(value),
'expected': expected,
'actual': value.__class__.__name__,
})
class AbstractFieldType(six.with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def coerce(self, obj, attr, value):
"""This is called to coerce (if possible) a value on assignment.
This method should convert the value given into the designated type,
or throw an exception if this is not possible.
:param:obj: The DomainObject on which an attribute is being set
:param:attr: The name of the attribute being set
:param:value: The value being set
:returns: A properly-typed value
"""
pass
@abc.abstractmethod
def from_primitive(self, obj, attr, value):
"""This is called to deserialize a value.
This method should deserialize a value from the form given by
to_primitive() to the designated type.
:param:obj: The DomainObject on which the value is to be set
:param:attr: The name of the attribute which will hold the value
:param:value: The serialized form of the value
:returns: The natural form of the value
"""
pass
@abc.abstractmethod
def to_primitive(self, obj, attr, value):
"""This is called to serialize a value.
This method should serialize a value to the form expected by
from_primitive().
:param:obj: The DomainObject on which the value is set
:param:attr: The name of the attribute holding the value
:param:value: The natural form of the value
:returns: The serialized form of the value
"""
pass
@abc.abstractmethod
def describe(self):
"""Returns a string describing the type of the field."""
pass
class FieldType(AbstractFieldType):
def coerce(self, obj, attr, value):
return value
def from_primitive(self, obj, attr, value):
return value
def to_primitive(self, obj, attr, value):
return value
def describe(self):
return self.__class__.__name__
class UnspecifiedDefault(object):
pass
class Field(object):
def __init__(self, field_type, nullable=False, default=UnspecifiedDefault):
self._type = field_type
self._nullable = nullable
self._default = default
@property
def nullable(self):
return self._nullable
@property
def default(self):
return self._default
def _null(self, obj, attr):
if self.nullable:
return None
elif self._default != UnspecifiedDefault:
# NOTE(danms): We coerce the default value each time the field
# is set to None as our contract states that we'll let the type
# examine the object and attribute name at that time.
return self._type.coerce(obj, attr, self._default)
else:
raise ValueError(_("Field `%s' cannot be None") % attr)
def coerce(self, obj, attr, value):
"""Coerce a value to a suitable type.
This is called any time you set a value on an object, like:
foo.myint = 1
and is responsible for making sure that the value (1 here) is of
the proper type, or can be sanely converted.
This also handles the potentially nullable or defaultable
nature of the field and calls the coerce() method on a
FieldType to actually do the coercion.
:param:obj: The object being acted upon
:param:attr: The name of the attribute/field being set
:param:value: The value being set
:returns: The properly-typed value
"""
if value is None:
return self._null(obj, attr)
else:
return self._type.coerce(obj, attr, value)
def from_primitive(self, obj, attr, value):
"""Deserialize a value from primitive form.
This is responsible for deserializing a value from primitive
into regular form. It calls the from_primitive() method on a
FieldType to do the actual deserialization.
:param:obj: The object being acted upon
:param:attr: The name of the attribute/field being deserialized
:param:value: The value to be deserialized
:returns: The deserialized value
"""
if value is None:
return None
else:
return self._type.from_primitive(obj, attr, value)
def to_primitive(self, obj, attr, value):
"""Serialize a value to primitive form.
This is responsible for serializing a value to primitive
form. It calls to_primitive() on a FieldType to do the actual
serialization.
:param:obj: The object being acted upon
:param:attr: The name of the attribute/field being serialized
:param:value: The value to be serialized
:returns: The serialized value
"""
if value is None:
return None
else:
return self._type.to_primitive(obj, attr, value)
def describe(self):
"""Return a short string describing the type of this field."""
name = self._type.describe()
prefix = self.nullable and 'Nullable' or ''
return prefix + name
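# Small illustrative check, not part of the original module: a nullable Field
# passes None through unchanged, while a non-nullable Field with a default
# coerces the default value instead of raising.
def _example_field_null_handling():
    assert Field(FieldType(), nullable=True).coerce(None, 'attr', None) is None
    assert Field(FieldType(), default='fallback').coerce(None, 'attr', None) == 'fallback'
    assert Field(FieldType()).coerce(None, 'attr', 'value') == 'value'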
class String(FieldType):
def coerce(self, obj, attr, value):
# FIXME(danms): We should really try to avoid the need to do this
if isinstance(value, (basestring, int, long, float,
datetime.datetime)):
return unicode(value)
else:
raise ValueError(_('A string is required here, not %s') %
                 value.__class__.__name__)
class UUID(FieldType):
def coerce(self, obj, attr, value):
# FIXME(danms): We should actually verify the UUIDness here
return str(value)
class Integer(FieldType):
def coerce(self, obj, attr, value):
return int(value)
class Boolean(FieldType):
def coerce(self, obj, attr, value):
return bool(value)
class DateTime(FieldType):
def coerce(self, obj, attr, value):
if isinstance(value, basestring):
value = timeutils.parse_isotime(value)
elif not isinstance(value, datetime.datetime):
raise ValueError(_('A datetime.datetime is required here'))
if value.utcoffset() is None:
value = value.replace(tzinfo=iso8601.iso8601.Utc())
return value
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, timeutils.parse_isotime(value))
def to_primitive(self, obj, attr, value):
return timeutils.isotime(value)
class IPV4Address(FieldType):
def coerce(self, obj, attr, value):
try:
return netaddr.IPAddress(value, version=4)
except netaddr.AddrFormatError as e:
raise ValueError(str(e))
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
def to_primitive(self, obj, attr, value):
return str(value)
class IPV6Address(FieldType):
def coerce(self, obj, attr, value):
try:
return netaddr.IPAddress(value, version=6)
except netaddr.AddrFormatError as e:
raise ValueError(str(e))
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
def to_primitive(self, obj, attr, value):
return str(value)
class CompoundFieldType(FieldType):
def __init__(self, element_type, **field_args):
self._element_type = Field(element_type, **field_args)
class List(CompoundFieldType):
def coerce(self, obj, attr, value):
if not isinstance(value, list):
raise ValueError(_('A list is required here'))
for index, element in enumerate(list(value)):
value[index] = self._element_type.coerce(
obj, '%s[%i]' % (attr, index), element)
return value
def to_primitive(self, obj, attr, value):
return [self._element_type.to_primitive(obj, attr, x) for x in value]
def from_primitive(self, obj, attr, value):
return [self._element_type.from_primitive(obj, attr, x) for x in value]
class Dict(CompoundFieldType):
def coerce(self, obj, attr, value):
if not isinstance(value, dict):
raise ValueError(_('A dict is required here'))
for key, element in value.items():
if not isinstance(key, basestring):
raise KeyTypeError(basestring, key)
value[key] = self._element_type.coerce(
obj, '%s["%s"]' % (attr, key), element)
return value
def to_primitive(self, obj, attr, value):
primitive = {}
for key, element in value.items():
primitive[key] = self._element_type.to_primitive(
obj, '%s["%s"]' % (attr, key), element)
return primitive
def from_primitive(self, obj, attr, value):
concrete = {}
for key, element in value.items():
concrete[key] = self._element_type.from_primitive(
obj, '%s["%s"]' % (attr, key), element)
return concrete
class Object(FieldType):
def __init__(self, obj_name, **kwargs):
self._obj_name = obj_name
super(Object, self).__init__(**kwargs)
def coerce(self, obj, attr, value):
try:
obj_name = value.obj_name()
except AttributeError:
obj_name = ""
if obj_name != self._obj_name:
raise ValueError(_('An object of type %s is required here') %
self._obj_name)
return value
def to_primitive(self, obj, attr, value):
return value.obj_to_primitive()
def from_primitive(self, obj, attr, value):
# FIXME(danms): Avoid circular import from base.py
from solum.objects import base as obj_base
return obj_base.DomainObject.obj_from_primitive(value, obj._context)
def describe(self):
return "Object<%s>" % self._obj_name
class CIDR(FieldType):
def coerce(self, obj, attr, value):
try:
network, length = value.split('/')
except (ValueError, AttributeError):
raise ValueError(_('CIDR "%s" is not in proper form') % value)
try:
network = netaddr.IPAddress(network)
except netaddr.AddrFormatError:
raise ValueError(_('Network "%s" is not valid') % network)
try:
length = int(length)
assert (length >= 0)
except (ValueError, AssertionError):
raise ValueError(_('Netmask length "%s" is not valid') % length)
if ((network.version == 4 and length > 32) or
(network.version == 6 and length > 128)):
raise ValueError(_('Netmask length "%(length)s" is not valid '
'for IPv%(version)i address') %
{'length': length, 'version': network.version})
return value
class AutoTypedField(Field):
AUTO_TYPE = None
def __init__(self, **kwargs):
super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs)
class StringField(AutoTypedField):
AUTO_TYPE = String()
class UUIDField(AutoTypedField):
AUTO_TYPE = UUID()
class IntegerField(AutoTypedField):
AUTO_TYPE = Integer()
class BooleanField(AutoTypedField):
AUTO_TYPE = Boolean()
class DateTimeField(AutoTypedField):
AUTO_TYPE = DateTime()
class IPV4AddressField(AutoTypedField):
AUTO_TYPE = IPV4Address()
class IPV6AddressField(AutoTypedField):
AUTO_TYPE = IPV6Address()
class DictOfStringsField(AutoTypedField):
AUTO_TYPE = Dict(String())
class DictOfNullableStringsField(AutoTypedField):
AUTO_TYPE = Dict(String(), nullable=True)
class ListOfStringsField(AutoTypedField):
AUTO_TYPE = List(String())
class ObjectField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = Object(objtype)
super(ObjectField, self).__init__(**kwargs)
class ListOfObjectsField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = List(Object(objtype))
super(ListOfObjectsField, self).__init__(**kwargs)
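# Illustrative sketch, not part of the original module: a new field is defined
# by subclassing FieldType with a coerce() implementation and exposing it
# through an AutoTypedField. The port-number semantics below are invented
# purely as an example.
class PortNumber(FieldType):
    def coerce(self, obj, attr, value):
        value = int(value)
        if value < 0 or value > 65535:
            raise ValueError(_('%(attr)s must be a port number, not %(value)s')
                             % {'attr': attr, 'value': value})
        return value
class PortNumberField(AutoTypedField):
    AUTO_TYPE = PortNumber()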
|
|
# -*- coding: utf-8 -*-
"""
These tests all run against an actual Google Music account.
Destructive modifications are not made, but if things go terrible wrong,
an extra test playlist or song may result.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import * # noqa
from collections import namedtuple
import datetime
from hashlib import md5
import itertools
import os
import re
import types
import warnings
from decorator import decorator
from proboscis.asserts import (
assert_true, assert_equal, assert_is_not_none,
assert_raises, assert_not_equal, Check,
)
from proboscis import test, before_class, after_class, SkipTest
import requests
from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from gmusicapi import Musicmanager, Mobileclient
# from gmusicapi.protocol import mobileclient
from gmusicapi.protocol.shared import authtypes
from gmusicapi.utils.utils import retry, id_or_nid
import gmusicapi.test.utils as test_utils
TEST_PLAYLIST_NAME = 'gmusicapi_test_playlist'
TEST_PLAYLIST_DESCRIPTION = 'gmusicapi test playlist'
TEST_STATION_NAME = 'gmusicapi_test_station'
TEST_STORE_GENRE_ID = 'METAL'
# that dumb little intro track on Conspiracy of One,
# picked since it's only a few seconds long
TEST_STORE_SONG_ID = 'Tf3pxtcrp2tw7i6kdxzueoz7uia'
# used for testing streaming.
# differences between clients are presumably from stream quality.
TEST_STORE_SONG_WC_HASH = 'c3302fe6bd54ce9b310f92da1904f3b9'
TEST_STORE_SONG_MC_HASH = '75b3281eda6af04239bf4f23de65618d'
# The Nerdist.
TEST_PODCAST_SERIES_ID = 'Iliyrhelw74vdqrro77kq2vrdhy'
# An episode of Note to Self.
# Picked because it's very short (~4 minutes).
TEST_PODCAST_EPISODE_ID = 'Diksw5cywxflebfs3dbiiabfphu'
TEST_PODCAST_EPISODE_HASH = 'e8ff4efd6a3a6a1017b35e0ef564d840'
# Amorphis
TEST_STORE_ARTIST_ID = 'Apoecs6off3y6k4h5nvqqos4b5e'
# Holographic Universe
TEST_STORE_ALBUM_ID = 'B4cao5ms5jjn36notfgnhjtguwa'
# this is owned by my test account, so it shouldn't disappear
TEST_PLAYLIST_SHARETOKEN = ('AMaBXymHAkflgs5lvFAUyyQLYelqqMZNAB4v7Y_-'
'v9vmrctLOeW64GScAScoFHEnrLgOP5DSRpl9FYIH'
'b84HRBvyIMsxc7Zlrg==')
TEST_CURATED_STATION_ID = 'L75iymnapfmeiklef5rhaqxqiry'
# this is a little data class for the songs we upload
TestSong = namedtuple('TestSong', 'sid title artist album full_data')
def sids(test_songs):
"""Given [TestSong], return ['sid']."""
return [s.sid for s in test_songs]
def test_subscription_features():
return 'GM_A' in os.environ
@decorator
def subscription(f, *args, **kwargs):
"""Declare a test to only be run if subscription testing is enabled."""
if test_subscription_features():
return f(*args, **kwargs)
else:
raise SkipTest('Subscription testing disabled')
@test(groups=['server-other'])
class SslVerificationTests(object):
test_url = 'https://wrong.host.badssl.com/'
@test
def site_has_invalid_cert(self):
assert_raises(SSLError, requests.head, self.test_url)
def request_invalid_site(self, client):
req_kwargs = {'url': self.test_url,
'method': 'HEAD'}
no_auth = authtypes()
client.session.send(req_kwargs, no_auth)
@test
def clients_verify_by_default(self):
# Webclient removed since testing is disabled.
for client_cls in (Mobileclient, Musicmanager):
assert_raises(SSLError, self.request_invalid_site, client_cls())
@test
def disable_client_verify(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=InsecureRequestWarning)
# Webclient removed since testing is disabled.
for client_cls in (Mobileclient, Musicmanager):
self.request_invalid_site(client_cls(verify_ssl=False)) # should not raise SSLError
@test(groups=['server'])
class ClientTests(object):
# set on the instance in login
# wc = None # webclient
mm = None # musicmanager
mc = None # mobileclient
# These are set on the instance in eg create_song.
# both are [TestSong]
user_songs = None
store_songs = None
playlist_ids = None
plentry_ids = None
station_ids = None
podcast_ids = None
delete_podcast = True # Set to false if user already subscribed to test podcast.
@property
def all_songs(self):
return (self.user_songs or []) + (self.store_songs or [])
def mc_get_playlist_songs(self, plid):
"""For convenience, since mc can only get all playlists at once."""
all_contents = self.mc.get_all_user_playlist_contents()
found = [p for p in all_contents if p['id'] == plid]
assert_equal(len(found), 1)
return found[0]['tracks']
@before_class
def login(self):
# self.wc = test_utils.new_test_client(Webclient)
# assert_true(self.wc.is_authenticated())
self.mm = test_utils.new_test_client(Musicmanager)
assert_true(self.mm.is_authenticated())
self.mc = test_utils.new_test_client(Mobileclient)
assert_true(self.mc.is_authenticated())
@after_class(always_run=True)
def logout(self):
# if self.wc is None:
# raise SkipTest('did not create wc')
# assert_true(self.wc.logout())
if self.mm is None:
raise SkipTest('did not create mm')
assert_true(self.mm.logout())
if self.mc is None:
raise SkipTest('did not create mc')
assert_true(self.mc.logout())
# This next section is a bit odd: it orders tests that create
# required resources.
# The intuition: starting from an empty library, you need to create
# a song before you can eg add it to a playlist.
# The dependencies end up with an ordering that might look like:
#
# with song
# with playlist
# with plentry
# with station
#
#
# Suggestions to improve any of this are welcome!
@staticmethod
@retry
def assert_songs_state(method, sids, present):
"""
Assert presence/absence of sids and return a list of
TestSongs found.
:param method: eg self.mc.get_all_songs
:param sids: list of song ids
:param present: if True verify songs are present; False the opposite
"""
library = method()
found = [s for s in library if s['id'] in sids]
expected_len = len(sids)
if not present:
expected_len = 0
assert_equal(len(found), expected_len)
return [TestSong(s['id'], s['title'], s['artist'], s['album'], s)
for s in found]
@staticmethod
@retry
def assert_list_inc_equivalence(method, **kwargs):
"""
Assert that some listing method returns the same
contents for incremental=True/False.
:param method: eg self.mc.get_all_songs, must support `incremental` kwarg
:param **kwargs: passed to method
"""
lib_chunk_gen = method(incremental=True, **kwargs)
assert_true(isinstance(lib_chunk_gen, types.GeneratorType))
assert_equal([e for chunk in lib_chunk_gen for e in chunk],
method(incremental=False, **kwargs))
@test
def song_create(self):
# This can create more than one song: one through uploading, one through
# adding a store track to the library.
user_sids = []
store_sids = []
fname = test_utils.small_mp3
uploaded, matched, not_uploaded = self.mm.upload(fname)
if len(not_uploaded) == 1 and 'ALREADY_EXISTS' in not_uploaded[fname]:
# delete the song if it exists already because a previous test failed
self.mc.delete_songs(re.search(r'\(.*\)', not_uploaded[fname]).group().strip('()'))
# and retry the upload
uploaded, matched, not_uploaded = self.mm.upload(fname)
# Otherwise, it should have been uploaded normally.
assert_equal(not_uploaded, {})
assert_equal(matched, {})
assert_equal(list(uploaded.keys()), [fname])
user_sids.append(uploaded[fname])
if test_subscription_features():
store_sids.append(self.mc.add_store_tracks(TEST_STORE_SONG_ID)[0])
# we test get_all_songs here so that we can assume the existence
# of the song for future tests (the servers take time to sync an upload)
self.user_songs = self.assert_songs_state(self.mc.get_all_songs, user_sids, present=True)
self.store_songs = self.assert_songs_state(self.mc.get_all_songs, store_sids, present=True)
@test
def playlist_create(self):
mc_id = self.mc.create_playlist(TEST_PLAYLIST_NAME, "", public=True)
# wc_id = self.wc.create_playlist(TEST_PLAYLIST_NAME, "", public=True)
# like song_create, retry until the playlist appears
@retry
def assert_playlist_exists(plids):
found = [p for p in self.mc.get_all_playlists()
if p['id'] in plids]
assert_equal(len(found), 1)
assert_playlist_exists([mc_id])
self.playlist_ids = [mc_id]
@test(depends_on=[playlist_create, song_create],
runs_after_groups=['playlist.exists', 'song.exists'])
def plentry_create(self):
song_ids = [self.user_songs[0].sid]
# create 3 entries total
# 3 songs is the minimum to fully test reordering, and also includes the
# duplicate song_id case
double_id = self.user_songs[0].sid
if test_subscription_features():
double_id = TEST_STORE_SONG_ID
song_ids += [double_id] * 2
plentry_ids = self.mc.add_songs_to_playlist(self.playlist_ids[0], song_ids)
@retry
def assert_plentries_exist(plid, plentry_ids):
songs = self.mc_get_playlist_songs(plid)
found = [e for e in songs
if e['id'] in plentry_ids]
assert_equal(len(found), len(plentry_ids))
assert_plentries_exist(self.playlist_ids[0], plentry_ids)
self.plentry_ids = plentry_ids
@test(groups=['plentry'], depends_on=[plentry_create],
runs_after_groups=['plentry.exists'],
always_run=True)
def plentry_delete(self):
if self.plentry_ids is None:
raise SkipTest('did not store self.plentry_ids')
res = self.mc.remove_entries_from_playlist(self.plentry_ids)
assert_equal(res, self.plentry_ids)
@retry
def assert_plentries_removed(plid, entry_ids):
found = [e for e in self.mc_get_playlist_songs(plid)
if e['id'] in entry_ids]
assert_equal(len(found), 0)
assert_plentries_removed(self.playlist_ids[0], self.plentry_ids)
@test(groups=['playlist'], depends_on=[playlist_create],
runs_after=[plentry_delete],
runs_after_groups=['playlist.exists'],
always_run=True)
def playlist_delete(self):
if self.playlist_ids is None:
raise SkipTest('did not store self.playlist_ids')
for plid in self.playlist_ids:
res = self.mc.delete_playlist(plid)
assert_equal(res, plid)
@retry
def assert_playlist_does_not_exist(plid):
found = [p for p in self.mc.get_all_playlists()
if p['id'] == plid]
assert_equal(len(found), 0)
for plid in self.playlist_ids:
assert_playlist_does_not_exist(plid)
@test
@subscription
def station_create(self):
station_ids = []
for prefix, kwargs in (
('Store song', {'track_id': TEST_STORE_SONG_ID}),
('Store-added song', {'track_id': self.store_songs[0].sid}),
('up song', {'track_id': self.user_songs[0].sid}),
('artist', {'artist_id': TEST_STORE_ARTIST_ID}),
('album', {'album_id': TEST_STORE_ALBUM_ID}),
('genre', {'genre_id': TEST_STORE_GENRE_ID}),
('playlist', {'playlist_token': TEST_PLAYLIST_SHARETOKEN}),
('curated station', {'curated_station_id': TEST_CURATED_STATION_ID})):
station_ids.append(
self.mc.create_station(prefix + ' ' + TEST_STATION_NAME, **kwargs))
@retry
def assert_station_exists(station_id):
stations = self.mc.get_all_stations()
found = [s for s in stations
if s['id'] == station_id]
assert_equal(len(found), 1)
for station_id in station_ids:
assert_station_exists(station_id)
self.station_ids = station_ids
@test(groups=['station'], depends_on=[station_create, song_create],
runs_after_groups=['station.exists', 'song.exists'],
always_run=True)
def station_delete(self):
if self.station_ids is None:
raise SkipTest('did not store self.station_ids')
res = self.mc.delete_stations(self.station_ids)
assert_equal(res, self.station_ids)
@retry
def assert_station_deleted(station_id):
stations = self.mc.get_all_stations()
found = [s for s in stations
if s['id'] == station_id]
assert_equal(len(found), 0)
for station_id in self.station_ids:
assert_station_deleted(station_id)
@test(groups=['song'], depends_on=[song_create],
runs_after=[plentry_delete, station_delete],
runs_after_groups=["song.exists"],
always_run=True)
def song_delete(self):
# split deletion between wc and mc
# mc is only to run if subscription testing not enabled
with Check() as check:
for i, testsong in enumerate(self.all_songs):
if True:
res = self.mc.delete_songs(testsong.sid)
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = self.wc.delete_songs(testsong.sid)
check.equal(res, [testsong.sid])
self.assert_songs_state(self.mc.get_all_songs, sids(self.all_songs), present=False)
@test
def podcast_create(self):
# Check to make sure podcast doesn't already exist to prevent deletion.
already_exists = [pc for pc in self.mc.get_all_podcast_series()
if pc['seriesId'] == TEST_PODCAST_SERIES_ID]
if already_exists:
self.delete_podcast = False
# like song_create, retry until the podcast appears
@retry
def assert_podcast_exists(pcids):
found = [pc for pc in self.mc.get_all_podcast_series()
if pc['seriesId'] in pcids]
assert_equal(len(found), 1)
pc_id = self.mc.add_podcast_series(TEST_PODCAST_SERIES_ID)
assert_podcast_exists([pc_id])
self.podcast_ids = [pc_id]
@test(groups=['podcast'], depends_on=[podcast_create],
runs_after_groups=['podcast.exists'],
always_run=True)
def podcast_delete(self):
if self.podcast_ids is None:
raise SkipTest('did not store self.podcast_ids')
if not self.delete_podcast:
raise SkipTest('not deleting already existing podcast')
for pcid in self.podcast_ids:
res = self.mc.delete_podcast_series(pcid)
assert_equal(res, pcid)
@retry
def assert_podcast_does_not_exist(pcid):
found = [pc for pc in self.mc.get_all_podcast_series()
if pc['seriesId'] == pcid]
assert_equal(len(found), 0)
for pcid in self.podcast_ids:
assert_podcast_does_not_exist(pcid)
# These decorators just prevent setting groups and depends_on over and over.
# They won't work right with additional settings; if that's needed this
# pattern should be factored out.
# TODO it'd be nice to have per-client test groups
song_test = test(groups=['song', 'song.exists'], depends_on=[song_create])
playlist_test = test(groups=['playlist', 'playlist.exists'],
depends_on=[playlist_create])
plentry_test = test(groups=['plentry', 'plentry.exists'],
depends_on=[plentry_create])
station_test = test(groups=['station', 'station.exists'], depends_on=[station_create])
podcast_test = test(groups=['podcast', 'podcast.exists'], depends_on=[podcast_create])
# Non-wonky tests resume down here.
# ---------
# MM tests
# ---------
def mm_get_quota(self):
# just testing the call is successful
self.mm.get_quota()
@song_test
def mm_list_new_songs(self):
# mm only includes user-uploaded songs
self.assert_songs_state(self.mm.get_uploaded_songs, sids(self.user_songs), present=True)
self.assert_songs_state(self.mm.get_uploaded_songs, sids(self.store_songs), present=False)
@test
def mm_list_songs_inc_equal(self):
self.assert_list_inc_equivalence(self.mm.get_uploaded_songs)
@song_test
def mm_download_song(self):
@retry
def assert_download(sid):
filename, audio = self.mm.download_song(sid)
# TODO could use original filename to verify this
# but, when manually checking, got modified title occasionally
assert_true(filename.endswith('.mp3'))
assert_is_not_none(audio)
assert_download(self.user_songs[0].sid)
# ---------
# WC tests
# ---------
# @test
# def wc_get_registered_devices(self):
# # no logic; just checking schema
# self.wc.get_registered_devices()
# @test
# def wc_get_shared_playlist_info(self):
# expected = {
# u'author': u'gmusic api',
# u'description': u'description here',
# u'title': u'public title here',
# u'num_tracks': 2
# }
# assert_equal(
# self.wc.get_shared_playlist_info(TEST_PLAYLIST_SHARETOKEN),
# expected
# )
# @test
# @subscription
# def wc_get_store_stream_urls(self):
# urls = self.wc.get_stream_urls(TEST_STORE_SONG_ID)
# assert_true(len(urls) > 1)
# @test
# @subscription
# def wc_stream_store_track_with_header(self):
# audio = self.wc.get_stream_audio(TEST_STORE_SONG_ID, use_range_header=True)
# assert_equal(md5(audio).hexdigest(), TEST_STORE_SONG_WC_HASH)
# @test
# @subscription
# def wc_stream_store_track_without_header(self):
# audio = self.wc.get_stream_audio(TEST_STORE_SONG_ID, use_range_header=False)
# assert_equal(md5(audio).hexdigest(), TEST_STORE_SONG_WC_HASH)
# @song_test
# def wc_get_download_info(self):
# url, download_count = self.wc.get_song_download_info(self.user_songs[0].sid)
# assert_is_not_none(url)
# @song_test
# def wc_get_uploaded_stream_urls(self):
# urls = self.wc.get_stream_urls(self.user_songs[0].sid)
# assert_equal(len(urls), 1)
# url = urls[0]
# assert_is_not_none(url)
# assert_equal(url.split(':')[0], 'https')
# @song_test
# def wc_upload_album_art(self):
# url = self.wc.upload_album_art(self.user_songs[0].sid, test_utils.image_filename)
# assert_equal(url[:4], 'http')
# # TODO download the track and verify the metadata changed
# ---------
# MC tests
# ---------
@test
def mc_get_registered_devices(self):
# no logic; just checking schema
self.mc.get_registered_devices()
@test
def mc_get_browse_podcast_hierarchy(self):
# no logic; just checking schema
self.mc.get_browse_podcast_hierarchy()
@test
def mc_get_browse_podcast_series(self):
# no logic; just checking schema
self.mc.get_browse_podcast_series()
@test
def mc_get_listen_now_items(self):
# no logic; just checking schema
self.mc.get_listen_now_items()
@test
def mc_get_listen_now_situations(self):
# no logic; just checking schema
self.mc.get_listen_now_situations()
@test
def mc_list_stations_inc_equal(self):
self.assert_list_inc_equivalence(self.mc.get_all_stations)
@test
def mc_list_shared_playlist_entries(self):
entries = self.mc.get_shared_playlist_contents(TEST_PLAYLIST_SHARETOKEN)
assert_true(len(entries) > 0)
@test
def mc_stream_podcast_episode(self):
raise SkipTest('podcast ids keep changing')
# uses frozen device_id
# url = self.mc.get_podcast_episode_stream_url(TEST_PODCAST_EPISODE_ID)
# audio = self.mc.session._rsession.get(url).content
# assert_equal(md5(audio).hexdigest(), TEST_PODCAST_EPISODE_HASH)
@test
@subscription
def mc_stream_store_track(self):
url = self.mc.get_stream_url(TEST_STORE_SONG_ID) # uses frozen device_id
audio = self.mc.session._rsession.get(url).content
assert_equal(md5(audio).hexdigest(), TEST_STORE_SONG_MC_HASH)
@song_test
def mc_get_uploaded_track_stream_url(self):
url = self.mc.get_stream_url(self.user_songs[0].sid)
assert_is_not_none(url)
assert_equal(url[:4], 'http')
@staticmethod
@retry
def _assert_song_key_equal_to(method, sid, key, value):
"""
:param method: eg self.mc.get_all_songs
:param sid: song id
:param key: eg 'rating'
:param value: eg '1'
"""
songs = method()
if not isinstance(songs, list):
# kind of a hack to support get_track_info as well
songs = [songs]
found = [s for s in songs if id_or_nid(s) == sid]
assert_equal(len(found), 1)
assert_equal(found[0][key], value)
return found[0]
# how can I get the rating key to show up for store tracks?
# it works in Google's clients!
# @test
# @subscription
# def mc_change_store_song_rating(self):
# song = self.mc.get_track_info(TEST_STORE_SONG_ID)
# # increment by one but keep in rating range
# rating = int(song.get('rating', '0')) + 1
# rating = str(rating % 6)
# self.mc.rate_songs(song, rating)
# self._assert_song_key_equal_to(lambda: self.mc.get_track_info(TEST_STORE_SONG_ID),
# id_or_nid(song),
# song['rating'])
@song_test
def mc_change_uploaded_song_rating(self):
song = self._assert_song_key_equal_to(
self.mc.get_all_songs,
self.all_songs[0].sid,
'rating',
'0') # initially unrated
self.mc.rate_songs(song, 1)
self._assert_song_key_equal_to(self.mc.get_all_songs, song['id'], 'rating', '1')
self.mc.rate_songs(song, 0)
@song_test
@retry
def mc_get_promoted_songs(self):
song = self.mc.get_track_info(TEST_STORE_SONG_ID)
self.mc.rate_songs(song, 5)
promoted = self.mc.get_promoted_songs()
assert_true(len(promoted))
self.mc.rate_songs(song, 0)
def _test_increment_playcount(self, sid):
matching = [t for t in self.mc.get_all_songs()
if t['id'] == sid]
assert_equal(len(matching), 1)
# playCount is an optional field.
initial_playcount = matching[0].get('playCount', 0)
self.mc.increment_song_playcount(sid, 2)
self._assert_song_key_equal_to(
self.mc.get_all_songs,
sid,
'playCount',
initial_playcount + 2)
@song_test
def mc_increment_uploaded_song_playcount(self):
self._test_increment_playcount(self.all_songs[0].sid)
# Fails silently. See https://github.com/simon-weber/gmusicapi/issues/349.
# @song_test
# @subscription
# def mc_increment_store_song_playcount(self):
# self._test_increment_playcount(self.all_songs[1].sid)
@song_test
def mc_change_uploaded_song_title_fails(self):
# this used to work, but now only ratings can be changed.
# this test is here so I can tell if this starts working again.
song = self.assert_songs_state(self.mc.get_all_songs, [self.all_songs[0].sid],
present=True)[0]
old_title = song.title
new_title = old_title + '_mod'
# Mobileclient.change_song_metadata is deprecated, so
# ignore its deprecation warning.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.mc.change_song_metadata({'id': song.sid, 'title': new_title})
self._assert_song_key_equal_to(self.mc.get_all_songs, song.sid, 'title', old_title)
@song_test
def mc_list_songs_inc_equal(self):
self.assert_list_inc_equivalence(self.mc.get_all_songs)
@song_test
def mc_list_songs_updated_after(self):
songs_last_minute = self.mc.get_all_songs(
updated_after=datetime.datetime.now() - datetime.timedelta(minutes=1))
assert_not_equal(len(songs_last_minute), 0)
all_songs = self.mc.get_all_songs()
assert_not_equal(len(songs_last_minute), len(all_songs))
@podcast_test
def mc_list_podcast_series_inc_equal(self):
self.assert_list_inc_equivalence(self.mc.get_all_podcast_series)
@playlist_test
def mc_list_playlists_inc_equal(self):
self.assert_list_inc_equivalence(self.mc.get_all_playlists)
@playlist_test
def mc_edit_playlist_name(self):
new_name = TEST_PLAYLIST_NAME + '_mod'
plid = self.mc.edit_playlist(self.playlist_ids[0], new_name=new_name)
assert_equal(self.playlist_ids[0], plid)
@retry # change takes time to propagate
def assert_name_equal(plid, name):
playlists = self.mc.get_all_playlists()
found = [p for p in playlists if p['id'] == plid]
assert_equal(len(found), 1)
assert_equal(found[0]['name'], name)
assert_name_equal(self.playlist_ids[0], new_name)
# revert
self.mc.edit_playlist(self.playlist_ids[0], new_name=TEST_PLAYLIST_NAME)
assert_name_equal(self.playlist_ids[0], TEST_PLAYLIST_NAME)
@playlist_test
def mc_edit_playlist_description(self):
new_description = TEST_PLAYLIST_DESCRIPTION + '_mod'
plid = self.mc.edit_playlist(self.playlist_ids[0], new_description=new_description)
assert_equal(self.playlist_ids[0], plid)
@retry # change takes time to propagate
def assert_description_equal(plid, description):
playlists = self.mc.get_all_playlists()
found = [p for p in playlists if p['id'] == plid]
assert_equal(len(found), 1)
assert_equal(found[0]['description'], description)
assert_description_equal(self.playlist_ids[0], new_description)
# revert
self.mc.edit_playlist(self.playlist_ids[0], new_description=TEST_PLAYLIST_DESCRIPTION)
assert_description_equal(self.playlist_ids[0], TEST_PLAYLIST_DESCRIPTION)
@playlist_test
def mc_edit_playlist_public(self):
new_public = False
plid = self.mc.edit_playlist(self.playlist_ids[0], public=new_public)
assert_equal(self.playlist_ids[0], plid)
@retry # change takes time to propagate
def assert_public_equal(plid, public):
playlists = self.mc.get_all_playlists()
found = [p for p in playlists if p['id'] == plid]
assert_equal(len(found), 1)
assert_equal(found[0]['accessControlled'], public)
assert_public_equal(self.playlist_ids[0], new_public)
# revert
self.mc.edit_playlist(self.playlist_ids[0], public=True)
assert_public_equal(self.playlist_ids[0], True)
@playlist_test
def mc_list_playlists_updated_after(self):
pls_last_minute = self.mc.get_all_playlists(
updated_after=datetime.datetime.now() - datetime.timedelta(minutes=1))
assert_not_equal(len(pls_last_minute), 0)
print(pls_last_minute)
all_pls = self.mc.get_all_playlists()
assert_not_equal(len(pls_last_minute), len(all_pls))
@retry(tries=3)
def _mc_assert_ple_position(self, entry, pos):
"""
:param entry: entry dict
:pos: 0-based position to assert
"""
pl = self.mc_get_playlist_songs(entry['playlistId'])
indices = [i for (i, e) in enumerate(pl)
if e['id'] == entry['id']]
assert_equal(len(indices), 1)
assert_equal(indices[0], pos)
@retry
def _mc_test_ple_reodering(self, from_pos, to_pos):
if from_pos == to_pos:
raise ValueError('Will not test no-op reordering.')
pl = self.mc_get_playlist_songs(self.playlist_ids[0])
from_e = pl[from_pos]
e_before_new_pos, e_after_new_pos = None, None
if from_pos < to_pos:
adj = 0
else:
adj = -1
if to_pos - 1 >= 0:
e_before_new_pos = pl[to_pos + adj]
if to_pos + 1 < len(self.plentry_ids):
e_after_new_pos = pl[to_pos + adj + 1]
self.mc.reorder_playlist_entry(from_e,
to_follow_entry=e_before_new_pos,
to_precede_entry=e_after_new_pos)
self._mc_assert_ple_position(from_e, to_pos)
if e_before_new_pos:
self._mc_assert_ple_position(e_before_new_pos, to_pos - 1)
if e_after_new_pos:
self._mc_assert_ple_position(e_after_new_pos, to_pos + 1)
@plentry_test
def mc_reorder_ple_forwards(self):
for from_pos, to_pos in [pair for pair in
itertools.product(range(len(self.plentry_ids)), repeat=2)
if pair[0] < pair[1]]:
self._mc_test_ple_reodering(from_pos, to_pos)
@plentry_test
def mc_reorder_ple_backwards(self):
playlist_len = len(self.plentry_ids)
for from_pos, to_pos in [pair for pair in
itertools.product(range(playlist_len), repeat=2)
if pair[0] > pair[1]]:
self._mc_test_ple_reodering(from_pos, to_pos)
# This fails, unfortunately, which means n reorderings mean n
# separate calls in the general case.
# @plentry_test
# def mc_reorder_ples_forwards(self):
# pl = self.mc_get_playlist_songs(self.playlist_ids[0])
# # rot2, eg 0123 -> 2301
# pl.append(pl.pop(0))
# pl.append(pl.pop(0))
# mutate_call = mobileclient.BatchMutatePlaylistEntries
# mutations = [
# mutate_call.build_plentry_reorder(
# pl[-1], pl[-2]['clientId'], None),
# mutate_call.build_plentry_reorder(
# pl[-2], pl[-3]['clientId'], pl[-1]['clientId'])
# ]
# self.mc._make_call(mutate_call, [mutations])
# self._mc_assert_ple_position(pl[-1], len(pl) - 1)
# self._mc_assert_ple_position(pl[-2], len(pl) - 2)
@station_test
@retry # sometimes this comes back with no data key
@subscription
def mc_list_station_tracks(self):
for station_id in self.station_ids:
self.mc.get_station_tracks(station_id, num_tracks=1)
# used to assert that at least 1 track came back, but
# our dummy uploaded track won't match anything
self.mc.get_station_tracks(station_id, num_tracks=1,
recently_played_ids=[TEST_STORE_SONG_ID])
self.mc.get_station_tracks(station_id, num_tracks=1,
recently_played_ids=[self.user_songs[0].sid])
def mc_list_IFL_station_tracks(self):
assert_equal(len(self.mc.get_station_tracks('IFL', num_tracks=1)),
1)
@test(groups=['search'])
def mc_search_store_no_playlists(self):
res = self.mc.search('morning', max_results=100)
res.pop('genre_hits') # Genre cluster is returned but without results in the new response.
# TODO playlist and situation results are not returned consistently.
res.pop('playlist_hits')
res.pop('situation_hits')
with Check() as check:
for type_, hits in res.items():
if ((not test_subscription_features() and
type_ in ('artist_hits', 'song_hits', 'album_hits'))):
# These results aren't returned for non-sub accounts.
check.true(len(hits) == 0, "%s had %s hits, expected 0" % (type_, len(hits)))
else:
check.true(len(hits) > 0, "%s had %s hits, expected > 0" % (type_, len(hits)))
@test
def mc_artist_info(self):
aid = 'Apoecs6off3y6k4h5nvqqos4b5e' # amorphis
optional_keys = set(('albums', 'topTracks', 'related_artists'))
include_all_res = self.mc.get_artist_info(aid, include_albums=True,
max_top_tracks=1, max_rel_artist=1)
no_albums_res = self.mc.get_artist_info(aid, include_albums=False)
no_rel_res = self.mc.get_artist_info(aid, max_rel_artist=0)
no_tracks_res = self.mc.get_artist_info(aid, max_top_tracks=0)
with Check() as check:
check.true(set(include_all_res.keys()) & optional_keys == optional_keys)
check.true(set(no_albums_res.keys()) & optional_keys ==
optional_keys - {'albums'})
check.true(set(no_rel_res.keys()) & optional_keys ==
optional_keys - {'related_artists'})
check.true(set(no_tracks_res.keys()) & optional_keys ==
optional_keys - {'topTracks'})
@test
@retry
def mc_album_info(self):
include_tracks = self.mc.get_album_info(TEST_STORE_ALBUM_ID, include_tracks=True)
no_tracks = self.mc.get_album_info(TEST_STORE_ALBUM_ID, include_tracks=False)
with Check() as check:
check.true('tracks' in include_tracks)
check.true('tracks' not in no_tracks)
del include_tracks['tracks']
check.equal(include_tracks, no_tracks)
@test
def mc_track_info(self):
self.mc.get_track_info(TEST_STORE_SONG_ID) # just for the schema
@test
def mc_podcast_series_info(self):
optional_keys = {'episodes'}
include_episodes = self.mc.get_podcast_series_info(TEST_PODCAST_SERIES_ID, max_episodes=1)
no_episodes = self.mc.get_podcast_series_info(TEST_PODCAST_SERIES_ID, max_episodes=0)
with Check() as check:
check.true(set(include_episodes.keys()) & optional_keys == optional_keys)
check.true(set(no_episodes.keys()) & optional_keys ==
optional_keys - {'episodes'})
@test(groups=['genres'])
def mc_all_genres(self):
expected_genres = {u'COMEDY_SPOKEN_WORD_OTHER', u'COUNTRY', u'HOLIDAY', u'R_B_SOUL',
u'FOLK', u'LATIN', u'CHRISTIAN_GOSPEL', u'ALTERNATIVE_INDIE', u'POP',
u'ROCK', u'WORLD', u'VOCAL_EASY_LISTENING', u'HIP_HOP_RAP', u'JAZZ',
u'METAL', u'REGGAE_SKA', u'SOUNDTRACKS_CAST_ALBUMS', u'DANCE_ELECTRONIC',
u'CLASSICAL', u'NEW_AGE', u'BLUES', u'CHILDREN_MUSIC'}
res = self.mc.get_genres()
assert_equal(set([e['id'] for e in res]), expected_genres)
@test(groups=['genres'])
def mc_specific_genre(self):
expected_genres = {u'PROGRESSIVE_METAL', u'CLASSIC_METAL', u'HAIR_METAL', u'INDUSTRIAL',
u'ALT_METAL', u'THRASH', u'METALCORE', u'BLACK_DEATH_METAL',
u'DOOM_METAL'}
res = self.mc.get_genres('METAL')
assert_equal(set([e['id'] for e in res]), expected_genres)
@test(groups=['genres'])
def mc_leaf_parent_genre(self):
assert_equal(self.mc.get_genres('AFRICA'), [])
@test(groups=['genres'])
def mc_invalid_parent_genre(self):
assert_equal(self.mc.get_genres('bogus genre'), [])
|
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cmsis_dap_core import CMSIS_DAP_Protocol
from transport import Transport, TransferError, READ_START, READ_NOW, READ_END
import logging
from time import sleep
# !! These values are A[2:3] and not A[3:2]
DP_REG = {'IDCODE' : 0x00,
'ABORT' : 0x00,
'CTRL_STAT': 0x04,
'SELECT': 0x08
}
AP_REG = {'CSW' : 0x00,
'TAR' : 0x04,
'DRW' : 0x0C,
'IDR' : 0xFC
}
IDCODE = 0 << 2
AP_ACC = 1 << 0
DP_ACC = 0 << 0
READ = 1 << 1
WRITE = 0 << 1
VALUE_MATCH = 1 << 4
MATCH_MASK = 1 << 5
APBANKSEL = 0x000000f0
# AP Control and Status Word definitions
CSW_SIZE = 0x00000007
CSW_SIZE8 = 0x00000000
CSW_SIZE16 = 0x00000001
CSW_SIZE32 = 0x00000002
CSW_ADDRINC = 0x00000030
CSW_NADDRINC = 0x00000000
CSW_SADDRINC = 0x00000010
CSW_PADDRINC = 0x00000020
CSW_DBGSTAT = 0x00000040
CSW_TINPROG = 0x00000080
CSW_HPROT = 0x02000000
CSW_MSTRTYPE = 0x20000000
CSW_MSTRCORE = 0x00000000
CSW_MSTRDBG = 0x20000000
CSW_RESERVED = 0x01000000
CSW_VALUE = (CSW_RESERVED | CSW_MSTRDBG | CSW_HPROT | CSW_DBGSTAT | CSW_SADDRINC)
TRANSFER_SIZE = {8: CSW_SIZE8,
16: CSW_SIZE16,
32: CSW_SIZE32
}
# Response values to DAP_Connect command
DAP_MODE_SWD = 1
DAP_MODE_JTAG = 2
# DP Control / Status Register bit definitions
CTRLSTAT_STICKYORUN = 0x00000002
CTRLSTAT_STICKYCMP = 0x00000010
CTRLSTAT_STICKYERR = 0x00000020
COMMANDS_PER_DAP_TRANSFER = 12
class CMSIS_DAP(Transport):
"""
This class implements the CMSIS-DAP protocol
"""
def __init__(self, interface):
super(CMSIS_DAP, self).__init__(interface)
self.protocol = CMSIS_DAP_Protocol(interface)
self.packet_max_count = 0
self.packet_max_size = 0
self.csw = -1
self.dp_select = -1
self.deferred_transfer = False
self.request_list = []
self.data_list = []
self.data_read_list = []
def init(self, frequency = 1000000):
# Flush to be safe
self.flush()
# connect to DAP, check for SWD or JTAG
self.mode = self.protocol.connect()
# set clock frequency
self.protocol.setSWJClock(frequency)
# configure transfer
self.protocol.transferConfigure()
if (self.mode == DAP_MODE_SWD):
# configure swd protocol
self.protocol.swdConfigure()
# switch from jtag to swd
self.JTAG2SWD()
# read ID code
logging.info('IDCODE: 0x%X', self.readDP(DP_REG['IDCODE']))
# clear errors
self.protocol.writeAbort(0x1e)
elif (self.mode == DAP_MODE_JTAG):
# configure jtag protocol
self.protocol.jtagConfigure(4)
# Test logic reset, run test idle
self.protocol.swjSequence([0x1F])
# read ID code
logging.info('IDCODE: 0x%X', self.protocol.jtagIDCode())
# clear errors
self.writeDP(DP_REG['CTRL_STAT'], CTRLSTAT_STICKYERR | CTRLSTAT_STICKYCMP | CTRLSTAT_STICKYORUN)
return
def uninit(self):
self.flush()
self.protocol.disconnect()
return
def JTAG2SWD(self):
data = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
self.protocol.swjSequence(data)
data = [0x9e, 0xe7]
self.protocol.swjSequence(data)
data = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
self.protocol.swjSequence(data)
data = [0x00]
self.protocol.swjSequence(data)
def info(self, request):
self.flush()
resp = None
try:
resp = self.protocol.dapInfo(request)
except KeyError:
logging.error('request %s not supported', request)
return resp
def clearStickyErr(self):
if (self.mode == DAP_MODE_SWD):
self.writeDP(0x0, (1 << 2))
elif (self.mode == DAP_MODE_JTAG):
self.writeDP(DP_REG['CTRL_STAT'], CTRLSTAT_STICKYERR)
def writeMem(self, addr, data, transfer_size = 32):
self.writeAP(AP_REG['CSW'], CSW_VALUE | TRANSFER_SIZE[transfer_size])
if transfer_size == 8:
data = data << ((addr & 0x03) << 3)
elif transfer_size == 16:
data = data << ((addr & 0x02) << 3)
self._write(WRITE | AP_ACC | AP_REG['TAR'], addr)
self._write(WRITE | AP_ACC | AP_REG['DRW'], data)
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
def readMem(self, addr, transfer_size = 32, mode = READ_NOW):
res = None
if mode in (READ_START, READ_NOW):
self.writeAP(AP_REG['CSW'], CSW_VALUE | TRANSFER_SIZE[transfer_size])
self._write(WRITE | AP_ACC | AP_REG['TAR'], addr)
self._write(READ | AP_ACC | AP_REG['DRW'])
if mode in (READ_NOW, READ_END):
resp = self._read()
res = (resp[0] << 0) | \
(resp[1] << 8) | \
(resp[2] << 16) | \
(resp[3] << 24)
# All READ_STARTs must have been finished with READ_END before using READ_NOW
assert (mode != READ_NOW) or (len(self.data_read_list) == 0)
if transfer_size == 8:
res = (res >> ((addr & 0x03) << 3) & 0xff)
elif transfer_size == 16:
res = (res >> ((addr & 0x02) << 3) & 0xffff)
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
return res
# write aligned word ("data" are words)
def writeBlock32(self, addr, data):
# put address in TAR
self.writeAP(AP_REG['CSW'], CSW_VALUE | CSW_SIZE32)
self.writeAP(AP_REG['TAR'], addr)
try:
self._transferBlock(len(data), WRITE | AP_ACC | AP_REG['DRW'], data)
except TransferError:
self.clearStickyErr()
raise
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
# read aligned word (the size is in words)
def readBlock32(self, addr, size):
# put address in TAR
self.writeAP(AP_REG['CSW'], CSW_VALUE | CSW_SIZE32)
self.writeAP(AP_REG['TAR'], addr)
data = []
try:
resp = self._transferBlock(size, READ | AP_ACC | AP_REG['DRW'])
except TransferError:
self.clearStickyErr()
raise
for i in range(len(resp)/4):
data.append( (resp[i*4 + 0] << 0) | \
(resp[i*4 + 1] << 8) | \
(resp[i*4 + 2] << 16) | \
(resp[i*4 + 3] << 24))
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
return data
def readDP(self, addr, mode = READ_NOW):
res = None
if mode in (READ_START, READ_NOW):
self._write(READ | DP_ACC | (addr & 0x0c))
if mode in (READ_NOW, READ_END):
resp = self._read()
res = (resp[0] << 0) | \
(resp[1] << 8) | \
(resp[2] << 16) | \
(resp[3] << 24)
# All READ_STARTs must have been finished with READ_END before using READ_NOW
assert (mode != READ_NOW) or (len(self.data_read_list) == 0)
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
return res
def writeDP(self, addr, data):
if addr == DP_REG['SELECT']:
if data == self.dp_select:
return
self.dp_select = data
self._write(WRITE | DP_ACC | (addr & 0x0c), data)
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
return True
def writeAP(self, addr, data):
ap_sel = addr & 0xff000000
bank_sel = addr & APBANKSEL
self.writeDP(DP_REG['SELECT'], ap_sel | bank_sel)
if addr == AP_REG['CSW']:
if data == self.csw:
return
self.csw = data
self._write(WRITE | AP_ACC | (addr & 0x0c), data)
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
return True
def readAP(self, addr, mode = READ_NOW):
res = None
if mode in (READ_START, READ_NOW):
ap_sel = addr & 0xff000000
bank_sel = addr & APBANKSEL
self.writeDP(DP_REG['SELECT'], ap_sel | bank_sel)
self._write(READ | AP_ACC | (addr & 0x0c))
if mode in (READ_NOW, READ_END):
resp = self._read()
res = (resp[0] << 0) | \
(resp[1] << 8) | \
(resp[2] << 16) | \
(resp[3] << 24)
# All READ_STARTs must have been finished with READ_END before using READ_NOW
assert (mode != READ_NOW) or (len(self.data_read_list) == 0)
# If not in deferred mode flush after calls to _read or _write
if not self.deferred_transfer:
self.flush()
return res
def reset(self):
self.flush()
self.protocol.setSWJPins(0, 'nRESET')
sleep(0.1)
self.protocol.setSWJPins(0x80, 'nRESET')
sleep(0.1)
def assertReset(self, asserted):
self.flush()
if asserted:
self.protocol.setSWJPins(0, 'nRESET')
else:
self.protocol.setSWJPins(0x80, 'nRESET')
def setClock(self, frequency):
self.flush()
self.protocol.setSWJClock(frequency)
def setDeferredTransfer(self, enable):
"""
Allow transfers to be delayed and buffered
By default deferred transfers are turned off. All reads and
writes will be completed by the time the function returns.
When enabled packets are buffered and sent all at once, which
increases speed. When memory is written to, the transfer
might take place immediately, or might take place on a future
memory write. This means that an invalid write could cause an
exception to occur on a later, unrelated write. To guarantee
that previous writes are complete call the flush() function.
The behaviour of read operations is determined by the modes
READ_START, READ_NOW and READ_END. The option READ_NOW is the
default and will cause the read to flush all previous writes,
        and read the data immediately. To improve performance, multiple
        reads can be made using READ_START and finished later with READ_END.
        This allows the reads to be buffered and sent all at once. Note - all
        READ_ENDs must be called before a call using READ_NOW can be made.
"""
if self.deferred_transfer and not enable:
self.flush()
self.deferred_transfer = enable
def flush(self):
"""
Flush out all commands
"""
transfer_count = len(self.request_list)
if transfer_count > 0:
assert transfer_count <= COMMANDS_PER_DAP_TRANSFER
try:
data = self.protocol.transfer(transfer_count, self.request_list, self.data_list)
self.data_read_list.extend(data)
except TransferError:
# Dump any pending commands
self.request_list = []
self.data_list = []
# Dump any data read
self.data_read_list = []
# Invalidate cached registers
self.csw = -1
self.dp_select = -1
# Clear error
self.clearStickyErr()
raise
self.request_list = []
self.data_list = []
def _write(self, request, data = 0):
"""
Write a single command
"""
assert type(request) in (int, long), "request is not an int"
assert type(data) in (int, long), "data is not an int"
self.request_list.append(request)
self.data_list.append(data)
transfer_count = len(self.request_list)
if (transfer_count >= COMMANDS_PER_DAP_TRANSFER):
self.flush()
def _read(self):
"""
Read the response from a single command
"""
if len(self.data_read_list) < 4:
self.flush()
data = self.data_read_list[0:4]
self.data_read_list = self.data_read_list[4:]
return data
def _transferBlock(self, count, request, data = [0]):
self.flush()
return self.protocol.transferBlock(count, request, data)
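# Illustrative usage sketch (added for documentation; not part of the original
# module) of the deferred-transfer API described in setDeferredTransfer().
# The name `dap` is a hypothetical, already-constructed instance of the class
# above; how it is created depends on code outside this file.
#
#   dap.setDeferredTransfer(True)
#   # Queue several reads without waiting for each result...
#   dap.readAP(AP_REG['CSW'], READ_START)
#   dap.readDP(DP_REG['SELECT'], READ_START)
#   # ...then collect the results in the order the reads were started.
#   csw = dap.readAP(AP_REG['CSW'], READ_END)
#   select = dap.readDP(DP_REG['SELECT'], READ_END)
#   # All READ_ENDs must be collected before any READ_NOW call is made;
#   # flush() guarantees that buffered writes have completed.
#   dap.flush()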
|
|
#!/usr/bin/env python2
###############################################################################
# ------------------------- Description ---------------------------------------
###############################################################################
# This script is designed to create a mask of stagnation days as defined by
# Wang and Angell [1999]. The three variables read in and assessed for
# stagnation conditions are 500 mb and 1000 mb (later taken as SLP) geostrophic
# winds, and precipitation.
# TODO: UPDATE RH calculation to better estimate
# TODO: Make a dynamic argument to choose all domain or "_NA_" domain.
# TODO: The current working version only uses the merged-years NA domain.
# TODO: Make block identification not terrible.
# Set the directory where the data structure starts
# data creation pipeline:
# get_era_interim_data.py
# average6hourlyData.py,
# merge_nc_data.py
# TODO: Days since last rain mask in func make_era_interim_met_masks()
import os
import os.path
import numpy as np
import sys
from netCDF4 import Dataset
import time as timer
from datetime import timedelta
import datetime
import matplotlib.pyplot as plt
plt.ioff() # makes it so figures only show if you say .show()
import cesm_nc_manager as cnm
from mpl_toolkits.basemap import Basemap, cm
from skimage import measure
from skimage import filters
region = "_"
# Figure out what machine this code is running on. Set file paths.
drive = cnm.getDrive()
dataDirBase = drive + "era_interim_nc_daily_merged/"
################################################################################
# ------------------------- Functions -----------------------------------------
################################################################################
def find_blocking_days(sdFactor=0.5, startDate="2003-01-01", endDate="2016-12-31",
minDays=3, plotBlocks=False, minBlobSize=15.):
"""
This function finds blocking days based on a very simple definition.
    Blocking days are defined as days when the 500 mb geopotential height
    is at least sdFactor standard deviations above the Julian-day mean
    (1979-2016) for at least minDays consecutive days.
    Arguments:
        sdFactor: Number multiplied by the Julian-day std when setting
                  the threshold.
startDate: The first date to create blocking event mask for.
endDate: The last date to create blocking event mask for.
minDays: The minimum number of consecutive days required for high z
values to be considered a block.
plotBlocks: True or False. If true the z climatology, daily value, and
identified area of blocking are plotted and saved. SLOW.
minBlobSize: The minimum size of a blob in terms of degrees. Value
is squared.
return:
blocking days: An array of 1 and 0 indicating if blocking
exists (where equal to 1).
"""
# get the start and end times
startDate = datetime.datetime.strptime(startDate, '%Y-%m-%d')
endDate = datetime.datetime.strptime(endDate, '%Y-%m-%d')
# NOTE: File with all 6 hourly z500 files downloaded with get_all_era_interim_z.py
# NOTE: These 6 hourly data were made daily using make_all_z500_annual.py
all_Z500_dir = drive + 'era_interim_nc_daily/'
z_nc = Dataset(all_Z500_dir + 'z_all_daily.nc', 'r')
z = z_nc.variables['z500']
lat = z_nc.variables['latitude'][:]
lon = z_nc.variables['longitude'][:]
time = z_nc.variables['time']
# These are for measuring feature size
dx = np.abs(np.mean(np.diff(lon)))
dy = np.abs(np.mean(np.diff(lat)))
    # Translates the minBlobSize degree argument to the number of grid indices
    # needed to span this many degrees in our latitude x longitude gridded data.
blobSpan = np.round(minBlobSize / dx)
# # For test plotting make bounds
# minLat = lat.min()
# maxLat = lat.max()
# minLon = lon[241]
# maxLon = lon[479]
# map = Basemap(projection='robin',llcrnrlat=minLat, urcrnrlat=maxLat,\
# llcrnrlon=minLon, urcrnrlon=maxLon,resolution='c',\
# lon_0=0, lat_0=90)
map = Basemap(projection='ortho',lon_0=-105,lat_0=60,resolution='l')
# grid coords for mesh plotting of values.
lons, lats = np.meshgrid(lon, lat)
x, y = map(lons, lats)
# Make a nice month and time array for masking
t, month, year = cnm.get_era_interim_time(time)
nTime = len(t)
day = []
for i in range(len(t)):
day.append(t[i].day)
day = np.array(day)
# # Now sinces they are annoying, lets ignore Feb 29 all the time. Meaning
# # we are getting rid of it in the time and z arrays.
# notLeapDayMask = (month != 2) & (day != 29)
#
# t = t[notLeapDayMask]
# month = month[notLeapDayMask]
# year = year[notLeapDayMask]
# day = day[notLeapDayMask]
# nTime = len(t)
#
# if np.sum((month == 2) & (day == 29)) > 0:
# raise ValueError('There is still a February 29th in the time series.')
# Create Julian day specific threshold values based on that JDay
    # mean and sd for the ~39 years of reanalysis I am working with.
    # z_thresh will have the same spatial shape as z but with a
    # Julian day time axis.
jDays = np.arange(1, 367)
nJDays = len(jDays)
# Create an array of the Julian dates associated with the time
# axis of the Z data.
t_jDay = []
for i in range(len(t)):
thisJDay = t[i].timetuple().tm_yday
t_jDay.append(thisJDay)
t_jDay = np.array(t_jDay)
# Create the threshold mask.
# NOTE: If we save the spatial_mean and spatial_std, masks could be created
# NOTE: later on much easier.
    # NOTE: Some years June 1 has a different Julian day because of leap year.
# NOTE: so these methods will smooth things out a bit.
spatial_mean = np.zeros((nJDays, z.shape[1], z.shape[2]))
spatial_std = np.zeros((nJDays, z.shape[1], z.shape[2]))
z_thresh = np.zeros((nJDays, z.shape[1], z.shape[2]))
# Sample statistics summary.
    # Used to check whether JDay 366 has a much larger std. It turns out to be
    # smaller, probably because its variability is not fully represented.
julianDaySD = np.zeros((nJDays))
nSamples = np.zeros(nJDays)
for i in range(nJDays):
# Find mask for this jDay days in the record, should be ~40
dayMask = jDays[i] == t_jDay
nSamples[i] = np.sum(dayMask)
spatial_mean[i,:,:] = np.mean(z[dayMask,:,:], axis=0)
spatial_std[i,:,:] = np.std(z[dayMask,:,:], axis=0)
julianDaySD[i] = np.mean(spatial_std[i,:,:])
z_thresh[i,:,:] = spatial_mean[i,:,:] + (spatial_std[i,:,:] * sdFactor)
    # Only create a blocking event mask for dates between the date arguments
analysisDateIndex = np.where((t >= startDate) & (t <= endDate))[0]
    nAnalysisDays = len(analysisDateIndex)
    # Create an array where each day's blocking mask summary can be saved.
    blocking_mask = np.zeros(shape=(nAnalysisDays, z.shape[1], z.shape[2]), dtype=bool)
    for i in range(nAnalysisDays):
# for this analysis date (i), where does that put us on jDays?
print 'working on: ' + str(t[analysisDateIndex[i]])
jDayIndex = np.where(t_jDay[analysisDateIndex[i]] == jDays)[0][0]
        # Here, the numbers are in reference to days in the past from the
        # day we are trying to make a mask for.
high_z_masks = np.zeros((minDays, z.shape[1], z.shape[2]))
for d in range(minDays):
high_z_masks[d,:,:] = z[analysisDateIndex[i]-d, :, :] >= z_thresh[jDayIndex-d,:,:]
# Figure out where these 2D arrays are all true, sum over days dimension
block_count = np.sum(high_z_masks, axis=0)
# Turn the boolean into a numeric 1 or 0 array
ridgeMask = np.array(block_count == minDays, dtype=int)
blocking_mask[i, :, :] = ridgeMask
# For plotting, mask out where there are no blocks, easy to plot blocks
ridgeMask_ma = np.ma.masked_where(ridgeMask==0, ridgeMask)
ridge_z_values_ma = np.ma.masked_where(ridgeMask==0, z[analysisDateIndex[i],:,:])
        # Show this date's Z for plotting; divide by 100 to show decameters
# the way they do at: http://weather.rap.ucar.edu/upper/upaCNTR_500.gif
todays_z = z[analysisDateIndex[i],:,:] / 100.
todays_climo_z = spatial_mean[jDayIndex,:,:] / 100.
#########################################################################
# Set a minimum size requirement for block 'features'.
# This of course means features have to be identified.
# http://www.scipy-lectures.org/packages/scikit-image/auto_examples/plot_labels.html
#########################################################################
        # TODO: Try a 15 x 15 deg minimum size to define a block. This means we have
        # TODO: to check the blobs. I think we should do this from the centroid.
im = ridgeMask
blobs = im == 1 # Bool condition of blobs is where ridgemask == 1 by def
all_labels = measure.label(blobs)
blobs_labels = measure.label(blobs, background=0, connectivity=2)
        uniqueBlobIDs = np.unique(blobs_labels)
        # Check each blob for the minimum size requirement
        for b in uniqueBlobIDs:
if b !=0: # 0 is background so skip
blobMask = b == blobs_labels
blobArea = np.sum(blobMask)
if blobArea < blobSpan**2:
# I do not want this to remain a blob
blobs_labels[blobMask] = 0
# Mask non-blobs for plotting
blobs_labels_ma = np.ma.masked_where(blobs_labels==0, blobs_labels)
# TODO: !!!!!!!!!
        # Go through unique blob labels. The sum of the blob label mask is the
        # total area. Use that as a cutoff and get rid of blobs that are too
# small.
if plotBlocks:
# Plot the mean field for this day also.
# # plotting subset indicies
# lon_g, lat_g = np.meshgrid(lon, lat)
lat_i = (lat > 20)
# m = (lon_g > 180.) & (lon_g < 360.) & (lat_g > 8.) & (lat_g < 80.)
fig = plt.figure(figsize=(12,12))
map.drawcoastlines()
map.drawstates()
map.drawcountries()
            # Plot the Julian day climatology in pcolor shading.
c_z_climotology = map.pcolor(x[lat_i,:], y[lat_i,:], todays_climo_z[lat_i,:])
bar = plt.colorbar(c_z_climotology)
# Show the identified features
c_peaks = map.pcolor(x[lat_i,:], y[lat_i,:], blobs_labels_ma[lat_i,:], cmap="spectral")
# Plot the daily heights as contours using the same colorbar
c_height = map.contour(x[lat_i,:], y[lat_i,:], todays_z[lat_i,:], linewidths=4)
plt.clabel(c_height, inline=1, fontsize=10)
# shade out the area where we define a block using semi-transparent
# shading.
c_ridge = map.pcolor(x[lat_i,:], y[lat_i,:], ridgeMask_ma[lat_i,:],
hatch="/.", alpha=0.)
dateString = str(t[analysisDateIndex[i]])[0:10]
plt.title('Date: ' + dateString + \
                      ' Julian day = ' + str(jDays[jDayIndex]))
plt.savefig('../Figures/block_test/z_show_' + dateString\
+ '_sd='+str(sdFactor)+\
'_days='+str(minDays)+'_minBlobSize='+str(minBlobSize)+\
'.png')
plt.close(fig)
# Finally close the very large nc file connection.
z_nc.close()
return blocking_mask
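# Minimal, self-contained sketch (added for illustration; not called anywhere
# in this script) of the core thresholding logic used in find_blocking_days():
# a day is flagged when z exceeds the Julian-day climatology by sdFactor
# standard deviations on minDays consecutive days. All *_demo names are
# invented for this example; the real function works on (time, lat, lon) arrays.
def _demo_blocking_threshold(sdFactor=0.5, minDays=3):
    z_demo = np.array([5500., 5520., 5800., 5810., 5805., 5500.])  # fake z500 series
    z_thresh_demo = z_demo.mean() + sdFactor * z_demo.std()
    high_z = z_demo >= z_thresh_demo                 # [F, F, T, T, T, F]
    blocked = np.zeros(len(z_demo), dtype=int)
    for d in range(minDays - 1, len(z_demo)):
        # Blocked if this day and the (minDays - 1) previous days are all high.
        blocked[d] = int(np.all(high_z[d - minDays + 1:d + 1]))
    return blocked                                   # -> [0, 0, 0, 0, 1, 0]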
def daysSinceLastRain(region="_"):
"""This function reads ecmwf reanalysis total daily precipitation and
calculates how many days since non zero precip at each location."""
# Read in the rain nc data
tp_nc = Dataset(dataDirBase + 'tp'+ region +'1992_2016.nc', 'r')
tp_meters = tp_nc.variables['tp'] # meters per calendar date
inchPerM = 39.3701 # [inch/meter]
tp = tp_meters[:] * inchPerM # This loads a really big file into the workspace
latitude = tp_nc.variables['latitude']
longitude = tp_nc.variables['longitude']
time = tp_nc.variables['time']
    # We round the daily rainfall totals to 5 decimal places, which is likely
    # already beyond the data's true precision.
tp = np.round(tp, decimals=5)
# The minimum value is being interpreted as zero. This is partially why I
# rounded the values.
# NOTE: In Python -0 is a thing and it is equal to 0.
minValue = np.min(tp)
    # Make sure there are no negative precip values floating around in these
    # data.
    if np.sum(tp < 0) > 0:
        raise ValueError('Precip less than zero detected in data. Evaluate the chosen precip file pipeline.')
# Loop through time and figure out how long it has been since zero rain for
# each grid cell.
daysSinceRain = np.zeros( tp.shape ,dtype=int)
nTime = len(time)
for t in range(nTime):
# Where did it not rain today?
dailyRainMask = tp[t,:,:] == minValue
# The first day has no memory, needs to be treated differently
if t == 0:
daysSinceRain[t,:,:][dailyRainMask] = daysSinceRain[t,:,:][dailyRainMask] + 1
else:
# Where there is rain leave the value of zero in place (zero means it rained today)
# Everywhere else, increase the value of the array by 1.
daysSinceRain[t,:,:][dailyRainMask] = daysSinceRain[t-1,:,:][dailyRainMask] + 1
# Write this as a met nc file
saveName = dataDirBase + 'days_since_rain' + region + '1992_2016.nc'
ncFile = Dataset(saveName, 'w', format='NETCDF4')
ncFile.description = 'Counter indicating days since rain > 0 at a location.'
ncFile.location = 'Global'
ncFile.createDimension('time', len(time[:]) )
ncFile.createDimension('latitude', len(latitude[:]) )
ncFile.createDimension('longitude', len(longitude[:]) )
days_since_rain_ = ncFile.createVariable('days_since_rain', 'i4', ('time','latitude','longitude'))
days_since_rain_.units = 'days since daily rain > 0'
days_since_rain_[:] = daysSinceRain
# dimension values assignments
time_ = ncFile.createVariable('time', 'i4', ('time',))
time_.units = time.units
time_[:] = time[:]
latitude_ = ncFile.createVariable('latitude', 'f4', ('latitude',))
latitude_.units = latitude.units
latitude_[:] = latitude[:]
longitude_ = ncFile.createVariable('longitude', 'f4', ('longitude',))
longitude_.units = longitude.units
longitude_[:] = longitude[:]
tp_nc.close()
return daysSinceRain
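# Minimal sketch (added for illustration; not called anywhere in this script)
# of the days-since-rain recurrence used in daysSinceLastRain(), applied to a
# single fake grid cell: the counter resets to 0 on rain days and increments
# by 1 on dry days. tp_demo is invented for this example.
def _demo_days_since_rain():
    tp_demo = np.array([0.0, 0.0, 0.3, 0.0, 0.0, 0.0, 0.1, 0.0])  # fake daily precip
    daysSince = np.zeros(len(tp_demo), dtype=int)
    for t in range(len(tp_demo)):
        if tp_demo[t] == 0.0:
            # Dry day: one more day since the last rain (first day has no memory).
            daysSince[t] = (daysSince[t - 1] + 1) if t > 0 else 1
        # Rain day: leave the value at 0.
    return daysSince  # -> [1, 2, 0, 1, 2, 3, 0, 1]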
# TODO: Make years of analysis arguments? Rather than assume NA and 2003-2016?
# TODO: 'NA' needs to be changed to 'west' as it only covers 30. - 49.5 N and
# TODO: 234. - 258.75 E.
def make_era_interim_met_masks(windSfcLim=8., wind500Lim=13., precLim=0.01,
TThresh=297.039, RHThresh=25., windThresh=6.7056,
writeNC=True, region = "_"):
"""
This function takes limits and creates masks (1 condition is true, 0 condition
is not true) for different meteorology event or threshold types.
Argument Units:
windSfcLim = m/s
wind500Lim = m/s
precLim = inches/day
# For single variable thresholds
TThresh = K
RHThresh = %
windThresh = m/s
        writeNC    = True, the masks are written to an nc file; if False,
                     they are not.
        region     = region the nc data are pre-sliced into, formatted as
                     '_regionName_'. '_' means no region and gives correct
                     filename formatting.
    These are the default definitions of stagnation as defined at:
http://www.arl.noaa.gov/documents/reports/atlas.pdf
"""
startTime = timer.time()
#############################################################################
# Load surface winds
# NOTE: x = store_x/scale + offset.
#############################################################################
u10_nc = Dataset(dataDirBase + 'u10' + region + '2003_2016.nc', 'r')
u10 = u10_nc.variables['u10'][:]
#u10_ = u10[:] #/ u10.scale_factor + u10.scale_factor????
u10_nc.close()
v10_nc = Dataset(dataDirBase + 'v10' + region + '2003_2016.nc', 'r')
v10 = v10_nc.variables['v10'][:]
v10_nc.close()
sfc_wind = np.sqrt(v10**2 + u10**2)
###############################################################################
# Load 500 mb winds
###############################################################################
v_nc = Dataset(dataDirBase + 'v'+ region +'2003_2016.nc', 'r')
level = v_nc.variables['level']
level_i = level[:] == 500
v = v_nc.variables['v'][:,level_i,:,:]
v_nc.close()
u_nc = Dataset(dataDirBase + 'u'+ region +'2003_2016.nc', 'r')
u = u_nc.variables['u'][:,level_i,:,:]
u_nc.close()
upper_wind = np.sqrt(v**2 + u**2)
del u,v, v10, u10
###############################################################################
# Get precipitation
###############################################################################
tp_nc = Dataset(dataDirBase + 'tp'+ region +'2003_2016.nc', 'r')
tp_meters = tp_nc.variables['tp'] # meters per calendar date
inchPerM = 39.3701 # [inch/m]
tp = tp_meters[:] * inchPerM
latitude = tp_nc.variables['latitude']
longitude = tp_nc.variables['longitude']
time = tp_nc.variables['time']
# build the individual masks, first tp (total precipitation)
mask_sfc = np.array(sfc_wind < windSfcLim, dtype=bool)
mask_500 = np.array(upper_wind < wind500Lim, dtype=bool)
mask_tp = np.array(tp < precLim, dtype=bool)
# Combined stagnation mask, one at a time to save working memory
    m1 = mask_sfc.astype(int) + mask_500.astype(int) + mask_tp.astype(int)
stagnation_mask = np.array(m1 == 3, dtype=int)
###############################################################################
# Sanity check the output before writing the mask to an nc file
###############################################################################
if np.max(tp[mask_tp]) >= precLim:
        print 'The maximum value of precip on stagnation days exceeds threshold!'
raise ValueError("This means creating the mask has failed!!!!!")
del mask_tp, mask_sfc, mask_500, m1
###############################################################################
# Now make individual masks for high wind, T, and low prec and RH days
# http://w1.weather.gov/glossary/index.php?word=Red%20Flag%20Warning
    # For a red flag warning:
# T > 75 F = 297.039 K
# RH% <= 25%
# surface wind >= 15 mph = 6.7056 m/s
###############################################################################
t2m_nc = Dataset(dataDirBase + 't2m'+ region +'2003_2016.nc', 'r')
t2m = t2m_nc.variables['t2m'][:]
t2m_nc.close()
# LOAD THE NEW AND IMPROVED RH ESTIMATE
RH_nc = Dataset(dataDirBase + 'rh2m'+ region +'2003_2016.nc', 'r')
RH = RH_nc.variables['rh2m'][:]
RH_nc.close()
# Make the masks
high_wind_mask = np.array(sfc_wind > windThresh, dtype=int)
low_RH_mask = np.array(RH < RHThresh, dtype=int)
high_T_mask = np.array(t2m > TThresh, dtype=int)
    blocking_mask = np.array(find_blocking_days(), dtype=int) # Calls the complex function above
low_precip_mask = np.array(tp < precLim, dtype=int)
writingComplete = timer.time()
dt = (writingComplete - startTime) / 60.
print '----------------------------------------------------------------------'
print 'It took ' + str(dt) + ' minutes to create the requested masks.'
print '----------------------------------------------------------------------'
###############################################################################
# Write the stagnation mask as daily netCDF data
###############################################################################
if writeNC:
saveName = dataDirBase + 'met_event_masks' + region + '2003_2016.nc'
ncFile = Dataset(saveName, 'w', format='NETCDF4')
ncFile.description = 'Masks indicating threshold conditions'
ncFile.location = 'Global'
ncFile.createDimension('time', len(time[:]) )
ncFile.createDimension('latitude', len(latitude[:]) )
ncFile.createDimension('longitude', len(longitude[:]) )
# Create variables on the dimension they live on
# Stagnation
stagnation_mask_ = ncFile.createVariable('stagnation_mask', 'i', ('time','latitude','longitude'))
        stagnation_mask_.units = 'limits: surface wind < ' + str(windSfcLim) + \
            ', 500 mb wind < ' + str(wind500Lim) + ', precip < ' + str(precLim)
stagnation_mask_[:] = stagnation_mask
# Precip
low_precip_mask_ = ncFile.createVariable('low_precip_mask', 'i', ('time','latitude','longitude'))
low_precip_mask_.units = 'days precip < ' + str(precLim) + ' inches/day'
low_precip_mask_[:] = low_precip_mask[:]
# wind
high_wind_mask_ = ncFile.createVariable('high_wind_mask', 'i', ('time','latitude','longitude'))
high_wind_mask_.units = 'days wind > ' + str(windThresh) + ' m/s'
high_wind_mask_[:] = high_wind_mask[:]
# RH
low_RH_mask_ = ncFile.createVariable('low_RH_mask', 'i', ('time','latitude','longitude'))
low_RH_mask_.units = 'RH% less than ' + str(RHThresh)
low_RH_mask_[:] = low_RH_mask[:]
# Temperature
high_T_mask_ = ncFile.createVariable('high_T_mask', 'i', ('time','latitude','longitude'))
high_T_mask_.units = 'T >= ' + str(TThresh)
high_T_mask_[:] = high_T_mask[:]
# Blocking days
blocking_mask_ = ncFile.createVariable('blocking_mask', 'i', ('time','latitude','longitude'))
blocking_mask_.units = 'Daily z > Jday mean by 0.5 sd for 3 days'
blocking_mask_[:] = blocking_mask[:]
# dimension values assignments
time_ = ncFile.createVariable('time', 'i4', ('time',))
time_.units = time.units
time_[:] = time[:]
latitude_ = ncFile.createVariable('latitude', 'f4', ('latitude',))
latitude_.units = latitude.units
latitude_[:] = latitude[:]
longitude_ = ncFile.createVariable('longitude', 'f4', ('longitude',))
longitude_.units = longitude.units
longitude_[:] = longitude[:]
ncFile.close()
dt = (timer.time() - writingComplete) / 60.
print '----------------------------------------------------------------------'
print 'It took ' + str(dt) + ' minutes to write the data as nc files.'
print '----------------------------------------------------------------------'
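# Minimal sketch (added for illustration; not called anywhere in this script)
# of how the individual threshold masks above combine into the stagnation
# mask: a grid cell is stagnant only where all three conditions hold on the
# same day. The *_demo arrays are invented for this example.
def _demo_combine_masks(windSfcLim=8., wind500Lim=13., precLim=0.01):
    sfc_wind_demo = np.array([3.0, 9.0, 4.0])
    upper_wind_demo = np.array([10.0, 10.0, 20.0])
    tp_demo = np.array([0.0, 0.0, 0.0])
    mask_sfc = sfc_wind_demo < windSfcLim
    mask_500 = upper_wind_demo < wind500Lim
    mask_tp = tp_demo < precLim
    m1 = mask_sfc.astype(int) + mask_500.astype(int) + mask_tp.astype(int)
    return np.array(m1 == 3, dtype=int)  # -> [1, 0, 0]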
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the CNN and Daily Mail datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
import tarfile
# Dependency imports
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow as tf
# Links to data from http://cs.nyu.edu/~kcho/DMQA/
_CNN_STORIES_DRIVE_URL = "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ"
_DAILYMAIL_STORIES_DRIVE_URL = "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs"
# Note: using See et al. (2017) as reference for data generation
# For more info, use the links below
# Train/Dev/Test Splits for summarization data
_TRAIN_URLS = "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt"
_DEV_URLS = "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt"
_TEST_URLS = "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt"
# End-of-sentence marker.
EOS = text_encoder.EOS_ID
# Techniques for data prep from See et al. (2017)
dm_single_close_quote = u"\u2019" # unicode
dm_double_close_quote = u"\u201d"
# Acceptable ways to end a sentence.
END_TOKENS = [
u".", u"!", u"?", u"...", u"'", u"`", u"\"", dm_single_close_quote,
dm_double_close_quote, u")"
]
def _maybe_download_corpora(tmp_dir, is_training):
"""Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
is_training: whether we're in training mode or not.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
"""
cnn_filename = "cnn_stories.tgz"
cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
dailymail_filename = "dailymail_stories.tgz"
dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
if not tf.gfile.Exists(cnn_finalpath):
cnn_file = generator_utils.maybe_download_from_drive(
tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
with tarfile.open(cnn_file, "r:gz") as cnn_tar:
cnn_tar.extractall(tmp_dir)
if not tf.gfile.Exists(dailymail_finalpath):
dailymail_file = generator_utils.maybe_download_from_drive(
tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
dailymail_tar.extractall(tmp_dir)
cnn_files = tf.gfile.Glob(cnn_finalpath + "*")
dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*")
all_files = cnn_files + dailymail_files
if is_training:
urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt",
_TRAIN_URLS)
else:
urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt",
_DEV_URLS)
return all_files, urls_path
def example_splits(url_file, all_files):
"""Generate splits of the data."""
def generate_hash(inp):
"""Generate a sha1 hash to match the raw url to the filename extracted."""
h = hashlib.sha1()
h.update(inp)
return h.hexdigest()
all_files_map = {f.split("/")[-1]: f for f in all_files}
urls = []
for line in tf.gfile.Open(url_file):
urls.append(line.strip().encode("utf-8"))
filelist = []
for url in urls:
url_hash = generate_hash(url)
filename = url_hash + ".story"
if filename not in all_files_map:
tf.logging.info("Missing file: %s" % url)
continue
filelist.append(all_files_map[filename])
tf.logging.info("Found %d examples" % len(filelist))
return filelist
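# Illustrative sketch (added for documentation; not used by the generators):
# example_splits() relies on each story file being named by the sha1 hex
# digest of its source URL, as done in the CNN/Daily Mail preprocessing. The
# URL below is a made-up example.
def _demo_url_to_story_filename(url=b"http://example.com/some-article"):
  h = hashlib.sha1()
  h.update(url)
  return h.hexdigest() + ".story"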
def example_generator(all_files, urls_path, sum_token):
"""Generate examples."""
def fix_run_on_sents(line):
if u"@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + u"."
filelist = example_splits(urls_path, all_files)
story_summary_split_token = u" <summary> " if sum_token else " "
for story_file in filelist:
story = []
summary = []
reading_highlights = False
for line in tf.gfile.Open(story_file, "rb"):
if six.PY2:
line = unicode(line.strip(), "utf-8")
else:
line = line.strip().decode("utf-8")
line = fix_run_on_sents(line)
if not line:
continue
elif line.startswith(u"@highlight"):
if not story:
break # No article text.
reading_highlights = True
elif reading_highlights:
summary.append(line)
else:
story.append(line)
if (not story) or not summary:
continue
yield " ".join(story) + story_summary_split_token + " ".join(summary)
def _story_summary_split(story):
split_str = u" <summary> "
split_str_len = len(split_str)
split_pos = story.find(split_str)
return story[:split_pos], story[split_pos + split_str_len:] # story, summary
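# Illustrative sketch (added for documentation; not used in the pipeline) of
# the round trip between example_generator(), which joins story and summary
# with the " <summary> " token, and _story_summary_split().
def _demo_story_summary_round_trip():
  example = u"the story text" + u" <summary> " + u"the summary text"
  story, summary = _story_summary_split(example)
  return story, summary  # -> (u"the story text", u"the summary text")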
def write_raw_text_to_files(all_files, urls_path, tmp_dir, is_training):
"""Write text to files."""
def write_to_file(all_files, urls_path, tmp_dir, filename):
with io.open(os.path.join(tmp_dir, filename + ".source"), "w") as fstory:
with io.open(os.path.join(tmp_dir, filename + ".target"),
"w") as fsummary:
for example in example_generator(all_files, urls_path, sum_token=True):
story, summary = _story_summary_split(example)
fstory.write(story + "\n")
fsummary.write(summary + "\n")
filename = "cnndm.train" if is_training else "cnndm.dev"
tf.logging.info("Writing %s" % filename)
write_to_file(all_files, urls_path, tmp_dir, filename)
if not is_training:
test_urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
_TEST_URLS)
filename = "cnndm.test"
tf.logging.info("Writing %s" % filename)
write_to_file(all_files, test_urls_path, tmp_dir, filename)
@registry.register_problem
class SummarizeCnnDailymail32k(text_problems.Text2TextProblem):
"""Summarize CNN and Daily Mail articles to their summary highlights."""
@property
def vocab_filename(self):
return "vocab.cnndailymail.%d" % self.approx_vocab_size
def generate_text_for_vocab(self, data_dir, tmp_dir):
del data_dir
all_files, urls_path = _maybe_download_corpora(tmp_dir, True)
return example_generator(all_files, urls_path, sum_token=False)
def is_generate_per_split(self):
return True
def generate_samples(self, data_dir, tmp_dir, dataset_split):
del data_dir
is_training = dataset_split == problem.DatasetSplit.TRAIN
all_files, urls_path = _maybe_download_corpora(tmp_dir, is_training)
write_raw_text_to_files(all_files, urls_path, tmp_dir, is_training)
for example in example_generator(all_files, urls_path, sum_token=True):
story, summary = _story_summary_split(example)
yield {"inputs": story, "targets": summary}
|
|
from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
"""
The test client has been asked to follow a redirect loop.
"""
def __init__(self, message, last_response):
super(RedirectCycleError, self).__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
    the network can't be sought and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
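# Illustrative helper (added for documentation; not used by the test client
# itself) showing FakePayload's read-limit behaviour: reads are capped at the
# number of bytes written, and writing after the first read is rejected.
def _fake_payload_example():
    payload = FakePayload(b'hello')
    assert len(payload) == 5
    first = payload.read(2)       # b'he'
    rest = payload.read()         # b'llo' (the remaining bytes)
    try:
        payload.write(b'more')    # raises ValueError: payload already read
    except ValueError:
        pass
    return first, rest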
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
    An HTTP handler that can be used for testing purposes. Uses the WSGI
interface to compose requests, but returns the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
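# Illustrative wire-format sketch (added for documentation; assumes Django
# settings are configured so DEFAULT_CHARSET is available):
#
#   encode_multipart(BOUNDARY, {'name': 'value'})
#   # -> b'--BoUnDaRyStRiNg\r\n'
#   #    b'Content-Disposition: form-data; name="name"\r\n'
#   #    b'\r\n'
#   #    b'value\r\n'
#   #    b'--BoUnDaRyStRiNg--\r\n'
#   # joined into a single byte string terminated by the closing boundary.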
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = uri_to_iri(path).encode(UTF_8)
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. We replicate this behavior here.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode(ISO_8859_1) if six.PY3 else path
def get(self, path, data=None, secure=False, **extra):
"Construct a GET request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"Construct a TRACE request."
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(path)
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if apps.is_installed('django.contrib.sessions'):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
else:
s = engine.SessionStore()
s.save()
self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
return s
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = curry(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(
lambda: urlresolvers.resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""
Send a TRACE request to the server.
"""
response = super(Client, self).trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if (user and user.is_active and
apps.is_installed('django.contrib.sessions')):
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
if backend is None:
backend = settings.AUTHENTICATION_BACKENDS[0]
user.backend = backend
self._login(user)
def _login(self, user):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if 'application/json' not in response.get('Content-Type'):
raise ValueError('Content-Type header is "{0}", not "application/json"'.format(response.get('Content-Type')))
return json.loads(response.content.decode(), **extra)
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
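# Illustrative usage sketch (added for documentation; assumes a configured
# Django project whose URLconf provides the paths shown, so it is not runnable
# standalone):
#
#   from django.test import Client
#   client = Client()
#   response = client.get('/accounts/login/', follow=True)
#   # Each (url, status_code) hop followed above is recorded here.
#   print(response.redirect_chain)
#   # JSON responses can be decoded when the Content-Type is application/json.
#   data = client.get('/api/items/').json()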
|
|
"""Utilities to deal with sympy.Matrix, numpy and scipy.sparse."""
from sympy import Matrix, I, Expr, Integer
from sympy.matrices import matrices
__all__ = [
'numpy_ndarray',
'scipy_sparse_matrix',
'sympy_to_numpy',
'sympy_to_scipy_sparse',
'numpy_to_sympy',
'scipy_sparse_to_sympy',
'flatten_scalar',
'matrix_dagger',
'to_sympy',
'to_numpy',
'to_scipy_sparse',
'matrix_tensor_product'
]
# Conditionally define the base classes for numpy and scipy.sparse arrays
# for use in isinstance tests.
try:
import numpy as np
except ImportError:
class numpy_ndarray(object):
pass
else:
numpy_ndarray = np.ndarray
try:
from scipy import sparse
except ImportError:
class scipy_sparse_matrix(object):
pass
else:
# Try to find spmatrix.
if hasattr(sparse, 'base'):
# Newer versions have it under scipy.sparse.base.
scipy_sparse_matrix = sparse.base.spmatrix
elif hasattr(sparse, 'sparse'):
# Older versions have it under scipy.sparse.sparse.
scipy_sparse_matrix = sparse.sparse.spmatrix
def sympy_to_numpy(m, **options):
"""Convert a sympy Matrix/complex number to a numpy matrix or scalar."""
import numpy as np
dtype = options.get('dtype','complex')
if isinstance(m, Matrix):
return np.matrix(m.tolist(), dtype=dtype)
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected Matrix or complex scalar, got: %r' % m)
def sympy_to_scipy_sparse(m, **options):
"""Convert a sympy Matrix/complex number to a numpy matrix or scalar."""
from scipy import sparse
import numpy as np
dtype = options.get('dtype','complex')
if isinstance(m, Matrix):
return sparse.csr_matrix(np.matrix(m.tolist(), dtype=dtype))
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected Matrix or complex scalar, got: %r' % m)
def scipy_sparse_to_sympy(m, **options):
"""Convert a scipy.sparse matrix to a sympy matrix."""
return Matrix(m.todense())
def numpy_to_sympy(m, **options):
"""Convert a numpy matrix to a sympy matrix."""
return Matrix(m)
def to_sympy(m, **options):
"""Convert a numpy/scipy.sparse matrix to a sympy matrix."""
if isinstance(m, Matrix):
return m
elif isinstance(m, numpy_ndarray):
return numpy_to_sympy(m)
elif isinstance(m, scipy_sparse_matrix):
return scipy_sparse_to_sympy(m)
elif isinstance(m, Expr):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_numpy(m, **options):
"""Convert a sympy/scipy.sparse matrix to a numpy matrix."""
dtype = options.get('dtype','complex')
if isinstance(m, (Matrix, Expr)):
return sympy_to_numpy(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
return m
elif isinstance(m, scipy_sparse_matrix):
return m.todense()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_scipy_sparse(m, **options):
"""Convert a sympy/numpy matrix to a scipy.sparse matrix."""
dtype = options.get('dtype','complex')
if isinstance(m, (Matrix, Expr)):
return sympy_to_scipy_sparse(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
from scipy import sparse
return sparse.csr_matrix(m)
elif isinstance(m, scipy_sparse_matrix):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def flatten_scalar(e):
"""Flatten a 1x1 matrix to a scalar, return larger matrices unchanged."""
if isinstance(e, Matrix):
if e.shape == (1,1):
e = e[0]
if isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
if e.shape == (1,1):
e = complex(e[0,0])
return e
def matrix_dagger(e):
"""Return the dagger of a sympy/numpy/scipy.sparse matrix."""
if isinstance(e, Matrix):
return e.H
elif isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
return e.conjugate().transpose()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % e)
# TODO: Move this into sympy.matrices.
def _sympy_tensor_product(*matrices):
"""Compute the tensor product of a sequence of sympy Matrices.
This is the standard Kronecker product of matrices [1].
Parameters
==========
matrices : tuple of Matrix instances
The matrices to take the tensor product of.
Returns
=======
matrix : Matrix
The tensor product matrix.
Examples
========
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum.matrixutils import _sympy_tensor_product
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> _sympy_tensor_product(m1, m2)
[1, 0, 2, 0]
[0, 1, 0, 2]
[3, 0, 4, 0]
[0, 3, 0, 4]
>>> _sympy_tensor_product(m2, m1)
[1, 2, 0, 0]
[3, 4, 0, 0]
[0, 0, 1, 2]
[0, 0, 3, 4]
References
==========
[1] http://en.wikipedia.org/wiki/Kronecker_product
"""
# Make sure we have a sequence of Matrices
testmat = [isinstance(m, Matrix) for m in matrices]
if not all(testmat):
raise TypeError(
'Sequence of Matrices expected, got: %s' % repr(matrices)
)
# Pull out the first element in the product.
matrix_expansion = matrices[-1]
# Do the tensor product working from right to left.
for mat in reversed(matrices[:-1]):
rows = mat.rows
cols = mat.cols
        # Go through each row, appending the tensor product to the
        # running matrix_expansion.
for i in range(rows):
start = matrix_expansion*mat[i*cols]
# Go through each column joining each item
for j in range(cols-1):
start = start.row_join(
matrix_expansion*mat[i*cols+j+1]
)
# If this is the first element, make it the start of the
# new row.
if i == 0:
next = start
else:
next = next.col_join(start)
matrix_expansion = next
return matrix_expansion
def _numpy_tensor_product(*product):
"""numpy version of tensor product of multiple arguments."""
import numpy as np
answer = product[0]
for item in product[1:]:
answer = np.kron(answer, item)
return answer
def _scipy_sparse_tensor_product(*product):
"""scipy.sparse version of tensor product of multiple arguments."""
from scipy import sparse
answer = product[0]
for item in product[1:]:
answer = sparse.kron(answer, item)
# The final matrices will just be multiplied, so csr is a good final
# sparse format.
return sparse.csr_matrix(answer)
def matrix_tensor_product(*product):
"""Compute the matrix tensor product of sympy/numpy/scipy.sparse matrices."""
if isinstance(product[0], Matrix):
return _sympy_tensor_product(*product)
elif isinstance(product[0], numpy_ndarray):
return _numpy_tensor_product(*product)
elif isinstance(product[0], scipy_sparse_matrix):
return _scipy_sparse_tensor_product(*product)
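# Illustrative sketch (added for demonstration; not part of the public API):
# matrix_tensor_product dispatches on the type of its first argument, so the
# same call covers sympy Matrices and, when numpy is installed, numpy arrays
# via repeated np.kron.
def _demo_matrix_tensor_product():
    m1 = Matrix([[1, 2], [3, 4]])
    m2 = Matrix([[1, 0], [0, 1]])
    sympy_result = matrix_tensor_product(m1, m2)  # 4x4 sympy Matrix
    try:
        import numpy as np
        numpy_result = matrix_tensor_product(np.array([[1, 2], [3, 4]]),
                                             np.eye(2))
    except ImportError:
        numpy_result = None
    return sympy_result, numpy_result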
def _numpy_eye(n):
"""numpy version of complex eye."""
import numpy as np
return np.matrix(np.eye(n, dtype='complex'))
def _scipy_sparse_eye(n):
"""scipy.sparse version of complex eye."""
from scipy import sparse
return sparse.eye(n, n, dtype='complex')
def matrix_eye(n, **options):
"""Get the version of eye and tensor_product for a given format."""
format = options.get('format','sympy')
if format == 'sympy':
return matrices.eye(n)
elif format == 'numpy':
return _numpy_eye(n)
elif format == 'scipy.sparse':
return _scipy_sparse_eye(n)
raise NotImplementedError('Invalid format: %r' % format)
def _numpy_matrix_to_zero(e):
"""Convert a numpy zero matrix to the zero scalar."""
import numpy as np
test = np.zeros_like(e)
if np.allclose(e, test):
return 0.0
else:
return e
def _scipy_sparse_matrix_to_zero(e):
"""Convert a scipy.sparse zero matrix to the zero scalar."""
import numpy as np
edense = e.todense()
test = np.zeros_like(edense)
if np.allclose(edense, test):
return 0.0
else:
return e
def matrix_to_zero(e):
"""Convert a zero matrix to the scalar zero."""
if isinstance(e, Matrix):
if matrices.zeros(e.shape) == e:
e = Integer(0)
elif isinstance(e, numpy_ndarray):
e = _numpy_matrix_to_zero(e)
elif isinstance(e, scipy_sparse_matrix):
e = _scipy_sparse_matrix_to_zero(e)
return e
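# Example (illustrative sketch): matrix_to_zero collapses an all-zero matrix
# to a scalar zero and leaves anything else untouched, e.g.
#
#     >>> import numpy as np
#     >>> matrix_to_zero(np.zeros((2, 2)))
#     0.0
#     >>> matrix_to_zero(np.eye(2)).shape
#     (2, 2)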
|
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from Queue import Queue
from threading import Thread
from common import *
from py2neo import neo4j
def test_contains_iter_len(m):
n1 = TNode()
n2 = TNode()
r1 = Relationship.get((n1, 'test1', n2))
assert n1 in m.session
assert n1 in iter(m.session)
assert n2 in m.session
assert n2 in iter(m.session)
assert r1 in m.session
assert r1 in iter(m.session)
assert len(list(iter(m.session))) == 3
assert len(m.session) == 3
m.session.commit()
assert n1 in m.session
assert n1 in iter(m.session)
assert n2 in m.session
assert n2 in iter(m.session)
assert r1 in m.session
assert r1 in iter(m.session)
assert len(list(iter(m.session))) == 3
assert len(m.session) == 3
n3 = TNode()
n4 = TNode()
r2 = Relationship.get((n3, 'test2', n4))
assert n3 in m.session
assert n3 in iter(m.session)
assert n4 in m.session
assert n4 in iter(m.session)
assert r2 in m.session
assert r2 in iter(m.session)
assert len(list(iter(m.session))) == 6
assert len(m.session) == 6
r1.expunge()
n1.expunge()
n2.expunge()
assert n1 not in m.session
assert n1 not in iter(m.session)
assert n2 not in m.session
assert n2 not in iter(m.session)
assert r1 not in m.session
assert r1 not in iter(m.session)
assert len(list(iter(m.session))) == 3
assert len(m.session) == 3
r2.expunge()
n3.expunge()
n4.expunge()
assert n3 not in m.session
assert n3 not in iter(m.session)
assert n4 not in m.session
assert n4 not in iter(m.session)
assert r2 not in m.session
assert r2 not in iter(m.session)
assert len(list(iter(m.session))) == 0
assert len(m.session) == 0
def test_counts_and_clear(m):
assert m.session.count == 0
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 0
assert len(neo4j.Node.cache) == 0
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
n1 = TNode()
assert m.session.count == 1
assert m.session.new == 1
assert m.session.dirty == 0
assert m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 1
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 0
assert len(neo4j.Node.cache) == 0
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
m.session.commit()
assert m.session.count == 1
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 1
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 1
assert len(neo4j.Node.cache) == 1
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
n1.string = "test"
assert m.session.count == 1
assert m.session.new == 0
assert m.session.dirty == 1
assert m.session.is_dirty()
assert len(m.session.nodes) == 1
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 1
assert len(neo4j.Node.cache) == 1
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
n2 = TNode()
assert m.session.count == 2
assert m.session.new == 1
assert m.session.dirty == 1
assert m.session.is_dirty()
assert len(m.session.nodes) == 1
assert len(m.session.phantomnodes) == 1
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 1
assert len(neo4j.Node.cache) == 1
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
r = n1.trel_out.append(n2)
assert m.session.count == 3
assert m.session.new == 2
assert m.session.dirty == 1
assert m.session.is_dirty()
assert len(m.session.nodes) == 1
assert len(m.session.phantomnodes) == 1
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 1
assert len(neo4j.Node.cache) == 1
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
m.session.clear()
assert m.session.count == 0
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 0
assert len(neo4j.Node.cache) == 0
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
n1 = TNode()
n2 = TNode()
r = n1.trel_out.append(n2)
m.session.commit()
assert m.session.count == 3
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 2
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 3
assert len(neo4j.Node.cache) == 2
assert len(neo4j.Relationship.cache) == 1
assert len(neo4j.Rel.cache) == 1
m.session.clear()
assert m.session.count == 0
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 0
assert len(neo4j.Node.cache) == 0
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
def test_new_and_dirty(m):
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
n1 = TNode()
assert m.session.new == 1
assert m.session.dirty == 0
assert m.session.is_dirty()
n2 = TNode()
r1 = n1.trel_out.append(n2)
assert m.session.new == 3
assert m.session.dirty == 0
assert m.session.is_dirty()
m.session.commit()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
n3 = TNode()
assert m.session.new == 1
assert m.session.dirty == 0
assert m.session.is_dirty()
n3.string = 'test'
assert m.session.new == 1
assert m.session.dirty == 0
assert m.session.is_dirty()
m.session.commit()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
r2 = n1.trel_out.append(n3)
assert m.session.new == 1
assert m.session.dirty == 0
assert m.session.is_dirty()
m.session.commit()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
n1.string = 'test'
assert n1.is_dirty()
assert m.session.new == 0
assert m.session.dirty == 1
assert m.session.is_dirty()
assert n1.string == 'test'
m.session.commit()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert n1.string == 'test'
n1.string = 'test'
assert not n1.is_dirty()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert n1.string == 'test'
r1.string = 'test'
assert r1.is_dirty()
assert m.session.new == 0
assert m.session.dirty == 1
assert m.session.is_dirty()
assert r1.string == 'test'
m.session.commit()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert r1.string == 'test'
r1.string = 'test'
assert not r1.is_dirty()
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert r1.string == 'test'
def test_add(m):
n1 = TNode()
n2 = TNode()
r = n1.trel_out.append(n2)
assert m.session.count == 3
assert m.session.new == 3
assert m.session.dirty == 0
assert m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 2
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 0
m.session.add(n1)
m.session.add(n2)
m.session.add(r)
assert m.session.count == 3
assert m.session.new == 3
assert m.session.dirty == 0
assert m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 2
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 0
m.session.clear()
m.session.add(n1)
assert n1 in m.session.phantomnodes
m.session.add(n2)
assert n2 in m.session.phantomnodes
m.session.add(r)
assert r in m.session.relmap
assert m.session.count == 3
assert m.session.new == 3
assert m.session.dirty == 0
assert m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 2
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 0
def test_get(m):
n1 = TNode()
n2 = TNode()
r = n1.trel_out.append(n2)
m.session.commit()
assert m.session.get(n1._entity) is n1
assert m.session.get(m.graph.node(n1.id)) is n1
assert m.session.get(n2._entity) is n2
assert m.session.get(m.graph.node(n2.id)) is n2
assert m.session.get(r._entity) is r
assert m.session.get(m.graph.relationship(r.id)) is r
def test_expunge(m):
n1 = TNode()
n2 = TNode()
r = n1.trel_out.append(n2)
assert r in m.session.relmap
assert r in n1.trel_out.rels()
assert r in n2.trel_in.rels()
assert n1 in m.session.phantomnodes
assert n2 in m.session.phantomnodes
r.expunge()
assert r not in m.session.relmap
assert r not in n1.trel_out.rels()
assert r not in n2.trel_in.rels()
assert n1 in m.session.phantomnodes
assert n2 in m.session.phantomnodes
r = n1.trel_out.append(n2)
n2.expunge()
assert n2 not in m.session.phantomnodes
assert r not in m.session.relmap
assert r not in n1.trel_out.rels()
assert n1 in m.session.phantomnodes
n1.expunge()
assert m.session.count == 0
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 0
def test_rollback(m):
n1 = TNode()
n2 = TNode()
r1 = n1.trel_out.append(n2)
m.session.commit()
# base case
n3 = TNode()
n4 = TNode()
r2 = n3.trel_out.append(n4)
n1.string = "test"
r1.string = "test"
r3 = n1.trel_out.append(n3)
m.session.rollback()
assert not m.session.is_dirty()
assert n1 in m.session.nodes.values()
assert not n1.is_dirty()
assert n1.string is None
assert n2 in m.session.nodes.values()
assert not n2.is_dirty()
assert r1 in m.session.relmap
assert not r1.is_dirty()
assert r1.string is None
assert n3 not in m.session.phantomnodes
assert n4 not in m.session.phantomnodes
assert r2 not in m.session.relmap
assert r3 not in m.session.relmap
assert m.session.count == 3
# deleted relationship
assert r1 in m.session
assert r1 in n1.trel_out
assert r1 in n2.trel_in
assert not r1.is_deleted()
r1.delete()
assert r1 in m.session
assert r1 not in n1.trel_out
assert r1 not in n2.trel_in
assert r1.is_deleted()
m.session.rollback()
assert r1 in m.session
assert r1 in n1.trel_out
assert r1 in n2.trel_in
assert not r1.is_deleted()
# deleted node
assert n1 in m.session
assert n1 in n2.trel_in
assert not n1.is_deleted()
n1.delete()
assert n1 in m.session
assert n1 not in n2.trel_in
assert n1.is_deleted()
m.session.rollback()
assert n1 in m.session
assert n1 in n2.trel_in
assert not n1.is_deleted()
# externally-deleted relationship
delete_out_of_session(m, r1)
assert r1 in m.session
assert not r1.is_deleted()
assert r1.string is None
r1.string = 'test'
assert r1.is_dirty()
m.session.rollback()
assert r1 in m.session
assert not r1.is_deleted()
assert r1.string is None
# externally-deleted node
delete_out_of_session(m, n1)
assert n1 in m.session
assert not n1.is_deleted()
assert n1.string is None
n1.string = 'test'
assert n1.is_dirty()
m.session.rollback()
assert n1 in m.session
assert not n1.is_deleted()
assert n1.string is None
def test_commit(m):
from py2neo import neo4j
n1 = TNode()
n2 = TNode()
r = n1.trel_out.append(n2)
assert m.session.is_dirty()
assert len(m.session.batch) == 0
assert not m.session.committing
    assert n1._entity is None
    assert n1.id is None
    assert n2._entity is None
    assert n2.id is None
    assert r._entity is None
    assert r.id is None
m.session.commit()
assert not m.session.is_dirty()
assert len(m.session.batch) == 0
assert not m.session.committing
assert isinstance(n1._entity, neo4j.Node)
assert isinstance(n1.id, int)
assert isinstance(n2._entity, neo4j.Node)
assert isinstance(n2.id, int)
assert isinstance(r._entity, neo4j.Relationship)
assert isinstance(r.id, int)
def test_commit_deleted(m):
n1 = TNode()
n2 = TNode()
r1 = n1.trel_out.append(n2)
m.session.commit()
# change deleted rel
r1_id = r1.id
delete_out_of_session(m, r1)
assert r1 in m.session
assert not r1.is_deleted()
r1.string = 'test1'
assert r1.is_dirty()
with raises(CommitError):
m.session.commit()
assert r1 in m.session
# delete deleted rel
r1.delete()
assert r1.is_deleted()
m.session.commit()
assert r1 not in m.session
with raises(EntityNotFoundException):
Relationship.get(r1_id)
# add rel to deleted node
n2_id = n2.id
delete_out_of_session(m, n2)
assert n2 in m.session
assert not n2.is_deleted()
r2 = n1.subtrel_out.append(n2)
with raises(CommitError):
m.session.commit()
assert r2 in m.session
r2.expunge()
assert r2 not in m.session
assert n2 in m.session
assert not n2.is_deleted()
# change deleted node
n2.string = 'test2'
assert n2.is_dirty()
with raises(CommitError):
m.session.commit()
assert n2 in m.session
# delete deleted node
n2.delete()
assert n2.is_deleted()
m.session.commit()
assert n2 not in m.session
with raises(EntityNotFoundException):
Node.get(n2_id)
def test_threadsafe(m):
# initial state
n1 = TNode()
m.session.commit()
n1.string = "test"
n2 = TNode()
n1.trel_out.append(n2)
assert m.session.count == 3
assert m.session.new == 2
assert m.session.dirty == 1
assert m.session.is_dirty()
assert len(m.session.nodes) == 1
assert len(m.session.phantomnodes) == 1
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 1
# verify that subthread has separate session
def test(q, m):
try:
assert m.session.count == 0
assert m.session.new == 0
assert m.session.dirty == 0
assert not m.session.is_dirty()
assert len(m.session.nodes) == 0
assert len(m.session.phantomnodes) == 0
assert len(m.session.relmap) == 0
assert len(m.session.propmap) == 0
except Exception as e:
q.put(e)
finally:
m.session.clear()
q = Queue()
t = Thread(target=test, args=(q, m))
t.start()
t.join()
if not q.empty():
raise q.get()
# verify subthread's m.session.clear() has not affected state
assert m.session.count == 3
assert m.session.new == 2
assert m.session.dirty == 1
assert m.session.is_dirty()
assert len(m.session.nodes) == 1
assert len(m.session.phantomnodes) == 1
assert len(m.session.relmap) == 1
assert len(m.session.propmap) == 1
def test_py2neo_threadsafe(m):
# initial state
n1 = TNode()
n2 = TNode()
n1.trel_out.append(n2)
m.session.commit()
assert m.session.count == 3
assert len(neo4j.Node.cache) == 2
assert len(neo4j.Relationship.cache) == 1
assert len(neo4j.Rel.cache) == 1
# verify that subthread has separate session
def test(q, m):
try:
assert m.session.count == 0
assert len(neo4j.Node.cache) == 0
assert len(neo4j.Relationship.cache) == 0
assert len(neo4j.Rel.cache) == 0
except Exception as e:
q.put(e)
finally:
m.session.clear()
q = Queue()
t = Thread(target=test, args=(q, m))
t.start()
t.join()
if not q.empty():
raise q.get()
# verify subthread's m.session.clear() has not affected state
assert m.session.count == 3
assert len(neo4j.Node.cache) == 2
assert len(neo4j.Relationship.cache) == 1
assert len(neo4j.Rel.cache) == 1
|
|
# encoding: utf8
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
from inspect import isgenerator
from lxml import etree
from lxml.etree import LxmlSyntaxError
from lxml.builder import E
from spyne import ProtocolContext, BODY_STYLE_WRAPPED
from spyne.util import Break, coroutine
from spyne.protocol.cloth.to_parent import ToParentMixin
from spyne.protocol.cloth.to_cloth import ToClothMixin
from spyne.util.six import StringIO, string_types
class XmlClothProtocolContext(ProtocolContext):
def __init__(self, parent, transport, type=None):
super(XmlClothProtocolContext, self).__init__(parent, transport, type)
self.inst_stack = []
self.prot_stack = []
self.doctype_written = False
self.close_until = None
class XmlCloth(ToParentMixin, ToClothMixin):
mime_type = 'text/xml'
HtmlMicroFormat = None
def __init__(self, app=None, mime_type=None,
ignore_uncap=False, ignore_wrappers=False,
cloth=None, attr_name='spyne_id', root_attr_name='spyne',
cloth_parser=None, polymorphic=True):
super(XmlCloth, self).__init__(app=app, mime_type=mime_type,
ignore_uncap=ignore_uncap, ignore_wrappers=ignore_wrappers,
polymorphic=polymorphic)
self._init_cloth(cloth, attr_name, root_attr_name, cloth_parser)
@staticmethod
def trc(cls, locale, default):
"""Translate a class.
:param cls: class
:param locale: locale string
:param default: default string if no translation found
:returns: translated string
"""
if locale is None:
locale = 'en_US'
if cls.Attributes.translations is not None:
return cls.Attributes.translations.get(locale, default)
return default
@staticmethod
def trd(trdict, locale, default):
"""Translate from a translations dict.
:param trdict: translation dict
:param locale: locale string
:param default: default string if no translation found
:returns: translated string
"""
if locale is None:
locale = 'en_US'
if trdict is None:
return default
if isinstance(trdict, string_types):
return trdict
return trdict.get(locale, default)
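    # Illustrative examples (not taken from the spyne docs) of the lookup
    # rules implemented above:
    #     XmlCloth.trd({'en_US': 'Name', 'de_DE': 'Name_DE'}, 'de_DE', 'Name')  # -> 'Name_DE'
    #     XmlCloth.trd(None, 'de_DE', 'Name')                                   # -> 'Name'
    #     XmlCloth.trd('fixed', 'de_DE', 'Name')                                # -> 'fixed'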
def get_context(self, parent, transport):
return XmlClothProtocolContext(parent, transport)
def serialize(self, ctx, message):
"""Uses ``ctx.out_object``, ``ctx.out_header`` or ``ctx.out_error`` to
set ``ctx.out_body_doc``, ``ctx.out_header_doc`` and
        ``ctx.out_document`` as an ``lxml.etree._Element`` instance.
Not meant to be overridden.
"""
assert message in (self.REQUEST, self.RESPONSE)
self.event_manager.fire_event('before_serialize', ctx)
if ctx.out_stream is None:
ctx.out_stream = StringIO()
logger.debug("%r %d", ctx.out_stream, id(ctx.out_stream))
if ctx.out_error is not None:
# All errors at this point must be Fault subclasses.
inst = ctx.out_error
cls = inst.__class__
name = cls.get_type_name()
ctx.out_document = E.div()
with self.docfile(ctx.out_stream) as xf:
# as XmlDocument is not push-ready yet, this is what we do.
# this is an ugly hack, bear with me.
retval = XmlCloth.HtmlMicroFormat() \
.to_parent(ctx, cls, inst, xf, name)
else:
assert message is self.RESPONSE
result_message_class = ctx.descriptor.out_message
name = result_message_class.get_type_name()
if ctx.descriptor.body_style == BODY_STYLE_WRAPPED:
if self.ignore_wrappers:
result_message = ctx.out_object[0]
while result_message_class.Attributes._wrapper and \
len(result_message_class._type_info) == 1:
result_message_class, = \
result_message_class._type_info.values()
else:
result_message = result_message_class()
for i, attr_name in enumerate(
result_message_class._type_info.keys()):
setattr(result_message, attr_name, ctx.out_object[i])
else:
result_message, = ctx.out_object
retval = self.incgen(ctx, result_message_class, result_message, name)
self.event_manager.fire_event('after_serialize', ctx)
return retval
def create_out_string(self, ctx, charset=None):
"""Sets an iterable of string fragments to ctx.out_string if the output
is a StringIO object, which means we're run by a sync framework. Async
frameworks have the out_stream write directly to the output stream so
out_string should not be used.
"""
if isinstance(ctx.out_stream, StringIO):
ctx.out_string = [ctx.out_stream.getvalue()]
@coroutine
def incgen(self, ctx, cls, inst, name):
if name is None:
name = cls.get_type_name()
try:
with self.docfile(ctx.out_stream) as xf:
ctx.protocol.doctype_written = False
ctx.protocol.prot_stack = []
ret = self.subserialize(ctx, cls, inst, xf, name)
if isgenerator(ret): # Poor man's yield from
try:
while True:
sv2 = (yield)
ret.send(sv2)
except Break as b:
try:
ret.throw(b)
except StopIteration:
pass
except LxmlSyntaxError as e:
if e.msg == 'no content written':
pass
else:
raise
def docfile(self, *args, **kwargs):
return etree.xmlfile(*args, **kwargs)
def write_doctype(self, ctx, parent, cloth=None):
pass # FIXME: write it
@staticmethod
def get_class_cloth(cls):
return cls.Attributes._xml_cloth
@staticmethod
def get_class_root_cloth(cls):
return cls.Attributes._xml_root_cloth
def check_class_cloths(self, ctx, cls, inst, parent, name, **kwargs):
c = self.get_class_root_cloth(cls)
eltstack = getattr(ctx.protocol, 'eltstack', [])
if c is not None and len(eltstack) == 0 and not (eltstack[-1] is c):
if not ctx.protocol.doctype_written:
self.write_doctype(ctx, parent, c)
logger.debug("to object root cloth")
return True, self.to_root_cloth(ctx, cls, inst, c, parent, name,
**kwargs)
c = self.get_class_cloth(cls)
if c is not None:
if not ctx.protocol.doctype_written:
self.write_doctype(ctx, parent, c)
logger.debug("to object cloth")
return True, self.to_parent_cloth(ctx, cls, inst, c, parent, name,
**kwargs)
return False, None
def subserialize(self, ctx, cls, inst, parent, name='', **kwargs):
pstack = ctx.protocol.prot_stack
pstack.append(self)
logger.debug("push prot %r. newlen: %d", self, len(pstack))
if self._root_cloth is not None:
logger.debug("to root cloth")
retval = self.to_root_cloth(ctx, cls, inst, self._root_cloth,
parent, name)
elif self._cloth is not None:
logger.debug("to parent cloth")
retval = self.to_parent_cloth(ctx, cls, inst, self._cloth, parent,
name)
else:
logger.debug("to parent")
retval = self.start_to_parent(ctx, cls, inst, parent, name, **kwargs)
# FIXME: if retval is a coroutine handle, this will be inconsistent
pstack.pop()
logger.debug("pop prot %r. newlen: %d", self, len(pstack))
return retval
def decompose_incoming_envelope(self, ctx, message):
raise NotImplementedError("This is an output-only protocol.")
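# Minimal wiring sketch (an assumption based on the usual spyne setup, not
# taken from this file): XmlCloth is normally plugged in as an output
# protocol of an Application, e.g.
#
#     from spyne import Application
#     from spyne.protocol.http import HttpRpc
#
#     app = Application([SomeService], tns='some.tns',
#                       in_protocol=HttpRpc(), out_protocol=XmlCloth())
#
# where SomeService stands for whatever ServiceBase subclass the application
# defines.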
|
|
'''
====================================================================
Copyright (c) 2003-2010 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_shell_unix_commands.py
'''
import os
import signal
import subprocess
import types
import shlex
import tempfile
import pathlib
from PyQt5 import QtCore
__all__ = ('setupCommands', 'getTerminalProgramList', 'getFileBrowserProgramList'
,'guiDiffFiles', 'shellDiffFiles', 'editFile'
,'shellOpen', 'commandShell', 'fileBrowser')
__sigchld_handler_installed = False
gui_terminals = ['gnome-terminal', 'konsole', 'xterm', 'xfce4-terminal']
gui_file_browsers = ['nautilus', 'konqueror', 'thunar', 'dolphin']
def setupCommands():
# install the sig child handler to get rid of the zombie processes
global __sigchld_handler_installed
if not __sigchld_handler_installed:
signal.signal( signal.SIGCHLD, __sigchld_handler )
__sigchld_handler_installed = True
def __sigchld_handler( signum, frame ):
try:
while True:
pid, status = os.waitpid( -1, os.WNOHANG )
if pid == 0:
break
except OSError:
pass
def getTerminalProgramList():
return gui_terminals[:]
def getFileBrowserProgramList():
return gui_file_browsers[:]
def editFile( app, working_dir, all_filenames ):
app.log.infoheader( T_('Edit %s') % (' '.join( [str(name) for name in all_filenames] ),) )
p = app.prefs.editor
editor = p.program
if editor == '':
app.log.warning( T_('Please configure the editor in the Preferences Editor tab') )
return
options = p.options
editor_args = []
if options != '':
editor_args = shlex.split( options )
editor_args.extend( all_filenames )
__run_command( app, editor, editor_args, working_dir )
def shellOpen( app, working_dir, all_filenames ):
app.log.infoheader( T_('Open %s') % (' '.join( [str(name) for name in all_filenames] ),) )
for filename in all_filenames:
# xdg-open only accepts 1 filename at a time
__run_command( app, '/usr/bin/xdg-open', [filename], working_dir )
def guiDiffFiles( app, args ):
    __run_command( app, app.prefs.getDiffTool().gui_diff_tool, args, os.getcwd() )
def shellDiffFiles( app, args ):
return __run_command_with_output( app, app.prefs.getDiffTool().shell_diff_tool, args )
def __titleFromPath( working_dir ):
title = []
try:
rel_path = working_dir.relative_to( os.environ['HOME'] )
except ValueError:
rel_path = working_dir
empty = pathlib.Path('.')
while rel_path != empty:
title.append( rel_path.name )
rel_path = rel_path.parent
return ' '.join( title )
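# Illustrative example (assumption): for a working_dir of $HOME/wb/Source,
# __titleFromPath() returns 'Source wb' (leaf first, then its parents), so
# the leaf directory is what shows up first in a crowded task bar.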
def commandShell( app, working_dir ):
app.log.infoheader( 'Shell in %s' % (working_dir,) )
p = app.prefs.shell
# calc a title that is leaf to root so that the leaf shows up in a task bar first
title = __titleFromPath( working_dir )
with tempfile.NamedTemporaryFile( mode='w', delete=False, prefix='tmp-wb-shell', suffix='.sh' ) as f:
app.all_temp_files.append( f.name )
if len( p.terminal_init ) > 0:
f.write( ". '%s'\n" % (p.terminal_init,) )
# create an interactive login shell
f.write( 'exec "$SHELL" --login -i\n' )
f.close()
# chmod +x
os.chmod( f.name, 0o700 )
path = os.environ.get( 'PATH' )
if p.terminal_program == '':
app.log.warning( T_('Please configure the Terminal in the Preferences Shell tab') )
return
found = False
for folder in path.split( os.pathsep ):
exe = pathlib.Path( folder ) / p.terminal_program
if exe.is_file():
found = True
break
if not found:
app.log.warning( T_('Cannot find the Terminal program %s.') % (p.terminal_program,) )
app.log.warning( T_('Please configure a terminal program that is installed on the system in the Preferences Shell tab') )
return
os.environ['WB_WD'] = str( working_dir )
try:
if p.terminal_program == 'konsole':
__run_command( app, p.terminal_program,
['--workdir', working_dir, '-e', '/bin/bash', f.name],
working_dir )
elif p.terminal_program in ('gnome-terminal', 'xfce4-terminal'):
__run_command( app, p.terminal_program,
['--title', title, '--working-directory', working_dir, '-x', f.name],
working_dir )
        elif p.terminal_program == 'xterm':
__run_command( app, p.terminal_program,
['-T', title, '-e', f.name],
working_dir )
finally:
del os.environ['WB_WD']
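# For reference (illustrative, not generated verbatim): the temporary script
# written above ends up looking like
#
#     . '/path/to/terminal_init'
#     exec "$SHELL" --login -i
#
# i.e. it sources the optional terminal init file and then replaces itself
# with an interactive login shell inside the chosen terminal program.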
def fileBrowser( app, working_dir ):
app.log.infoheader( 'Browse files in %s' % (working_dir,) )
p = app.prefs.shell
path = os.environ.get("PATH")
found = False
if p.file_browser == '':
app.log.warning( T_('Please configure the File Browser in the Preferences Shell tab') )
return
for folder in path.split( os.pathsep ):
exe = pathlib.Path( folder ) / p.file_browser
if exe.is_file():
found = True
break
if not found:
app.log.warning( T_('Cannot find the File Browser program %s.') % (p.file_browser,) )
app.log.warning( T_('Please configure a File Browser program that is installed on the system in the Preferences Shell tab') )
return
if p.file_browser == 'konqueror':
__run_command( app,
p.file_browser,
['--mimetype', 'inode/directory', working_dir],
working_dir )
elif p.file_browser in ('nautilus', 'thunar', 'dolphin'):
__run_command( app,
p.file_browser,
[working_dir],
working_dir )
def __run_command( app, cmd, all_args, working_dir ):
all_args = [str(arg) for arg in all_args]
app.log.info( '%s %s' % (cmd, ' '.join( all_args ) ) )
proc = QtCore.QProcess()
proc.setStandardInputFile( proc.nullDevice() )
proc.setStandardOutputFile( proc.nullDevice() )
proc.setStandardErrorFile( proc.nullDevice() )
proc.startDetached( cmd, all_args, str( working_dir ) )
def __run_commandQQQ( app, cmd, args ):
app.log.info( '%s %s' % (cmd, ' '.join( args ) ) )
env = os.environ.copy()
cmd = asUtf8( cmd )
args = [asUtf8( arg ) for arg in args]
os.spawnvpe( os.P_NOWAIT, cmd, [cmd]+args, env )
def asUtf8( s ):
if isinstance( s, pathlib.Path ):
s = str( s )
if type( s ) == str:
return s.encode( 'utf-8' )
else:
return s
def __run_command_with_output( app, cmd, args ):
app.log.info( '%s %s' % (cmd, ' '.join( args )) )
try:
cmd = asUtf8( cmd )
args = [asUtf8( arg ) for arg in args]
proc = subprocess.Popen(
[cmd]+args,
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output = proc.stdout.read()
proc.wait()
except EnvironmentError as e:
return 'error running %s %s: %s' % (cmd, ' '.join( args ), str(e))
return output
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for movie_lens."""
import copy
import functools
import os
import tempfile
from absl import flags
from absl.testing import absltest
import attr
import file_util
import test_util
from environments.recommenders import recsim_samplers
from environments.recommenders import recsim_wrapper
import numpy as np
from recsim.simulator import recsim_gym
from proxy_rewards import movie_lens_simulator as movie_lens
from proxy_rewards import utils as movie_lens_utils
FLAGS = flags.FLAGS
class MovieLensTestNoShift(absltest.TestCase):
def _initialize_from_config(self, env_config):
self.working_dir = tempfile.mkdtemp(dir='/tmp')
self.initial_embeddings = movie_lens_utils.load_embeddings(env_config)
self.genre_history = movie_lens_utils.load_genre_history(env_config)
user_ctor = functools.partial(movie_lens.User,
**attr.asdict(env_config.user_config))
self.dataset = movie_lens_utils.Dataset(
env_config.data_dir,
user_ctor=user_ctor,
movie_ctor=movie_lens.Movie,
genre_history=self.genre_history,
embeddings=self.initial_embeddings,
genre_shift=env_config.genre_shift,
bias_against_unseen=env_config.bias_against_unseen)
self.document_sampler = recsim_samplers.SingletonSampler(
self.dataset.get_movies(), movie_lens.Movie)
self.user_sampler = recsim_samplers.UserPoolSampler(
seed=env_config.seeds.user_sampler,
users=self.dataset.get_users(),
user_ctor=user_ctor)
self.user_model = movie_lens.UserModel(
user_sampler=self.user_sampler,
seed=env_config.seeds.user_model,
slate_size=env_config.slate_size,
)
env = movie_lens.MovieLensEnvironment(
self.user_model,
self.document_sampler,
num_candidates=self.document_sampler.size(),
slate_size=env_config.slate_size,
resample_documents=False)
env.reset()
reward_aggregator = movie_lens.average_ratings_reward
self.env = recsim_gym.RecSimGymEnv(env, reward_aggregator)
def setUp(self):
super(MovieLensTestNoShift, self).setUp()
self.data_dir = os.path.join(FLAGS.test_srcdir,
os.path.split(os.path.abspath(__file__))[0],
'test_data')
self.env_config = movie_lens.EnvConfig(
seeds=movie_lens.Seeds(0, 0),
data_dir=self.data_dir,
genre_history_path=os.path.join(self.data_dir, 'genre_history.json'),
embeddings_path=os.path.join(self.data_dir, 'embeddings.json'),
genre_shift=None,
bias_against_unseen=0.)
self._initialize_from_config(self.env_config)
def tearDown(self):
file_util.delete_recursively(self.working_dir)
super(MovieLensTestNoShift, self).tearDown()
def test_document_observation_space_matches(self):
for doc in self.dataset.get_movies():
self.assertIn(doc.create_observation(), doc.observation_space())
def test_user_observation_space_matches(self):
user = self.user_sampler.sample_user()
self.assertIn(user.create_observation(), user.observation_space())
def test_observations_in_observation_space(self):
for slate in [[0], [1], [2]]:
observation, _, _, _ = self.env.step(slate)
for field in ['doc', 'response', 'user']:
self.assertIn(observation[field],
self.env.observation_space.spaces[field])
def test_diversity_seeking_impacts_score(self):
n_diversity = 0
user = self.user_sampler.get_user(1)
user.diversity_seeking = True
for doc in self.dataset.get_movies():
if user.check_if_new_genre(doc) == 1.:
n_diversity += 1
self.assertNotEqual(user.score_document(doc), user.rate_document(doc))
else:
self.assertEqual(user.score_document(doc), user.rate_document(doc))
# Make sure that the test data contains at least one movie that was diverse
# for this user
self.assertGreater(n_diversity, 0)
user.diversity_seeking = False
for doc in self.dataset.get_movies():
self.assertEqual(user.score_document(doc), user.rate_document(doc))
def test_user_can_rate_document(self):
user = self.user_sampler.get_user(1)
for doc in self.dataset.get_movies():
self.assertBetween(
user.rate_document(doc), movie_lens.MIN_RATING_SCORE,
movie_lens.MAX_RATING_SCORE)
def test_user_genre_can_shift(self):
user = self.user_sampler.get_user(1)
ratings_before = [
user.rate_document(doc) for doc in self.dataset.get_movies()
]
genre_shift = [2.] * len(movie_lens_utils.GENRES)
user.shift_genre_preferences(genre_shift)
ratings_after = [
user.rate_document(doc) for doc in self.dataset.get_movies()
]
    # NOTE: This test can fail with 1.0 == 1.0 if you have modified
    # the original scores to the point that the genre shift does not
    # push a pre-clipped score above 1.0. Similarly for 5.0 == 5.0.
for pair in zip(ratings_before, ratings_after):
self.assertNotEqual(pair[0], pair[1])
def test_environment_can_advance_by_steps(self):
# Recommend some manual slates.
for slate in [[0], [1], [3]]:
# Tests that env.step completes successfully.
self.env.step(slate)
def test_environment_observation_space_is_as_expected(self):
for slate in [[0], [1], [2]]:
observation, _, _, _ = self.env.step(slate)
for field in ['doc', 'response', 'user']:
self.assertIn(observation[field],
self.env.observation_space.spaces[field])
def test_gym_environment_builder(self):
env = movie_lens.create_gym_environment(self.env_config)
env.seed(100)
env.reset()
# Recommend some manual slates and check that the observations are as
# expected.
for slate in [[0], [0], [2]]:
observation, _, _, _ = env.step(slate)
for field in ['doc', 'response', 'user']:
self.assertIn(observation[field], env.observation_space.spaces[field])
def test_if_user_state_resets(self):
observation = self.env.reset()
curr_user_id = observation['user']['user_id']
ta_vec = np.copy(self.env._environment.user_model._user_sampler
._users[curr_user_id].topic_affinity)
for i in range(3):
self.env.step([i])
self.env.reset()
ta_new = self.env._environment.user_model._user_sampler._users[
curr_user_id].topic_affinity
self.assertTrue(np.all(ta_new == ta_vec))
def test_user_order_is_shuffled(self):
"""Tests that user order does not follow a fixed pattern.
We test this by checking that the list is not perioc for periods between
0-10. Since there are only 5 unique users, this is enough to show that
it's not following a simple pattern.
"""
self.env.seed(100)
user_list = []
for _ in range(100):
observation = self.env.reset()
user_list.append(observation['user']['user_id'])
def _is_periodic(my_list, period):
for idx, val in enumerate(my_list[:-period]):
if val != my_list[idx + period]:
return False
return True
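    # e.g. (illustrative): _is_periodic([1, 2, 1, 2, 1], 2) is True, while
    # _is_periodic([1, 2, 1, 2, 1], 3) is False.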
for period in range(1, 10):
self.assertFalse(_is_periodic(user_list, period))
def test_user_order_is_consistent(self):
self.env.reset_sampler()
first_list = []
for _ in range(100):
observation = self.env.reset()
first_list.append(observation['user']['user_id'])
self.env.reset_sampler()
other_list = []
for _ in range(100):
observation = self.env.reset()
other_list.append(observation['user']['user_id'])
self.assertEqual(first_list, other_list)
# Also check that changing the seed creates a new ordering.
config = copy.deepcopy(self.env_config)
config.seeds.user_sampler += 1
env = movie_lens.create_gym_environment(config)
other_list = []
for _ in range(100):
observation = env.reset()
other_list.append(observation['user']['user_id'])
self.assertNotEqual(first_list, other_list)
def test_ml_fairness_gym_environment_can_run(self):
ml_fairness_env = recsim_wrapper.wrap(self.env)
test_util.run_test_simulation(env=ml_fairness_env, stackelberg=True)
class MovieLensTestShift(MovieLensTestNoShift):
"""Test with genre shifts."""
def setUp(self):
super(MovieLensTestShift, self).setUp()
self.data_dir = os.path.join(FLAGS.test_srcdir,
os.path.split(os.path.abspath(__file__))[0],
'test_data')
self.env_config = movie_lens.EnvConfig(
seeds=movie_lens.Seeds(0, 0),
data_dir=self.data_dir,
genre_history_path=os.path.join(self.data_dir, 'genre_history.json'),
embeddings_path=os.path.join(self.data_dir, 'embeddings.json'),
genre_shift=[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
bias_against_unseen=0.)
self._initialize_from_config(self.env_config)
class MovieLensTestBiasAgainstUnseen(MovieLensTestNoShift):
"""Test with bias against unseen genres."""
def setUp(self):
super(MovieLensTestBiasAgainstUnseen, self).setUp()
self.data_dir = os.path.join(FLAGS.test_srcdir,
os.path.split(os.path.abspath(__file__))[0],
'test_data')
self.env_config = movie_lens.EnvConfig(
seeds=movie_lens.Seeds(0, 0),
data_dir=self.data_dir,
genre_history_path=os.path.join(self.data_dir, 'genre_history.json'),
embeddings_path=os.path.join(self.data_dir, 'embeddings.json'),
genre_shift=None,
bias_against_unseen=-1.)
self._initialize_from_config(self.env_config)
if __name__ == '__main__':
absltest.main()
|
|
import os
import sys
from SteamworksParser import steamworksparser
g_TypeConversionDict = {
"uint8": "byte",
"uint16": "ushort",
"uint32": "uint",
"uint64": "ulong",
"char": "string",
"int32": "int",
"int64": "long",
"uint8 *": "IntPtr",
"const char *": "string",
"const char **": "IntPtr",
# This is for CallbackMsg_t
"HSteamUser": "int"
}
g_CustomPackSize = {
# Callbacks
"AvatarImageLoaded_t": "4",
"FriendRichPresenceUpdate_t": "4",
"GameConnectedClanChatMsg_t": "4",
"GameConnectedChatLeave_t": "1",
"JoinClanChatRoomCompletionResult_t": "4",
"GameConnectedFriendChatMsg_t": "4",
"FriendsGetFollowerCount_t": "4",
"FriendsIsFollowing_t": "4",
"FriendsEnumerateFollowingList_t": "4",
"GSClientDeny_t": "4",
"GSClientKick_t": "4",
"GSClientGroupStatus_t": "1",
"GSStatsReceived_t": "4",
"GSStatsStored_t": "4",
"P2PSessionConnectFail_t": "1",
"SocketStatusCallback_t": "4",
"ValidateAuthTicketResponse_t": "4",
# Structs
"InputAnalogActionData_t": "1",
"InputDigitalActionData_t": "1",
}
g_SkippedStructs = (
# Lingering PS3 stuff.
"PSNGameBootInviteResult_t",
"PS3TrophiesInstalled_t",
# We remap these ISteamController structs to ISteamInput
"ControllerAnalogActionData_t",
"ControllerDigitalActionData_t",
"ControllerMotionData_t",
# String formatting functions. We just use .ToString() instead.
"SteamNetworkingIdentityRender",
"SteamNetworkingIPAddrRender",
"SteamNetworkingPOPIDRender",
# CustomType
"SteamIPAddress_t",
"SteamInputActionEvent_t",
)
g_SequentialStructs = (
"MatchMakingKeyValuePair_t",
)
g_SpecialFieldTypes = {
"PersonaStateChange_t": {
"m_nChangeFlags": "EPersonaChange"
},
"HTML_NeedsPaint_t": {
"pBGRA": "IntPtr"
},
# These two are returned by a function and the struct needs to be blittable.
"InputAnalogActionData_t": {
"bActive": "byte", # Originally bool
},
"InputDigitalActionData_t": {
"bState": "byte", # Originally bool
"bActive": "byte", # Originally bool
},
}
g_ExplicitStructs = {
"UserStatsReceived_t": {
"m_nGameID" : "0",
"m_eResult" : "8",
"m_steamIDUser" : "12",
}
}
def main(parser):
try:
os.makedirs("../com.rlabrecque.steamworks.net/Runtime/autogen/")
except OSError:
pass
lines = []
callbacklines = []
for f in parser.files:
for struct in f.structs:
lines.extend(parse(struct))
for callback in f.callbacks:
callbacklines.extend(parse(callback))
with open("../com.rlabrecque.steamworks.net/Runtime/autogen/SteamStructs.cs", "wb") as out:
with open("templates/header.txt", "r") as f:
out.write(bytes(f.read(), "utf-8"))
out.write(bytes("namespace Steamworks {\n", "utf-8"))
for line in lines:
out.write(bytes(line + "\n", "utf-8"))
out.write(bytes("}\n\n", "utf-8"))
out.write(bytes("#endif // !DISABLESTEAMWORKS\n", "utf-8"))
with open("../com.rlabrecque.steamworks.net/Runtime/autogen/SteamCallbacks.cs", "wb") as out:
with open("templates/header.txt", "r") as f:
out.write(bytes(f.read(), "utf-8"))
out.write(bytes("namespace Steamworks {\n", "utf-8"))
for line in callbacklines:
out.write(bytes(line + "\n", "utf-8"))
out.write(bytes("}\n\n", "utf-8"))
out.write(bytes("#endif // !DISABLESTEAMWORKS\n", "utf-8"))
def parse(struct):
if struct.name in g_SkippedStructs:
return []
lines = []
for comment in struct.c.rawprecomments:
if type(comment) is steamworksparser.BlankLine:
continue
lines.append("\t" + comment)
structname = struct.name
packsize = g_CustomPackSize.get(structname, "Packsize.value")
if g_ExplicitStructs.get(structname, False):
lines.append("\t[StructLayout(LayoutKind.Explicit, Pack = " + packsize + ")]")
elif struct.packsize:
customsize = ""
if len(struct.fields) == 0:
customsize = ", Size = 1"
lines.append("\t[StructLayout(LayoutKind.Sequential, Pack = " + packsize + customsize + ")]")
if struct.callbackid:
lines.append("\t[CallbackIdentity(Constants." + struct.callbackid + ")]")
for name in g_SequentialStructs:
if name == structname:
lines.append("\t[StructLayout(LayoutKind.Sequential)]")
break
lines.append("\tpublic struct " + structname + " {")
lines.extend(insert_constructors(structname))
if struct.callbackid:
lines.append("\t\tpublic const int k_iCallback = Constants." + struct.callbackid + ";")
for field in struct.fields:
lines.extend(parse_field(field, structname))
if struct.endcomments:
for comment in struct.endcomments.rawprecomments:
if type(comment) is steamworksparser.BlankLine:
lines.append("\t\t")
else:
lines.append("\t" + comment)
lines.append("\t}")
lines.append("")
return lines
def parse_field(field, structname):
lines = []
for comment in field.c.rawprecomments:
if type(comment) is steamworksparser.BlankLine:
lines.append("\t\t")
else:
lines.append("\t" + comment)
fieldtype = g_TypeConversionDict.get(field.type, field.type)
fieldtype = g_SpecialFieldTypes.get(structname, dict()).get(field.name, fieldtype)
explicit = g_ExplicitStructs.get(structname, False)
if explicit:
lines.append("\t\t[FieldOffset(" + explicit[field.name] + ")]")
comment = ""
if field.c.rawlinecomment:
comment = field.c.rawlinecomment
if field.arraysize:
constantsstr = ""
if not field.arraysize.isdigit():
constantsstr = "Constants."
if fieldtype == "byte[]":
lines.append("\t\t[MarshalAs(UnmanagedType.ByValArray, SizeConst = " + constantsstr + field.arraysize + ")]")
if structname == "MatchMakingKeyValuePair_t":
lines.append("\t\t[MarshalAs(UnmanagedType.ByValTStr, SizeConst = " + constantsstr + field.arraysize + ")]")
else:
lines.append("\t\t[MarshalAs(UnmanagedType.ByValArray, SizeConst = " + constantsstr + field.arraysize + ")]")
fieldtype += "[]"
if fieldtype == "bool":
lines.append("\t\t[MarshalAs(UnmanagedType.I1)]")
if field.arraysize and fieldtype == "string[]":
lines.append("\t\tprivate byte[] " + field.name + "_;")
lines.append("\t\tpublic string " + field.name + comment)
lines.append("\t\t{")
lines.append("\t\t\tget { return InteropHelp.ByteArrayToStringUTF8(" + field.name + "_); }")
lines.append("\t\t\tset { InteropHelp.StringToByteArrayUTF8(value, " + field.name + "_, " + constantsstr + field.arraysize + "); }")
lines.append("\t\t}")
else:
lines.append("\t\tpublic " + fieldtype + " " + field.name + ";" + comment)
return lines
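# Illustrative example (hypothetical field, not from the SDK headers): a plain
# "uint32 m_nCount" member passes through g_TypeConversionDict and is emitted
# by parse_field() as the C# line
#
#     public uint m_nCount;
#
# (indented with two tabs), while array and bool fields additionally get the
# MarshalAs attributes generated above.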
def insert_constructors(name):
lines = []
if name == "MatchMakingKeyValuePair_t":
lines.append("\t\tMatchMakingKeyValuePair_t(string strKey, string strValue) {")
lines.append("\t\t\tm_szKey = strKey;")
lines.append("\t\t\tm_szValue = strValue;")
lines.append("\t\t}")
lines.append("")
return lines
if __name__ == "__main__":
if len(sys.argv) != 2:
print("TODO: Usage Instructions")
exit()
steamworksparser.Settings.fake_gameserver_interfaces = True
main(steamworksparser.parse(sys.argv[1]))
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains classes to wrap Python VTK to make nice molecular plots.
"""
import itertools
import math
import os
import subprocess
import time
import numpy as np
try:
import vtk
from vtk import vtkInteractorStyleTrackballCamera
except ImportError:
    # VTK not present. The camera interactor style falls back to object to
    # avoid errors in unittests.
vtk = None
vtkInteractorStyleTrackballCamera = object
from monty.dev import requires
from monty.serialization import loadfn
from pymatgen.core.periodic_table import Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.util.coord import in_coord_list
module_dir = os.path.dirname(os.path.abspath(__file__))
EL_COLORS = loadfn(os.path.join(module_dir, "ElementColorSchemes.yaml"))
class StructureVis:
"""
Provides Structure object visualization using VTK.
"""
@requires(vtk, "Visualization requires the installation of VTK with " "Python bindings.")
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=True,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
):
"""
Constructs a Structure Visualization.
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
            show_bonds: Set to True to show bonds. Defaults to False.
            show_polyhedron: Set to True to show polyhedrons. Defaults to
                True.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
Useful keyboard shortcuts implemented.
h : Show help
A/a : Increase/decrease cell by one unit vector in a-direction
B/b : Increase/decrease cell by one unit vector in b-direction
C/c : Increase/decrease cell by one unit vector in c-direction
# : Toggle showing of polyhedrons
- : Toggle showing of bonds
[ : Decrease poly_radii_tol_factor by 0.05
] : Increase poly_radii_tol_factor by 0.05
r : Reset camera direction
o : Orthogonalize structure
Up/Down : Rotate view along Up direction by 90 clock/anticlockwise
Left/right : Rotate view along camera direction by 90
clock/anticlockwise
"""
# create a rendering window and renderer
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.ren.SetBackground(1, 1, 1)
self.title = "Structure Visualizer"
# create a renderwindowinteractor
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
self.mapper_map = {}
self.structure = None
if element_color_mapping:
self.el_color_mapping = element_color_mapping
else:
self.el_color_mapping = EL_COLORS["VESTA"]
self.show_unit_cell = show_unit_cell
self.show_bonds = show_bonds
self.show_polyhedron = show_polyhedron
self.poly_radii_tol_factor = poly_radii_tol_factor
self.excluded_bonding_elements = excluded_bonding_elements if excluded_bonding_elements else []
self.show_help = True
self.supercell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.redraw()
style = StructureInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.ren.parent = self
def rotate_view(self, axis_ind=0, angle=0):
"""
Rotate the camera view.
Args:
axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.
angle: Angle to rotate by. Defaults to 0.
"""
camera = self.ren.GetActiveCamera()
if axis_ind == 0:
camera.Roll(angle)
elif axis_ind == 1:
camera.Azimuth(angle)
else:
camera.Pitch(angle)
self.ren_win.Render()
def write_image(self, filename="image.png", magnification=1, image_format="png"):
"""
Save render window to an image.
        Args:
            filename: Filename to save to. Defaults to image.png.
            magnification: Magnification factor. Use it to render high-res
                images.
            image_format: Choose between "jpeg" and "png". Defaults to "png".
"""
render_large = vtk.vtkRenderLargeImage()
render_large.SetInput(self.ren)
if image_format == "jpeg":
writer = vtk.vtkJPEGWriter()
writer.SetQuality(80)
else:
writer = vtk.vtkPNGWriter()
render_large.SetMagnification(magnification)
writer.SetFileName(filename)
writer.SetInputConnection(render_large.GetOutputPort())
self.ren_win.Render()
writer.Write()
del render_large
def redraw(self, reset_camera=False):
"""
Redraw the render window.
Args:
reset_camera: Set to True to reset the camera to a
pre-determined default for each structure. Defaults to False.
"""
self.ren.RemoveAllViewProps()
self.picker = None
self.add_picker_fixed()
self.helptxt_mapper = vtk.vtkTextMapper()
tprops = self.helptxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 0)
if self.structure is not None:
self.set_structure(self.structure, reset_camera)
self.ren_win.Render()
def orthongonalize_structure(self):
"""
Orthogonalize the structure.
"""
if self.structure is not None:
self.set_structure(self.structure.copy(sanitize=True))
self.ren_win.Render()
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a," " b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor " "by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 " "clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by " "90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def set_structure(self, structure, reset_camera=True, to_unit_cell=True):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
            to_unit_cell: Whether or not to map sites back into the unit cell.
"""
self.ren.RemoveAllViewProps()
has_lattice = hasattr(structure, "lattice")
if has_lattice:
s = Structure.from_sites(structure, to_unit_cell=to_unit_cell)
s.make_supercell(self.supercell, to_unit_cell=to_unit_cell)
else:
s = structure
inc_coords = []
for site in s:
self.add_site(site)
inc_coords.append(site.coords)
count = 0
labels = ["a", "b", "c"]
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
if has_lattice:
matrix = s.lattice.matrix
if self.show_unit_cell and has_lattice:
# matrix = s.lattice.matrix
self.add_text([0, 0, 0], "o")
for vec in matrix:
self.add_line((0, 0, 0), vec, colors[count])
self.add_text(vec, labels[count], colors[count])
count += 1
for (vec1, vec2) in itertools.permutations(matrix, 2):
self.add_line(vec1, vec1 + vec2)
for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):
self.add_line(vec1 + vec2, vec1 + vec2 + vec3)
if self.show_bonds or self.show_polyhedron:
elements = sorted(s.composition.elements, key=lambda a: a.X)
anion = elements[-1]
def contains_anion(site):
for sp in site.species.keys():
if sp.symbol == anion.symbol:
return True
return False
anion_radius = anion.average_ionic_radius
for site in s:
exclude = False
max_radius = 0
color = np.array([0, 0, 0])
for sp, occu in site.species.items():
if sp.symbol in self.excluded_bonding_elements or sp == anion:
exclude = True
break
max_radius = max(max_radius, sp.average_ionic_radius)
color = color + occu * np.array(self.el_color_mapping.get(sp.symbol, [0, 0, 0]))
if not exclude:
max_radius = (1 + self.poly_radii_tol_factor) * (max_radius + anion_radius)
nn = structure.get_neighbors(site, float(max_radius))
nn_sites = []
for neighbor in nn:
if contains_anion(neighbor):
nn_sites.append(neighbor)
if not in_coord_list(inc_coords, neighbor.coords):
self.add_site(neighbor)
if self.show_bonds:
self.add_bonds(nn_sites, site)
if self.show_polyhedron:
color = [i / 255 for i in color]
self.add_polyhedron(nn_sites, site, color)
if self.show_help:
self.helptxt_actor = vtk.vtkActor2D()
self.helptxt_actor.VisibilityOn()
self.helptxt_actor.SetMapper(self.helptxt_mapper)
self.ren.AddActor(self.helptxt_actor)
self.display_help()
camera = self.ren.GetActiveCamera()
if reset_camera:
if has_lattice:
# Adjust the camera for best viewing
lengths = s.lattice.abc
pos = (matrix[1] + matrix[2]) * 0.5 + matrix[0] * max(lengths) / lengths[0] * 3.5
camera.SetPosition(pos)
camera.SetViewUp(matrix[2])
camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)
else:
origin = s.center_of_mass
max_site = max(s, key=lambda site: site.distance_from_point(origin))
camera.SetPosition(origin + 5 * (max_site.coords - origin))
camera.SetFocalPoint(s.center_of_mass)
self.structure = structure
self.title = s.composition.formula
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render()
def show(self):
"""
Display the visualizer.
"""
self.iren.Initialize()
self.ren_win.SetSize(800, 800)
self.ren_win.SetWindowName(self.title)
self.ren_win.Render()
self.iren.Start()
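    # Minimal usage sketch (assumes VTK is installed and `structure` is a
    # pymatgen Structure; illustrative only):
    #
    #     vis = StructureVis(show_bonds=True)
    #     vis.set_structure(structure)
    #     vis.show()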
def add_site(self, site):
"""
Add a site to the render window. The site is displayed as a sphere, the
color of which is determined based on the element. Partially occupied
sites are displayed as a single element color, though the site info
still shows the partial occupancy.
Args:
site: Site to add.
"""
start_angle = 0
radius = 0
total_occu = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
total_occu += occu
vis_radius = 0.2 + 0.002 * radius
for specie, occu in site.species.items():
if not specie:
color = (1, 1, 1)
elif specie.symbol in self.el_color_mapping:
color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, start_angle + 360 * occu)
self.mapper_map[mapper] = [site]
start_angle += 360 * occu
if total_occu < 1:
mapper = self.add_partial_sphere(
site.coords,
vis_radius,
(1, 1, 1),
start_angle,
start_angle + 360 * (1 - total_occu),
)
self.mapper_map[mapper] = [site]
def add_partial_sphere(self, coords, radius, color, start=0, end=360, opacity=1.0):
"""
        Adds a partial sphere (used to display partial occupancies).
Args:
coords (nd.array): Coordinates
radius (float): Radius of sphere
color (): Color of sphere.
start (float): Starting angle.
end (float): Ending angle.
opacity (float): Opacity.
"""
sphere = vtk.vtkSphereSource()
sphere.SetCenter(coords)
sphere.SetRadius(radius)
sphere.SetThetaResolution(18)
sphere.SetPhiResolution(18)
sphere.SetStartTheta(start)
sphere.SetEndTheta(end)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphere.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetOpacity(opacity)
self.ren.AddActor(actor)
return mapper
def add_text(self, coords, text, color=(0, 0, 0)):
"""
Add text at a coordinate.
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black.
"""
source = vtk.vtkVectorText()
source.SetText(text)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.5)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
"""
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for text as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
"""
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor)
def add_polyhedron(
self,
neighbors,
center,
color,
opacity=1.0,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a polyhedron.
Args:
neighbors: Neighbors of the polyhedron (the vertices).
center: The atom in the center of the polyhedron.
color: Color for text as RGB.
opacity: Opacity of the polyhedron
draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
conv = vtk.vtkConvexPointSet()
for i, n in enumerate(neighbors):
x, y, z = n.coords
points.InsertPoint(i, x, y, z)
conv.GetPointIds().InsertId(i, i)
grid = vtk.vtkUnstructuredGrid()
grid.Allocate(1, 1)
grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
grid.SetPoints(points)
dsm = vtk.vtkDataSetMapper()
polysites = [center]
polysites.extend(neighbors)
self.mapper_map[dsm] = polysites
if vtk.VTK_MAJOR_VERSION <= 5:
dsm.SetInputConnection(grid.GetProducerPort())
else:
dsm.SetInputData(grid)
ac = vtk.vtkActor()
# ac.SetMapper(mapHull)
ac.SetMapper(dsm)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_triangle(
self,
neighbors,
color,
center=None,
opacity=0.4,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a triangular surface between three atoms.
Args:
neighbors: Atoms between which a triangle will be drawn.
color: Color for triangle as RGB.
center: The "central atom" of the triangle
opacity: opacity of the triangle
draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y, neighbors[ii].z)
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
# polydata object
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
if center is None:
raise ValueError(
"Color should be chosen according to the central atom, " "but the central atom was not provided"
)
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_faces(self, faces, color, opacity=0.35):
"""
Adds the faces of a polygon.
Args:
faces (): Coordinates of the faces.
color (): Color.
opacity (float): Opacity
"""
for face in faces:
if len(face) == 3:
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif False and len(face) == 4:  # disabled branch: ruled-surface rendering of quads
points = vtk.vtkPoints()
for ii in range(4):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
line1 = vtk.vtkLine()
line1.GetPointIds().SetId(0, 0)
line1.GetPointIds().SetId(1, 2)
line2 = vtk.vtkLine()
line2.GetPointIds().SetId(0, 3)
line2.GetPointIds().SetId(1, 1)
lines = vtk.vtkCellArray()
lines.InsertNextCell(line1)
lines.InsertNextCell(line2)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
ruledSurfaceFilter = vtk.vtkRuledSurfaceFilter()
ruledSurfaceFilter.SetInput(polydata)
ruledSurfaceFilter.SetResolution(15, 15)
ruledSurfaceFilter.SetRuledModeToResample()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(ruledSurfaceFilter.GetOutput())
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif len(face) > 3:
center = np.zeros(3, np.float_)
for site in face:
center += site
center /= np.float_(len(face))
for ii, f in enumerate(face):
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
points.InsertNextPoint(f[0], f[1], f[2])
ii2 = np.mod(ii + 1, len(face))
points.InsertNextPoint(face[ii2][0], face[ii2][1], face[ii2][2])
points.InsertNextPoint(center[0], center[1], center[2])
for ii in range(3):
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
else:
raise ValueError("Number of points for a face should be >= 3")
def add_edges(self, edges, type="line", linewidth=2, color=[0.0, 0.0, 0.0]):
"""
Args:
edges (): List of edges
type (str): Currently unused; edges are always drawn as plain lines.
linewidth (): Width of line
color (nd.array/tuple): RGB color.
"""
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
for iedge, edge in enumerate(edges):
points.InsertPoint(2 * iedge, edge[0])
points.InsertPoint(2 * iedge + 1, edge[1])
lines.InsertNextCell(2)
lines.InsertCellPoint(2 * iedge)
lines.InsertCellPoint(2 * iedge + 1)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(polydata.GetProducerPort())
else:
mapper.SetInputData(polydata)
# mapper.SetInput(polydata)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetColor(color)
ac.GetProperty().SetLineWidth(linewidth)
self.ren.AddActor(ac)
def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1):
"""
Adds bonds for a site.
Args:
neighbors: Neighbors of the site.
center: The site in the center for all bonds.
color: Color of the tubes representing the bonds
opacity: Opacity of the tubes representing the bonds
radius: Radius of the tubes representing the bonds
"""
points = vtk.vtkPoints()
points.InsertPoint(0, center.x, center.y, center.z)
n = len(neighbors)
lines = vtk.vtkCellArray()
for i in range(n):
points.InsertPoint(i + 1, neighbors[i].coords)
lines.InsertNextCell(2)
lines.InsertCellPoint(0)
lines.InsertCellPoint(i + 1)
pd = vtk.vtkPolyData()
pd.SetPoints(points)
pd.SetLines(lines)
tube = vtk.vtkTubeFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
tube.SetInputConnection(pd.GetProducerPort())
else:
tube.SetInputData(pd)
tube.SetRadius(radius)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if opacity is not None:
actor.GetProperty().SetOpacity(opacity)
if color is not None:
actor.GetProperty().SetColor(color)
self.ren.AddActor(actor)
def add_picker_fixed(self):
"""
Create a cell picker and register an annotation callback for picked sites.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotate_pick(obj, event):
if picker.GetCellId() < 0 and not self.show_help:
self.helptxt_actor.VisibilityOff()
else:
mapper = picker.GetMapper()
if mapper in self.mapper_map:
output = []
for site in self.mapper_map[mapper]:
row = [
"{} - ".format(site.species_string),
", ".join(["{:.3f}".format(c) for c in site.frac_coords]),
"[" + ", ".join(["{:.3f}".format(c) for c in site.coords]) + "]",
]
output.append("".join(row))
self.helptxt_mapper.SetInput("\n".join(output))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
self.show_help = False
self.picker = picker
picker.AddObserver("EndPickEvent", annotate_pick)
self.iren.SetPicker(picker)
def add_picker(self):
"""
Create a cell picker.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
source = vtk.vtkVectorText()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor((0, 0, 0))
follower.SetScale(0.2)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
follower.VisibilityOff()
def annotate_pick(obj, event):
if picker.GetCellId() < 0:
follower.VisibilityOff()
else:
pick_pos = picker.GetPickPosition()
mapper = picker.GetMapper()
if mapper in self.mapper_map:
site = self.mapper_map[mapper]
output = [
site.species_string,
"Frac. coords: " + " ".join(["{:.4f}".format(c) for c in site.frac_coords]),
]
source.SetText("\n".join(output))
follower.SetPosition(pick_pos)
follower.VisibilityOn()
picker.AddObserver("EndPickEvent", annotate_pick)
self.picker = picker
self.iren.SetPicker(picker)
class StructureInteractorStyle(vtkInteractorStyleTrackballCamera):
"""
A custom interactor style for visualizing structures.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
self.parent = parent
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.AddObserver("MouseMoveEvent", self.mouseMoveEvent)
self.AddObserver("LeftButtonReleaseEvent", self.leftButtonReleaseEvent)
self.AddObserver("KeyPressEvent", self.keyPressEvent)
def leftButtonPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 0
self.OnLeftButtonDown()
def mouseMoveEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 1
self.OnMouseMove()
def leftButtonReleaseEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
ren = obj.GetCurrentRenderer()
iren = ren.GetRenderWindow().GetInteractor()
if self.mouse_motion == 0:
pos = iren.GetEventPosition()
iren.GetPicker().Pick(pos[0], pos[1], 0, ren)
self.OnLeftButtonUp()
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym in "ABCabc":
if sym == "A":
parent.supercell[0][0] += 1
elif sym == "B":
parent.supercell[1][1] += 1
elif sym == "C":
parent.supercell[2][2] += 1
elif sym == "a":
parent.supercell[0][0] = max(parent.supercell[0][0] - 1, 1)
elif sym == "b":
parent.supercell[1][1] = max(parent.supercell[1][1] - 1, 1)
elif sym == "c":
parent.supercell[2][2] = max(parent.supercell[2][2] - 1, 1)
parent.redraw()
elif sym == "numbersign":
parent.show_polyhedron = not parent.show_polyhedron
parent.redraw()
elif sym == "minus":
parent.show_bonds = not parent.show_bonds
parent.redraw()
elif sym == "bracketleft":
parent.poly_radii_tol_factor -= 0.05 if parent.poly_radii_tol_factor > 0 else 0
parent.redraw()
elif sym == "bracketright":
parent.poly_radii_tol_factor += 0.05
parent.redraw()
elif sym == "h":
parent.show_help = not parent.show_help
parent.redraw()
elif sym == "r":
parent.redraw(True)
elif sym == "s":
parent.write_image("image.png")
elif sym == "Up":
parent.rotate_view(1, 90)
elif sym == "Down":
parent.rotate_view(1, -90)
elif sym == "Left":
parent.rotate_view(0, -90)
elif sym == "Right":
parent.rotate_view(0, 90)
elif sym == "o":
parent.orthongonalize_structure()
parent.redraw()
self.OnKeyPress()
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20, bitrate="10000k", quality=1, **kwargs):
r"""
Generate a movie from a sequence of structures using vtk and ffmpeg.
Args:
structures ([Structure]): sequence of structures
output_filename (str): filename for structure output. defaults to
movie.mp4
zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
fps (int): Frames per second for the movie. Defaults to 20.
bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
quality).
quality (int): A quality scale. Defaults to 1.
\\*\\*kwargs: Any kwargs supported by StructureVis to modify the images
generated.
"""
vis = StructureVis(**kwargs)
vis.show_help = False
vis.redraw()
vis.zoom(zoom)
sigfig = int(math.floor(math.log10(len(structures))) + 1)
filename = "image{0:0" + str(sigfig) + "d}.png"
for i, s in enumerate(structures):
vis.set_structure(s)
vis.write_image(filename.format(i), 3)
filename = "image%0" + str(sigfig) + "d.png"
args = [
"ffmpeg",
"-y",
"-i",
filename,
"-q:v",
str(quality),
"-r",
str(fps),
"-b:v",
str(bitrate),
output_filename,
]
subprocess.Popen(args)
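# Illustrative sketch (not part of the original module): how make_movie might be
# driven for a short trajectory. The file names are hypothetical; any extra keyword
# arguments (e.g. show_polyhedron) are passed straight through to StructureVis.
def _example_make_movie():  # pragma: no cover - documentation sketch only
    from pymatgen.core import Structure
    frames = [Structure.from_file(f) for f in ("POSCAR_step0", "POSCAR_step1")]
    make_movie(frames, output_filename="relaxation.mp4", zoom=1.2, fps=10,
               show_polyhedron=False)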
class MultiStructuresVis(StructureVis):
"""
Visualization for multiple structures.
"""
DEFAULT_ANIMATED_MOVIE_OPTIONS = {
"time_between_frames": 0.1,
"looping_type": "restart",
"number_of_loops": 1,
"time_between_loops": 1.0,
}
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=False,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
animated_movie_options=DEFAULT_ANIMATED_MOVIE_OPTIONS,
):
"""
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol's color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
show_bonds: Set to True to show bonds. Defaults to False.
show_polyhedron: Set to True to show polyhedrons. Defaults to
False.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
animated_movie_options (dict): Options controlling the animated movie; see DEFAULT_ANIMATED_MOVIE_OPTIONS for the available keys.
"""
super().__init__(
element_color_mapping=element_color_mapping,
show_unit_cell=show_unit_cell,
show_bonds=show_bonds,
show_polyhedron=show_polyhedron,
poly_radii_tol_factor=poly_radii_tol_factor,
excluded_bonding_elements=excluded_bonding_elements,
)
self.warningtxt_actor = vtk.vtkActor2D()
self.infotxt_actor = vtk.vtkActor2D()
self.structures = None
style = MultiStructuresInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.istruct = 0
self.current_structure = None
self.set_animated_movie_options(animated_movie_options=animated_movie_options)
def set_structures(self, structures, tags=None):
"""
Add list of structures to the visualizer.
Args:
structures (List of Structures):
tags (): List of tags.
"""
self.structures = structures
self.istruct = 0
self.current_structure = self.structures[self.istruct]
self.tags = tags if tags is not None else []
self.all_radii = []
self.all_vis_radii = []
for struct in self.structures:
struct_radii = []
struct_vis_radii = []
for site in struct:
radius = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
vis_radius = 0.2 + 0.002 * radius
struct_radii.append(radius)
struct_vis_radii.append(vis_radius)
self.all_radii.append(struct_radii)
self.all_vis_radii.append(struct_vis_radii)
self.set_structure(self.current_structure, reset_camera=True, to_unit_cell=False)
def set_structure(self, structure, reset_camera=True, to_unit_cell=False):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
to_unit_cell: Whether or not to fold sites back into the unit cell.
"""
super().set_structure(structure=structure, reset_camera=reset_camera, to_unit_cell=to_unit_cell)
self.apply_tags()
def apply_tags(self):
"""
Apply tags.
"""
tags = {}
for tag in self.tags:
istruct = tag.get("istruct", "all")
if istruct != "all":
if istruct != self.istruct:
continue
site_index = tag["site_index"]
color = tag.get("color", [0.5, 0.5, 0.5])
opacity = tag.get("opacity", 0.5)
if site_index == "unit_cell_all":
struct_radii = self.all_vis_radii[self.istruct]
for isite, site in enumerate(self.current_structure):
vis_radius = 1.5 * tag.get("radius", struct_radii[isite])
tags[(isite, (0, 0, 0))] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
continue
cell_index = tag["cell_index"]
if "radius" in tag:
vis_radius = tag["radius"]
elif "radius_factor" in tag:
vis_radius = tag["radius_factor"] * self.all_vis_radii[self.istruct][site_index]
else:
vis_radius = 1.5 * self.all_vis_radii[self.istruct][site_index]
tags[(site_index, cell_index)] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
for site_and_cell_index, tag_style in tags.items():
isite, cell_index = site_and_cell_index
site = self.current_structure[isite]
if cell_index == (0, 0, 0):
coords = site.coords
else:
fcoords = site.frac_coords + np.array(cell_index)
site_image = PeriodicSite(
site.species,
fcoords,
self.current_structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=site.properties,
)
self.add_site(site_image)
coords = site_image.coords
vis_radius = tag_style["radius"]
color = tag_style["color"]
opacity = tag_style["opacity"]
self.add_partial_sphere(
coords=coords,
radius=vis_radius,
color=color,
start=0,
end=360,
opacity=opacity,
)
def set_animated_movie_options(self, animated_movie_options=None):
"""
Args:
animated_movie_options ():
"""
if animated_movie_options is None:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
else:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
for key in animated_movie_options:
if key not in self.DEFAULT_ANIMATED_MOVIE_OPTIONS:
raise ValueError("Wrong option for animated movie")
self.animated_movie_options.update(animated_movie_options)
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a," " b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor " "by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 " "clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by " "90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
"n: Move to next structure",
"p: Move to previous structure",
"m: Animated movie of the structures",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def display_warning(self, warning):
"""
Args:
warning (str): Warning
"""
self.warningtxt_mapper = vtk.vtkTextMapper()
tprops = self.warningtxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(1, 0, 0)
tprops.BoldOn()
tprops.SetJustificationToRight()
self.warningtxt = "WARNING : {}".format(warning)
self.warningtxt_actor = vtk.vtkActor2D()
self.warningtxt_actor.VisibilityOn()
self.warningtxt_actor.SetMapper(self.warningtxt_mapper)
self.ren.AddActor(self.warningtxt_actor)
self.warningtxt_mapper.SetInput(self.warningtxt)
winsize = self.ren_win.GetSize()
self.warningtxt_actor.SetPosition(winsize[0] - 10, 10)
self.warningtxt_actor.VisibilityOn()
def erase_warning(self):
"""
Remove warnings.
"""
self.warningtxt_actor.VisibilityOff()
def display_info(self, info):
"""
Args:
info (str): Information.
"""
self.infotxt_mapper = vtk.vtkTextMapper()
tprops = self.infotxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 1)
tprops.BoldOn()
tprops.SetVerticalJustificationToTop()
self.infotxt = "INFO : {}".format(info)
self.infotxt_actor = vtk.vtkActor2D()
self.infotxt_actor.VisibilityOn()
self.infotxt_actor.SetMapper(self.infotxt_mapper)
self.ren.AddActor(self.infotxt_actor)
self.infotxt_mapper.SetInput(self.infotxt)
winsize = self.ren_win.GetSize()
self.infotxt_actor.SetPosition(10, winsize[1] - 10)
self.infotxt_actor.VisibilityOn()
def erase_info(self):
"""
Erase all info.
"""
self.infotxt_actor.VisibilityOff()
class MultiStructuresInteractorStyle(StructureInteractorStyle):
"""
Interactor for MultiStructureVis.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
StructureInteractorStyle.__init__(self, parent=parent)
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym == "n":
if parent.istruct == len(parent.structures) - 1:
parent.display_warning("LAST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct += 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "p":
if parent.istruct == 0:
parent.display_warning("FIRST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct -= 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "m":
parent.istruct = 0
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
nloops = parent.animated_movie_options["number_of_loops"]
tstep = parent.animated_movie_options["time_between_frames"]
tloops = parent.animated_movie_options["time_between_loops"]
if parent.animated_movie_options["looping_type"] == "restart":
loop_istructs = range(len(parent.structures))
elif parent.animated_movie_options["looping_type"] == "palindrome":
loop_istructs = list(range(len(parent.structures))) + list(range(len(parent.structures) - 2, -1, -1))
else:
raise ValueError('"looping_type" should be "restart" or "palindrome"')
for iloop in range(nloops):
for istruct in loop_istructs:
time.sleep(tstep)
parent.istruct = istruct
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.display_info(
"Animated movie : structure {:d}/{:d} "
"(loop {:d}/{:d})".format(istruct + 1, len(parent.structures), iloop + 1, nloops)
)
parent.ren_win.Render()
time.sleep(tloops)
parent.erase_info()
parent.display_info("Ended animated movie ...")
parent.ren_win.Render()
StructureInteractorStyle.keyPressEvent(self, obj, event)
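# Illustrative sketch (not part of the original module): driving MultiStructuresVis
# with a custom animated-movie configuration. The structure files are hypothetical;
# in the window, "n"/"p" step between structures and "m" plays the animated movie.
def _example_multi_structures_vis():  # pragma: no cover - documentation sketch only
    from pymatgen.core import Structure
    frames = [Structure.from_file(f) for f in ("CONTCAR_a", "CONTCAR_b")]
    vis = MultiStructuresVis(
        show_bonds=True,
        animated_movie_options={"time_between_frames": 0.2, "number_of_loops": 2},
    )
    vis.set_structures(frames)
    vis.show()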
|
|
"""
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import io
import sys
import pprint
import marshal
import six
from six.moves import cPickle as pickle
from xml.sax.saxutils import XMLGenerator
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.utils.python import to_bytes, to_unicode, to_native_str, is_listlike
from scrapy.item import BaseItem
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter',
'CsvItemExporter', 'XmlItemExporter', 'JsonLinesItemExporter',
'JsonItemExporter', 'MarshalItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by popping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful when passing keyword arguments through subclass constructors)
"""
self.encoding = options.pop('encoding', None)
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.indent = options.pop('indent', None)
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', lambda x: x)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples
(name, serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty and not isinstance(item, dict):
field_iter = six.iterkeys(item.fields)
else:
field_iter = six.iterkeys(item)
else:
if include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if field_name in item:
field = {} if isinstance(item, dict) else item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**kwargs)
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict) + '\n'
self.file.write(to_bytes(data, self.encoding))
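# Illustrative sketch (not part of the original module): exporting plain dict items
# as JSON lines into an in-memory buffer. The item contents are hypothetical.
def _example_json_lines_export():  # pragma: no cover - documentation sketch only
    buf = io.BytesIO()
    exporter = JsonLinesItemExporter(buf, encoding='utf-8')
    exporter.start_exporting()
    exporter.export_item({'name': 'example', 'price': 10})
    exporter.finish_exporting()
    # buf.getvalue() now holds one JSON object per line, e.g.
    # b'{"name": "example", "price": 10}\n'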
class JsonItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
# there is a small difference between the behaviour of JsonItemExporter.indent
# and ScrapyJSONEncoder.indent. ScrapyJSONEncoder.indent=None is needed to prevent
# the addition of newlines everywhere
json_indent = self.indent if self.indent is not None and self.indent > 0 else None
kwargs.setdefault('indent', json_indent)
kwargs.setdefault('ensure_ascii', not self.encoding)
self.encoder = ScrapyJSONEncoder(**kwargs)
self.first_item = True
def _beautify_newline(self):
if self.indent is not None:
self.file.write(b'\n')
def start_exporting(self):
self.file.write(b"[")
self._beautify_newline()
def finish_exporting(self):
self._beautify_newline()
self.file.write(b"]")
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(b',')
self._beautify_newline()
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict)
self.file.write(to_bytes(data, self.encoding))
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
if not self.encoding:
self.encoding = 'utf-8'
self.xg = XMLGenerator(file, encoding=self.encoding)
def _beautify_newline(self, new_item=False):
if self.indent is not None and (self.indent > 0 or new_item):
self._xg_characters('\n')
def _beautify_indent(self, depth=1):
if self.indent:
self._xg_characters(' ' * self.indent * depth)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
self._beautify_newline(new_item=True)
def export_item(self, item):
self._beautify_indent(depth=1)
self.xg.startElement(self.item_element, {})
self._beautify_newline()
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value, depth=2)
self._beautify_indent(depth=1)
self.xg.endElement(self.item_element)
self._beautify_newline(new_item=True)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value, depth):
self._beautify_indent(depth=depth)
self.xg.startElement(name, {})
if hasattr(serialized_value, 'items'):
self._beautify_newline()
for subname, value in serialized_value.items():
self._export_xml_field(subname, value, depth=depth+1)
self._beautify_indent(depth=depth)
elif is_listlike(serialized_value):
self._beautify_newline()
for value in serialized_value:
self._export_xml_field('value', value, depth=depth+1)
self._beautify_indent(depth=depth)
elif isinstance(serialized_value, six.text_type):
self._xg_characters(serialized_value)
else:
self._xg_characters(str(serialized_value))
self.xg.endElement(name)
self._beautify_newline()
# Workaround for https://bugs.python.org/issue17606
# Before Python 2.7.4 xml.sax.saxutils required bytes;
# since 2.7.4 it requires unicode. The bug is likely to be
# fixed in 2.7.6, but 2.7.6 will still support unicode,
# and Python 3.x will require unicode, so ">= 2.7.4" should be fine.
if sys.version_info[:3] >= (2, 7, 4):
def _xg_characters(self, serialized_value):
if not isinstance(serialized_value, six.text_type):
serialized_value = serialized_value.decode(self.encoding)
return self.xg.characters(serialized_value)
else: # pragma: no cover
def _xg_characters(self, serialized_value):
return self.xg.characters(serialized_value)
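# Illustrative sketch (not part of the original module): pretty-printed XML export
# of one item with a multi-valued field. Element names and values are hypothetical.
def _example_xml_export():  # pragma: no cover - documentation sketch only
    buf = io.BytesIO()
    exporter = XmlItemExporter(buf, indent=2, item_element='product',
                               root_element='products')
    exporter.start_exporting()
    exporter.export_item({'name': 'example', 'tags': ['a', 'b']})
    exporter.finish_exporting()
    # buf.getvalue() holds an XML document with a single <product> element whose
    # <tags> list is expanded into repeated <value> children.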
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, join_multivalued=',', **kwargs):
self._configure(kwargs, dont_fail=True)
if not self.encoding:
self.encoding = 'utf-8'
self.include_headers_line = include_headers_line
self.stream = io.TextIOWrapper(
file,
line_buffering=False,
write_through=True,
encoding=self.encoding
) if six.PY3 else file
self.csv_writer = csv.writer(self.stream, **kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value):
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item):
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='',
include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def _build_row(self, values):
for s in values:
try:
yield to_native_str(s, self.encoding)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item):
if self.include_headers_line:
if not self.fields_to_export:
if isinstance(item, dict):
# for dicts try using fields of the first item
self.fields_to_export = list(item.keys())
else:
# use fields declared in Item
self.fields_to_export = list(item.fields.keys())
row = list(self._build_row(self.fields_to_export))
self.csv_writer.writerow(row)
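# Illustrative sketch (not part of the original module): CSV export with a header
# row and a joined multi-valued field. Field names and values are hypothetical.
def _example_csv_export():  # pragma: no cover - documentation sketch only
    buf = io.BytesIO()
    exporter = CsvItemExporter(buf, fields_to_export=['name', 'tags'])
    exporter.export_item({'name': 'example', 'tags': ['a', 'b']})
    # The buffer now starts with the header row "name,tags" followed by a data
    # row like 'example,"a,b"' (the list was joined with join_multivalued).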
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=2, **kwargs):
self._configure(kwargs)
self.file = file
self.protocol = protocol
def export_item(self, item):
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + '\n'))
class PythonItemExporter(BaseItemExporter):
"""The idea behind this exporter is to have a mechanism to serialize items
to built-in python types so any serialization library (like
json, msgpack, binc, etc) can be used on top of it. Its main goal is to
seamlessly support what BaseItemExporter does plus nested items.
"""
def _configure(self, options, dont_fail=False):
self.binary = options.pop('binary', True)
super(PythonItemExporter, self)._configure(options, dont_fail)
if self.binary:
warnings.warn(
"PythonItemExporter will drop support for binary export in the future",
ScrapyDeprecationWarning)
if not self.encoding:
self.encoding = 'utf-8'
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._serialize_value)
return serializer(value)
def _serialize_value(self, value):
if isinstance(value, BaseItem):
return self.export_item(value)
if isinstance(value, dict):
return dict(self._serialize_dict(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
encode_func = to_bytes if self.binary else to_unicode
if isinstance(value, (six.text_type, bytes)):
return encode_func(value, encoding=self.encoding)
return value
def _serialize_dict(self, value):
for key, val in six.iteritems(value):
key = to_bytes(key) if self.binary else key
yield key, self._serialize_value(val)
def export_item(self, item):
result = dict(self._get_serialized_fields(item))
if self.binary:
result = dict(self._serialize_dict(result))
return result
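# Illustrative sketch (not part of the original module): flattening an item with a
# nested mapping into built-in python types, keeping text as unicode.
def _example_python_export():  # pragma: no cover - documentation sketch only
    exporter = PythonItemExporter(binary=False)
    result = exporter.export_item({'name': 'example', 'meta': {'lang': 'en'}})
    # result == {'name': 'example', 'meta': {'lang': 'en'}}, composed only of
    # built-in types, ready to feed to json, msgpack, etc.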
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Network Discovery
----------------------
The TD is on network 1 with sniffer1, network 2 has sniffer2, network 3 has
sniffer3. All three networks are connected to one IUT router.
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, btox, xtob
from bacpypes.comm import Client, Server, bind
from bacpypes.pdu import PDU, Address, LocalBroadcast
from bacpypes.vlan import Network
from bacpypes.npdu import (
npdu_types, NPDU,
WhoIsRouterToNetwork, IAmRouterToNetwork, ICouldBeRouterToNetwork,
RejectMessageToNetwork, RouterBusyToNetwork, RouterAvailableToNetwork,
RoutingTableEntry, InitializeRoutingTable, InitializeRoutingTableAck,
EstablishConnectionToNetwork, DisconnectConnectionToNetwork,
WhatIsNetworkNumber, NetworkNumberIs,
)
from ..state_machine import match_pdu, StateMachineGroup
from ..time_machine import reset_time_machine, run_time_machine
from .helpers import SnifferStateMachine, NetworkLayerStateMachine, RouterNode
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# TNetwork
#
@bacpypes_debugging
class TNetwork(StateMachineGroup):
def __init__(self):
if _debug: TNetwork._debug("__init__")
StateMachineGroup.__init__(self)
# reset the time machine
reset_time_machine()
if _debug: TNetwork._debug(" - time machine reset")
# implementation under test
self.iut = RouterNode()
# make a little LAN
self.vlan1 = Network(name="vlan1", broadcast_address=LocalBroadcast())
# test device
self.td = NetworkLayerStateMachine("1", self.vlan1)
self.append(self.td)
# sniffer node
self.sniffer1 = SnifferStateMachine("2", self.vlan1)
self.append(self.sniffer1)
# add the network
self.iut.add_network("3", self.vlan1, 1)
# make another little LAN
self.vlan2 = Network(name="vlan2", broadcast_address=LocalBroadcast())
# sniffer node
self.sniffer2 = SnifferStateMachine("4", self.vlan2)
self.append(self.sniffer2)
# add the network
self.iut.add_network("5", self.vlan2, 2)
# make another little LAN
self.vlan3 = Network(name="vlan3", broadcast_address=LocalBroadcast())
# sniffer node
self.sniffer3 = SnifferStateMachine("6", self.vlan3)
self.append(self.sniffer3)
# add the network
self.iut.add_network("7", self.vlan3, 3)
def run(self, time_limit=60.0):
if _debug: TNetwork._debug("run %r", time_limit)
# run the group
super(TNetwork, self).run()
# run it for some time
run_time_machine(time_limit)
if _debug:
TNetwork._debug(" - time machine finished")
for state_machine in self.state_machines:
TNetwork._debug(" - machine: %r", state_machine)
for direction, pdu in state_machine.transaction_log:
TNetwork._debug(" %s %s", direction, str(pdu))
# check for success
all_success, some_failed = super(TNetwork, self).check_for_success()
if _debug: TNetwork._debug(" - all_success, some_failed: %r, %r", all_success, some_failed)
assert all_success
@bacpypes_debugging
class TestSimple(unittest.TestCase):
def test_idle(self):
"""Test an idle network, nothing happens is success."""
if _debug: TestSimple._debug("test_idle")
# create a network
tnet = TNetwork()
# all start states are successful
tnet.td.start_state.success()
tnet.sniffer1.start_state.success()
tnet.sniffer2.start_state.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
@bacpypes_debugging
class TestWhoIsRouterToNetwork(unittest.TestCase):
def test_01(self):
"""Test broadcast for any router."""
if _debug: TestWhoIsRouterToNetwork._debug("test_01")
# create a network
tnet = TNetwork()
# test device sends request, sees response
tnet.td.start_state.doc("1-1-0") \
.send(WhoIsRouterToNetwork(
destination=LocalBroadcast(),
)).doc("1-1-1") \
.receive(IAmRouterToNetwork,
iartnNetworkList=[2, 3],
).doc("1-1-2") \
.success()
# sniffer on network 1 sees the request and the response
tnet.sniffer1.start_state.doc("1-2-0") \
.receive(PDU,
pduData=xtob('01.80' # version, network layer
'00' # message type, no network
)
).doc("1-2-1") \
.receive(PDU,
pduData=xtob('01.80' # version, network layer
'01 0002 0003' # message type and network list
)
).doc("1-2-2") \
.success()
# nothing received on network 2
tnet.sniffer2.start_state.doc("1-3-0") \
.timeout(3).doc("1-3-1") \
.success()
# nothing received on network 3
tnet.sniffer3.start_state.doc("1-4-0") \
.timeout(3).doc("1-4-1") \
.success()
# run the group
tnet.run()
def test_02(self):
"""Test broadcast for existing router."""
if _debug: TestWhoIsRouterToNetwork._debug("test_02")
# create a network
tnet = TNetwork()
# all start states are successful
tnet.td.start_state.doc("2-1-0") \
.send(WhoIsRouterToNetwork(2,
destination=LocalBroadcast(),
)).doc("2-1-1") \
.receive(IAmRouterToNetwork,
iartnNetworkList=[2],
).doc("2-1-2") \
.success()
tnet.sniffer1.start_state.success()
# nothing received on network 2
tnet.sniffer2.start_state.doc("2-2-0") \
.timeout(3).doc("2-2-1") \
.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
def test_03(self):
"""Test broadcast for a non-existent router."""
if _debug: TestWhoIsRouterToNetwork._debug("test_03")
# create a network
tnet = TNetwork()
# send request, receive nothing back
tnet.td.start_state.doc("3-1-0") \
.send(WhoIsRouterToNetwork(4,
destination=LocalBroadcast(),
)).doc("3-1-1") \
.timeout(3).doc("3-1-2") \
.success()
# sniffer on network 1 sees the request
tnet.sniffer1.start_state.doc("3-2-0") \
.receive(PDU,
pduData=xtob('01.80' # version, network layer
'00 0004' # message type and network
)
).doc("3-2-1") \
.success()
# sniffer on network 2 sees request forwarded by router
tnet.sniffer2.start_state.doc("3-3-0") \
.receive(PDU,
pduData=xtob('01.88' # version, network layer, routed
'0001 01 01' # snet/slen/sadr
'00 0004' # message type and network
),
).doc("3-3-1") \
.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
def test_04(self):
"""Test broadcast for a router to the network it is on."""
if _debug: TestWhoIsRouterToNetwork._debug("test_04")
# create a network
tnet = TNetwork()
# request router to network 1 on network 1, no response
tnet.td.start_state.doc("4-1-0") \
.send(WhoIsRouterToNetwork(1,
destination=LocalBroadcast(),
)).doc("4-1-1") \
.timeout(3).doc("4-1-2") \
.success()
tnet.sniffer1.start_state.success()
# nothing received on network 2
tnet.sniffer2.start_state.doc("4-2-0") \
.timeout(3).doc("4-2-1") \
.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
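# Illustrative note (not part of the original tests): the hex strings passed to
# xtob() above are the raw network-layer octets, with '.' and spaces used only as
# visual separators. For instance, '01.80' '00 0004' is NPDU version 0x01, a
# control octet of 0x80 (network layer message), message type 0x00
# (Who-Is-Router-To-Network) and the two-octet network number 4; the 0x88 control
# octet in test_03 additionally carries the SNET/SLEN/SADR of the forwarding router.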
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import eventlet
eventlet.monkey_patch(os=False)
import copy
import gettext
import os
import shutil
import sys
import tempfile
import uuid
import fixtures
from oslo.config import cfg
import testtools
from nova import context
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
from nova.objects import base as objects_base
from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common.fixture import moxstubout
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import paths
from nova import service
from nova.tests import conf_fixture
from nova.tests import policy_fixture
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3')
CONF.set_override('use_stderr', False)
logging.setup('nova')
_DB_CACHE = None
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection,
sqlite_db, sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
if db_migrate.db_version() > db_migrate.db_initial_version():
return
else:
testdb = paths.state_path_rel(sqlite_db)
if os.path.exists(testdb):
return
db_migrate.db_sync()
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = paths.state_path_rel(sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
else:
shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
paths.state_path_rel(self.sqlite_db))
class SampleNetworks(fixtures.Fixture):
"""Create sample networks in the database."""
def __init__(self, host=None):
self.host = host
def setUp(self):
super(SampleNetworks, self).setUp()
ctxt = context.get_admin_context()
network = network_manager.VlanManager(host=self.host)
bridge_interface = CONF.flat_interface or CONF.vlan_interface
network.create_networks(ctxt,
label='test',
cidr='10.0.0.0/8',
multi_host=CONF.multi_host,
num_networks=CONF.num_networks,
network_size=CONF.network_size,
cidr_v6=CONF.fixed_range_v6,
gateway=CONF.gateway,
gateway_v6=CONF.gateway_v6,
bridge=CONF.flat_network_bridge,
bridge_interface=bridge_interface,
vpn_start=CONF.vpn_start,
vlan_start=CONF.vlan_start,
dns1=CONF.flat_network_dns)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net)
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
def __init__(self, name, new_value):
self.name = name
self.new_value = new_value
def _restore(self, old_value):
sys.modules[self.name] = old_value
def setUp(self):
super(ReplaceModule, self).setUp()
old_value = sys.modules.get(self.name)
sys.modules[self.name] = self.new_value
self.addCleanup(self._restore, old_value)
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
name = name
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class TranslationFixture(fixtures.Fixture):
"""Use gettext NullTranslation objects in tests."""
def setUp(self):
super(TranslationFixture, self).setUp()
nulltrans = gettext.NullTranslations()
gettext_fixture = fixtures.MonkeyPatch('gettext.translation',
lambda *x, **y: nulltrans)
self.gettext_patcher = self.useFixture(gettext_fixture)
class TestingException(Exception):
pass
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests.
Due to the slowness of DB access, please consider deriving from
`NoDBTestCase` first.
"""
USES_DB = True
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(TranslationFixture())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
fs = '%(levelname)s [%(name)s] %(message)s'
self.log_fixture = self.useFixture(fixtures.FakeLogger(format=fs))
self.useFixture(conf_fixture.ConfFixture(CONF))
if self.USES_DB:
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(session, migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db)
self.useFixture(_DB_CACHE)
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
# registry.
objects_base.NovaObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.NovaObject._obj_classes)
self.addCleanup(self._restore_obj_registry)
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
self.addCleanup(self._clear_attrs)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
CONF.set_override('fatal_exception_format_errors', True)
CONF.set_override('enabled', True, 'osapi_v3')
CONF.set_override('force_dhcp_release', False)
# This will be cleaned up by the NestedTempfile fixture
CONF.set_override('lock_path', tempfile.mkdtemp())
def _restore_obj_registry(self):
objects_base.NovaObject._obj_classes = self._base_test_obj_backup
def _clear_attrs(self):
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
def start_service(self, name, host=None, **kwargs):
svc = self.useFixture(ServiceFixture(name, host, **kwargs))
return svc.service
class APICoverage(object):
cover_api = None
def test_api_methods(self):
self.assertTrue(self.cover_api is not None)
api_methods = [x for x in dir(self.cover_api)
if not x.startswith('_')]
test_methods = [x[5:] for x in dir(self)
if x.startswith('test_')]
self.assertThat(
test_methods,
testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
"""Fixture to start and remove time override."""
def setUp(self):
super(TimeOverride, self).setUp()
timeutils.set_time_override()
self.addCleanup(timeutils.clear_time_override)
class NoDBTestCase(TestCase):
"""
`NoDBTestCase` differs from TestCase in that DB access is not supported.
This makes tests run significantly faster. If possible, all new tests
should derive from this class.
"""
USES_DB = False
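# Illustrative sketch (not part of the original module): a minimal test built on
# NoDBTestCase, using flags() to override a registered config option for the
# duration of a single test. The option chosen here (use_stderr) is only an example.
class _ExampleNoDBTest(NoDBTestCase):  # pragma: no cover - documentation sketch only
    def test_flag_override(self):
        self.flags(use_stderr=True)
        self.assertTrue(CONF.use_stderr)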
|
|
# -*- coding: utf-8 -*-
"""
sleekxmpp.jid
~~~~~~~~~~~~~~~~~~~~~~~
This module allows for working with Jabber IDs (JIDs).
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
from __future__ import unicode_literals
import re
import socket
import stringprep
import threading
import encodings.idna
from copy import deepcopy
from sleekxmpp.util import stringprep_profiles
from sleekxmpp.thirdparty import OrderedDict
#: These characters are not allowed to appear in a JID.
ILLEGAL_CHARS = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r' + \
'\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19' + \
'\x1a\x1b\x1c\x1d\x1e\x1f' + \
' !"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~\x7f'
#: The basic regex pattern that a JID must match in order to determine
#: the local, domain, and resource parts. This regex does NOT do any
#: validation, which requires application of nodeprep, resourceprep, etc.
JID_PATTERN = re.compile(
"^(?:([^\"&'/:<>@]{1,1023})@)?([^/@]{1,1023})(?:/(.{1,1023}))?$"
)
#: The set of escape sequences for the characters not allowed by nodeprep.
JID_ESCAPE_SEQUENCES = set(['\\20', '\\22', '\\26', '\\27', '\\2f',
'\\3a', '\\3c', '\\3e', '\\40', '\\5c'])
#: A mapping of unallowed characters to their escape sequences. An escape
#: sequence for '\' is also included since it must also be escaped in
#: certain situations.
JID_ESCAPE_TRANSFORMATIONS = {' ': '\\20',
'"': '\\22',
'&': '\\26',
"'": '\\27',
'/': '\\2f',
':': '\\3a',
'<': '\\3c',
'>': '\\3e',
'@': '\\40',
'\\': '\\5c'}
#: The reverse mapping of escape sequences to their original forms.
JID_UNESCAPE_TRANSFORMATIONS = {'\\20': ' ',
'\\22': '"',
'\\26': '&',
'\\27': "'",
'\\2f': '/',
'\\3a': ':',
'\\3c': '<',
'\\3e': '>',
'\\40': '@',
'\\5c': '\\'}
JID_CACHE = OrderedDict()
JID_CACHE_LOCK = threading.Lock()
JID_CACHE_MAX_SIZE = 1024
def _cache(key, parts, locked):
JID_CACHE[key] = (parts, locked)
if len(JID_CACHE) > JID_CACHE_MAX_SIZE:
with JID_CACHE_LOCK:
while len(JID_CACHE) > JID_CACHE_MAX_SIZE:
found = None
for key, item in JID_CACHE.items():
if not item[1]: # if not locked
found = key
break
if not found: # more than MAX_SIZE locked
# warn?
break
del JID_CACHE[found]
# pylint: disable=c0103
#: The nodeprep profile of stringprep used to validate the local,
#: or username, portion of a JID.
nodeprep = stringprep_profiles.create(
nfkc=True,
bidi=True,
mappings=[
stringprep_profiles.b1_mapping,
stringprep.map_table_b2],
prohibited=[
stringprep.in_table_c11,
stringprep.in_table_c12,
stringprep.in_table_c21,
stringprep.in_table_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9,
lambda c: c in ' \'"&/:<>@'],
unassigned=[stringprep.in_table_a1])
# pylint: disable=c0103
#: The resourceprep profile of stringprep, which is used to validate
#: the resource portion of a JID.
resourceprep = stringprep_profiles.create(
nfkc=True,
bidi=True,
mappings=[stringprep_profiles.b1_mapping],
prohibited=[
stringprep.in_table_c12,
stringprep.in_table_c21,
stringprep.in_table_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9],
unassigned=[stringprep.in_table_a1])
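# Illustrative sketch (not part of the original module): the profiles created above
# are plain callables; nodeprep case-folds the local part and rejects prohibited
# characters, raising StringPrepError (translated into InvalidJID by the
# validators below).
def _example_prep_profiles():  # pragma: no cover - documentation sketch only
    assert nodeprep('RomeO') == 'romeo'
    try:
        nodeprep('not valid')   # the space character is prohibited
    except stringprep_profiles.StringPrepError:
        pass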
def _parse_jid(data):
"""
Parse string data into the node, domain, and resource
components of a JID, if possible.
:param string data: A string that is potentially a JID.
:raises InvalidJID:
:returns: tuple of the validated local, domain, and resource strings
"""
match = JID_PATTERN.match(data)
if not match:
raise InvalidJID('JID could not be parsed')
(node, domain, resource) = match.groups()
node = _validate_node(node)
domain = _validate_domain(domain)
resource = _validate_resource(resource)
return node, domain, resource
def _validate_node(node):
"""Validate the local, or username, portion of a JID.
:raises InvalidJID:
:returns: The local portion of a JID, as validated by nodeprep.
"""
try:
if node is not None:
node = nodeprep(node)
if not node:
raise InvalidJID('Localpart must not be 0 bytes')
if len(node) > 1023:
raise InvalidJID('Localpart must be less than 1024 bytes')
return node
except stringprep_profiles.StringPrepError:
raise InvalidJID('Invalid local part')
def _validate_domain(domain):
"""Validate the domain portion of a JID.
IP literal addresses are left as-is, if valid. Domain names
are stripped of any trailing label separators (`.`), and are
checked with the nameprep profile of stringprep. If the given
domain is actually a punyencoded version of a domain name, it
is converted back into its original Unicode form. Domains must
also not start or end with a dash (`-`).
:raises InvalidJID:
:returns: The validated domain name
"""
ip_addr = False
# First, check if this is an IPv4 address
try:
socket.inet_aton(domain)
ip_addr = True
except socket.error:
pass
# Check if this is an IPv6 address
if not ip_addr and hasattr(socket, 'inet_pton'):
try:
socket.inet_pton(socket.AF_INET6, domain.strip('[]'))
domain = '[%s]' % domain.strip('[]')
ip_addr = True
except (socket.error, ValueError):
pass
if not ip_addr:
# This is a domain name, which must be checked further
if domain and domain[-1] == '.':
domain = domain[:-1]
domain_parts = []
for label in domain.split('.'):
try:
label = encodings.idna.nameprep(label)
encodings.idna.ToASCII(label)
pass_nameprep = True
except UnicodeError:
pass_nameprep = False
if not pass_nameprep:
raise InvalidJID('Could not encode domain as ASCII')
if label.startswith('xn--'):
label = encodings.idna.ToUnicode(label)
for char in label:
if char in ILLEGAL_CHARS:
raise InvalidJID('Domain contains illegal characters')
if '-' in (label[0], label[-1]):
raise InvalidJID('Domain started or ended with -')
domain_parts.append(label)
domain = '.'.join(domain_parts)
if not domain:
raise InvalidJID('Domain must not be 0 bytes')
if len(domain) > 1023:
raise InvalidJID('Domain must be less than 1024 bytes')
return domain
def _validate_resource(resource):
"""Validate the resource portion of a JID.
:raises InvalidJID:
:returns: The local portion of a JID, as validated by resourceprep.
"""
try:
if resource is not None:
resource = resourceprep(resource)
if not resource:
raise InvalidJID('Resource must not be 0 bytes')
if len(resource) > 1023:
raise InvalidJID('Resource must be less than 1024 bytes')
return resource
except stringprep_profiles.StringPrepError:
raise InvalidJID('Invalid resource')
def _escape_node(node):
"""Escape the local portion of a JID."""
result = []
for i, char in enumerate(node):
if char == '\\':
if ''.join((node[i:i+3])) in JID_ESCAPE_SEQUENCES:
result.append('\\5c')
continue
result.append(char)
for i, char in enumerate(result):
if char != '\\':
result[i] = JID_ESCAPE_TRANSFORMATIONS.get(char, char)
escaped = ''.join(result)
if escaped.startswith('\\20') or escaped.endswith('\\20'):
raise InvalidJID('Escaped local part starts or ends with "\\20"')
_validate_node(escaped)
return escaped
def _unescape_node(node):
"""Unescape a local portion of a JID.
.. note::
The unescaped local portion is meant ONLY for presentation,
and should not be used for other purposes.
"""
unescaped = []
seq = ''
for i, char in enumerate(node):
if char == '\\':
seq = node[i:i+3]
if seq not in JID_ESCAPE_SEQUENCES:
seq = ''
if seq:
if len(seq) == 3:
unescaped.append(JID_UNESCAPE_TRANSFORMATIONS.get(seq, char))
# Pop character off the escape sequence, and ignore it
seq = seq[1:]
else:
unescaped.append(char)
unescaped = ''.join(unescaped)
return unescaped
def _format_jid(local=None, domain=None, resource=None):
"""Format the given JID components into a full or bare JID.
:param string local: Optional. The local portion of the JID.
:param string domain: Required. The domain name portion of the JID.
    :param string resource: Optional. The resource portion of the JID.
:return: A full or bare JID string.
"""
result = []
if local:
result.append(local)
result.append('@')
if domain:
result.append(domain)
if resource:
result.append('/')
result.append(resource)
return ''.join(result)
class InvalidJID(ValueError):
"""
Raised when attempting to create a JID that does not pass validation.
It can also be raised if modifying an existing JID in such a way as
    to make it invalid, such as trying to remove the domain from an existing
full JID while the local and resource portions still exist.
"""
# pylint: disable=R0903
class UnescapedJID(object):
"""
.. versionadded:: 1.1.10
"""
def __init__(self, local, domain, resource):
self._jid = (local, domain, resource)
# pylint: disable=R0911
def __getattr__(self, name):
"""Retrieve the given JID component.
:param name: one of: user, server, domain, resource,
full, or bare.
"""
if name == 'resource':
return self._jid[2] or ''
elif name in ('user', 'username', 'local', 'node'):
return self._jid[0] or ''
elif name in ('server', 'domain', 'host'):
return self._jid[1] or ''
elif name in ('full', 'jid'):
return _format_jid(*self._jid)
elif name == 'bare':
return _format_jid(self._jid[0], self._jid[1])
elif name == '_jid':
            return getattr(super(UnescapedJID, self), '_jid')
else:
return None
def __str__(self):
"""Use the full JID as the string value."""
return _format_jid(*self._jid)
def __repr__(self):
"""Use the full JID as the representation."""
return self.__str__()
class JID(object):
"""
A representation of a Jabber ID, or JID.
Each JID may have three components: a user, a domain, and an optional
resource. For example: user@domain/resource
When a resource is not used, the JID is called a bare JID.
The JID is a full JID otherwise.
**JID Properties:**
:jid: Alias for ``full``.
:full: The string value of the full JID.
:bare: The string value of the bare JID.
:user: The username portion of the JID.
:username: Alias for ``user``.
:local: Alias for ``user``.
:node: Alias for ``user``.
:domain: The domain name portion of the JID.
:server: Alias for ``domain``.
:host: Alias for ``domain``.
:resource: The resource portion of the JID.
:param string jid:
A string of the form ``'[user@]domain[/resource]'``.
:param string local:
Optional. Specify the local, or username, portion
of the JID. If provided, it will override the local
value provided by the `jid` parameter. The given
local value will also be escaped if necessary.
:param string domain:
Optional. Specify the domain of the JID. If
provided, it will override the domain given by
the `jid` parameter.
:param string resource:
Optional. Specify the resource value of the JID.
If provided, it will override the domain given
by the `jid` parameter.
:raises InvalidJID:
"""
# pylint: disable=W0212
def __init__(self, jid=None, **kwargs):
locked = kwargs.get('cache_lock', False)
in_local = kwargs.get('local', None)
in_domain = kwargs.get('domain', None)
in_resource = kwargs.get('resource', None)
parts = None
if in_local or in_domain or in_resource:
parts = (in_local, in_domain, in_resource)
# only check cache if there is a jid string, or parts, not if there
# are both
self._jid = None
key = None
if (jid is not None) and (parts is None):
if isinstance(jid, JID):
# it's already good to go, and there are no additions
self._jid = jid._jid
return
key = jid
self._jid, locked = JID_CACHE.get(jid, (None, locked))
elif jid is None and parts is not None:
key = parts
self._jid, locked = JID_CACHE.get(parts, (None, locked))
if not self._jid:
if not jid:
parsed_jid = (None, None, None)
elif not isinstance(jid, JID):
parsed_jid = _parse_jid(jid)
else:
parsed_jid = jid._jid
local, domain, resource = parsed_jid
if 'local' in kwargs:
local = _escape_node(in_local)
if 'domain' in kwargs:
domain = _validate_domain(in_domain)
if 'resource' in kwargs:
resource = _validate_resource(in_resource)
self._jid = (local, domain, resource)
if key:
_cache(key, self._jid, locked)
def unescape(self):
"""Return an unescaped JID object.
Using an unescaped JID is preferred for displaying JIDs
to humans, and they should NOT be used for any other
purposes than for presentation.
:return: :class:`UnescapedJID`
.. versionadded:: 1.1.10
"""
return UnescapedJID(_unescape_node(self._jid[0]),
self._jid[1],
self._jid[2])
def regenerate(self):
"""No-op
.. deprecated:: 1.1.10
"""
pass
def reset(self, data):
"""Start fresh from a new JID string.
:param string data: A string of the form ``'[user@]domain[/resource]'``.
.. deprecated:: 1.1.10
"""
self._jid = JID(data)._jid
@property
def resource(self):
return self._jid[2] or ''
@property
def user(self):
return self._jid[0] or ''
@property
def local(self):
return self._jid[0] or ''
@property
def node(self):
return self._jid[0] or ''
@property
def username(self):
return self._jid[0] or ''
@property
def bare(self):
return _format_jid(self._jid[0], self._jid[1])
@property
def server(self):
return self._jid[1] or ''
@property
def domain(self):
return self._jid[1] or ''
@property
def host(self):
return self._jid[1] or ''
@property
def full(self):
return _format_jid(*self._jid)
@property
def jid(self):
return _format_jid(*self._jid)
@resource.setter
def resource(self, value):
self._jid = JID(self, resource=value)._jid
@user.setter
def user(self, value):
self._jid = JID(self, local=value)._jid
@username.setter
def username(self, value):
self._jid = JID(self, local=value)._jid
@local.setter
def local(self, value):
self._jid = JID(self, local=value)._jid
@node.setter
def node(self, value):
self._jid = JID(self, local=value)._jid
@server.setter
def server(self, value):
self._jid = JID(self, domain=value)._jid
@domain.setter
def domain(self, value):
self._jid = JID(self, domain=value)._jid
@host.setter
def host(self, value):
self._jid = JID(self, domain=value)._jid
@full.setter
def full(self, value):
self._jid = JID(value)._jid
@jid.setter
def jid(self, value):
self._jid = JID(value)._jid
@bare.setter
def bare(self, value):
parsed = JID(value)._jid
self._jid = (parsed[0], parsed[1], self._jid[2])
def __str__(self):
"""Use the full JID as the string value."""
return _format_jid(*self._jid)
def __repr__(self):
"""Use the full JID as the representation."""
return self.__str__()
# pylint: disable=W0212
def __eq__(self, other):
"""Two JIDs are equal if they have the same full JID value."""
if isinstance(other, UnescapedJID):
return False
other = JID(other)
return self._jid == other._jid
# pylint: disable=W0212
def __ne__(self, other):
"""Two JIDs are considered unequal if they are not equal."""
return not self == other
def __hash__(self):
"""Hash a JID based on the string version of its full JID."""
return hash(self.__str__())
def __copy__(self):
"""Generate a duplicate JID."""
return JID(self)
def __deepcopy__(self, memo):
"""Generate a duplicate JID."""
return JID(deepcopy(str(self), memo))
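# Illustrative usage sketch (not part of the original module): parsing,
# component access, and bare/full formatting with the JID class above.
def _example_jid_usage():
    jid = JID('user@example.com/laptop')
    assert jid.bare == 'user@example.com'
    assert jid.full == 'user@example.com/laptop'
    jid.resource = 'phone'           # setters rebuild and re-validate the JID
    return jid.domain, jid.resource  # ('example.com', 'phone')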
|
|
import rules
from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django_extensions.db.models import TimeStampedModel
from communities.models import CommunitySubscription
from main.utils import add_perm
class EventManager(models.Manager):
def upcoming(self):
return self.get_queryset().filter(end__gte=timezone.now())
def past(self):
return self.get_queryset().filter(end__lt=timezone.now())
class Event(TimeStampedModel):
community = models.ForeignKey('communities.Community', on_delete=models.CASCADE, related_name='events')
name = models.CharField(max_length=64)
description = models.TextField(null=True, blank=True, help_text="You can write markdown here!")
slug = models.SlugField(max_length=64, help_text="Note: changing the slug will change the URL of the event")
begin = models.DateTimeField()
end = models.DateTimeField()
twitter_hashtag = models.CharField(
max_length=140, null=True, blank=True, help_text='Twitter hashtag of this event (without leading #)')
max_attendees = models.PositiveIntegerField(
blank=True, null=True,
help_text='Optional maximum number of attendees for this event. Leave blank for no limit.')
location = models.ForeignKey('locations.Location', on_delete=models.SET_NULL, related_name='events', null=True, blank=True)
publish = models.BooleanField(default=True, help_text='Should this event be published elsewhere?') # for shackspace blog posts
objects = EventManager()
def __str__(self):
return self.name
def is_upcoming(self):
return self.end >= timezone.now()
def is_past(self):
return self.end < timezone.now()
def is_full(self):
if not self.max_attendees:
return False
return self.rsvp_yes().count() >= self.max_attendees
def rsvp_yes(self):
return self.rsvps.filter(coming=True)
def rsvp_no(self):
return self.rsvps.filter(coming=False)
def save(self, *args, **kwargs):
create = False
if not self.id:
create = True
# slugify the name
slug = "{}-{}".format(slugify(self.name), str(self.begin.date()))
if Event.objects.filter(slug=slug, community=self.community):
# use datetime, because date was not unique
slug = "{}-{}".format(slugify(self.name), slugify(self.begin))
self.slug = slug
super().save(*args, **kwargs)
if create:
recipients = self.community.subscribers.filter(
userprofile__notify_on_new_event=True,
)
# send notification mail to all subscribers
if recipients:
from main.utils import send_notification
send_notification(
recipients=recipients,
                    subject='New event in community {}'.format(self.community),
template='events/mails/new_event.txt',
context={'event': self},
)
def get_absolute_url(self):
return reverse('event_detail', kwargs={'slug': self.slug,
'community_slug': self.community.slug})
def get_update_url(self):
return reverse('event_update', kwargs={'slug': self.slug,
'community_slug': self.community.slug})
def get_comment_create_url(self):
return reverse('eventcomment_create', kwargs={
'slug': self.slug, 'community_slug': self.community.slug})
def get_rsvp_yes_url(self):
return reverse('event_rsvp', kwargs={'slug': self.slug,
'community_slug': self.community.slug,
'answer': 'yes'})
def get_rsvp_no_url(self):
return reverse('event_rsvp', kwargs={'slug': self.slug,
'community_slug': self.community.slug,
'answer': 'no'})
def get_rsvp_reset_url(self):
return reverse('event_rsvp', kwargs={'slug': self.slug,
'community_slug': self.community.slug,
'answer': 'reset'})
class Meta:
ordering = ['begin', 'name']
unique_together = ('community', 'slug')
@add_perm('event.can_edit')
@rules.predicate
def can_edit_event(user, event):
if not user or not event:
return False
return user.has_perm('community.can_edit', event.community)
@add_perm('event.can_rsvp')
@add_perm('event.can_create_comment')
@rules.predicate
def is_subscriber(user, event):
if not user or not event:
return False
return user in event.community.subscribers.all()
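# Hedged sketch: with django-rules' permission backend enabled, the predicates
# above are checked through Django's standard permission API (the add_perm
# decorator from main.utils is assumed to register them under these names).
def _example_permission_checks(user, event):
    return {
        'can_edit': user.has_perm('event.can_edit', event),
        'can_rsvp': user.has_perm('event.can_rsvp', event),
        'can_comment': user.has_perm('event.can_create_comment', event),
    }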
class EventRSVP(TimeStampedModel):
event = models.ForeignKey('Event', on_delete=models.CASCADE, related_name='rsvps')
user = models.ForeignKey('auth.User', on_delete=models.CASCADE, related_name='rsvps')
coming = models.BooleanField()
def save(self, *args, **kwargs):
create = not self.id
super().save(*args, **kwargs)
if create:
recipients = set(self.event.rsvp_yes().filter(
user__userprofile__notify_on_new_rsvp_for_attending=True,
).exclude(user=self.user).values_list('user__pk', flat=True))
recipients |= set(self.event.community.community_subscriptions.filter(
role__in=[CommunitySubscription.ROLE_ADMIN, CommunitySubscription.ROLE_OWNER],
user__userprofile__notify_on_new_rsvp_for_organizer=True,
).exclude(user=self.user).values_list('user__pk', flat=True))
# send notification mail to all subscribers
if recipients:
from main.utils import send_notification
send_notification(
recipients=User.objects.filter(pk__in=list(recipients)),
subject='New RSVP for {}'.format(self.event.name),
template='events/mails/new_rsvp.txt',
context={'rsvp': self},
)
class Meta:
ordering = ('-coming', 'user__username')
unique_together = (
('event', 'user'),
)
class EventComment(TimeStampedModel):
event = models.ForeignKey('Event', on_delete=models.CASCADE, related_name='comments')
user = models.ForeignKey('auth.User', on_delete=models.CASCADE, related_name='comments')
text = models.TextField()
def save(self, *args, **kwargs):
create = not self.id
super().save(*args, **kwargs)
if create:
recipients = set(self.event.rsvp_yes().filter(
user__userprofile__notify_on_new_comment=True
).exclude(user=self.user).values_list('user__pk', flat=True))
recipients |= set(self.event.comments.filter(
user__userprofile__notify_on_new_comment=True
).exclude(user=self.user).values_list('user__pk', flat=True))
recipients |= set(self.event.community.community_subscriptions.filter(
role__in=[CommunitySubscription.ROLE_ADMIN, CommunitySubscription.ROLE_OWNER],
user__userprofile__notify_on_new_comment=True,
).exclude(user=self.user).values_list('user__pk', flat=True))
if recipients:
from main.utils import send_notification
print(recipients)
send_notification(
recipients=User.objects.filter(pk__in=recipients),
subject='New comment for {}'.format(self.event.name),
template='events/mails/new_comment.txt',
context={'comment': self},
)
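# Hedged usage sketch (not part of the original models; assumes a configured
# Django project with these apps installed): the custom manager and RSVP
# helpers defined above.
def _example_event_queries(community, user):
    upcoming = Event.objects.upcoming().filter(community=community)
    past = Event.objects.past().filter(community=community)
    event = upcoming.first()
    if event is not None and not event.is_full():
        # saving an RSVP triggers the notification logic in EventRSVP.save()
        EventRSVP.objects.get_or_create(event=event, user=user,
                                        defaults={'coming': True})
    return upcoming, past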
|
|
"""Cinder Database Manipulation."""
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import copy
import os
import time
from cloudferrylib.base.action import action
from cloudferrylib.base import exception
from cloudferrylib.copy_engines import base
from cloudferrylib.utils import files
from cloudferrylib.utils import local
from cloudferrylib.utils import log
from cloudferrylib.utils import remote_runner
from cloudferrylib.utils import sizeof_format
from cloudferrylib.utils import utils
from cloudferrylib.views import cinder_storage_view
LOG = log.getLogger(__name__)
NAMESPACE_CINDER_CONST = "cinder_database"
AVAILABLE = 'available'
CINDER_VOLUME = "cinder-volume"
HOST = 'host'
SSH_HOST = 'ssh_host'
BY_VTID = 'by_vtid'
ALL = 'all'
MOUNT_DELIM = '='
DEFAULT = 'default'
SRC = 'src'
DST = 'dst'
CLOUD = 'cloud'
RES = 'res'
CFG = 'cfg'
METADATA_TABLES = ('volume_metadata', 'volume_glance_metadata')
AWK_GET_MOUNTED_PREFIX = (
"/^nfs_shares_config/ "
"{res=$2} "
)
AWK_GET_MOUNTED_IN_BLOCK = (
" i && res && "
r"/^\[.*\]/{exit} "
)
AWK_GET_MOUNTED_SUFFIX = (
" END{print res}'"
" '%s' | xargs grep -v '^#'); "
"do mount | "
"awk '{if (match($1, \"^'$exp'$\") && $3 ~ \"cinder\") "
)
AWK_GET_MOUNTED_NFS_SHARES = ''.join([
AWK_GET_MOUNTED_PREFIX,
AWK_GET_MOUNTED_IN_BLOCK,
AWK_GET_MOUNTED_SUFFIX
])
AWK_GET_MOUNTED_LAST_NFS_SHARES = ''.join([
AWK_GET_MOUNTED_PREFIX,
AWK_GET_MOUNTED_SUFFIX
])
QUOTA_RESOURCES = ('volumes', 'gigabytes')
def _remote_runner(cloud):
return remote_runner.RemoteRunner(cloud[CFG].get(HOST),
cloud[CFG].ssh_user,
cloud[CFG].ssh_sudo_password,
sudo=True,
gateway=cloud[CFG].get(SSH_HOST))
def _volume_types_map(data):
return dict([(t['name'], t['id']) for t in data.get('volume_types', [])])
def _volume_types(data):
return data.get('volume_types', [])
def _modify_data(data):
for volume in data['volumes']:
if volume.get('status', '') != AVAILABLE:
volume['mountpoint'] = None
volume['status'] = 'available'
volume['instance_uuid'] = None
volume['attach_status'] = 'detached'
return data
def _clean_data(data):
# disregard volume types
if 'volume_types' in data:
del data['volume_types']
if 'volume_type_extra_specs' in data:
del data['volume_type_extra_specs']
return data
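# Illustrative sketch (not part of the original module): the effect of
# _modify_data() and _clean_data() on a minimal, made-up DB snapshot. The
# field names mirror the keys accessed above; the values are invented.
def _example_prepare_db_snapshot():
    data = {
        'volumes': [{
            'id': 'vol-1',
            'status': 'in-use',
            'mountpoint': '/dev/vdb',
            'instance_uuid': 'instance-uuid',
            'attach_status': 'attached',
        }],
        'volume_types': [{'id': 'vt-1', 'name': 'nfs'}],
        'volume_type_extra_specs': [],
    }
    data = _modify_data(data)  # non-available volumes become detached/available
    data = _clean_data(data)   # volume type tables are dropped before deploy
    return data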
class CinderDatabaseInteraction(action.Action):
"""Abstract Action class."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def run(self, *args, **kwargs):
"""Run action."""
pass
def get_resource(self):
"""
Get cinder-volume resource.
:return: cinder_database resource
"""
cinder_resource = self.cloud.resources.get(
utils.STORAGE_RESOURCE)
if not cinder_resource:
raise exception.AbortMigrationError(
"No resource {res} found".format(res=utils.STORAGE_RESOURCE))
return cinder_resource
class GetVolumesDb(CinderDatabaseInteraction):
"""Retrieve Db info."""
def run(self, *args, **kwargs):
"""
Run GetVolumesDb action.
:return: namespace with db info
"""
return {NAMESPACE_CINDER_CONST:
self.get_resource().read_info()}
class TransportVolumes(CinderDatabaseInteraction):
"""
Migrate volumes.
Depends on 'GetVolumesDb' action, it must be run first.
"""
def __init__(self, *args, **kwargs):
super(TransportVolumes, self).__init__(*args, **kwargs)
def run(self, *args, **kwargs):
"""Run TransportVolumes Action."""
data_from_namespace = kwargs.get(NAMESPACE_CINDER_CONST)
if not data_from_namespace:
raise exception.AbortMigrationError(
"Cannot read attribute {attribute} from namespace".format(
attribute=NAMESPACE_CINDER_CONST))
data = data_from_namespace
self.get_resource().deploy(data)
class CopyVolumes(object):
"""
Copy volumes from NFS backend(s) to NFS backend(s).
    Works via rsync; can handle big files
    and resume after errors.
"""
def __init__(self, cfg, src_cloud, dst_cloud):
self.ssh_attempts = cfg.migrate.ssh_connection_attempts
self.key_filename = cfg.migrate.key_filename
self.storage = {
SRC: cfg.src_storage,
DST: cfg.dst_storage,
}
self.clouds = {
SRC: {
CLOUD: src_cloud,
RES: src_cloud.resources.get(utils.STORAGE_RESOURCE),
CFG: cfg.src,
},
DST: {
CLOUD: dst_cloud,
RES: dst_cloud.resources.get(utils.STORAGE_RESOURCE),
CFG: cfg.dst,
}
}
self.migration = src_cloud.migration
self.data = {SRC: {}, DST: {}}
self.dst_hosts = None
self.dst_mount = None
self.dst_dir_to_provider = None
self.dst_provider_to_vtid = None
self.dst_volume_types = None
self.path_map = None
self.mount_all = {}
def run(self):
"""Copy volumes and return result data.
:return: dict
"""
LOG.info('Start volumes migration process.')
for position in self.clouds:
self.data[position] = self.clouds[position][RES].read_info()
self._skip_existing_volumes()
self._try_copy_volumes()
self.data[SRC] = _modify_data(self.data[SRC])
self.data[SRC] = self.fix_metadata(self.data[SRC])
self.data[SRC] = _clean_data(self.data[SRC])
LOG.info('Volumes migration is completed.')
return self.data[SRC]
def _skip_existing_volumes(self):
        LOG.info('Comparing existing volumes on the destination cloud; '
                 'volumes that already exist on the destination '
                 'will be skipped.')
res = []
dst_ids = [v['id'] for v in self.data[DST]['volumes']]
for v in self.data[SRC]['volumes']:
if v['id'] in dst_ids:
LOG.warning('Volume %s(%s) exists, skipping migration.',
v.get('display_name', ''), v['id'])
else:
res.append(v)
LOG.info('Volume %s(%s) does not exist on '
'the destination cloud, will be migrated.',
v.get('display_name', ''),
v['id'])
self.data[SRC]['volumes'] = res
LOG.info('All volumes on the source and '
'destination cloud have been compared.')
def fix_metadata(self, data):
"""Fix metadata table.
        Replace src image ids with the corresponding dst image ids.
:return: dict
"""
data = copy.deepcopy(data)
vol_ids = [v['id'] for v in data['volumes']]
migrated = self.migration[utils.IMAGE_RESOURCE]
for table in METADATA_TABLES:
metadata = data.get(table, {})
metadata = [m for m in metadata if m['volume_id'] in vol_ids]
for m in metadata:
if m['key'] == 'image_id':
m['value'] = migrated.migrated_id(m['value'])
data[table] = metadata
return data
def _run_cmd(self, cloud, cmd):
runner = _remote_runner(cloud)
output = runner.run(cmd)
res = output.split('\r\n')
return res if len(res) > 1 else res[0]
def run_repeat_on_errors(self, cloud, cmd):
"""Run remote command cmd.
:return: err or None
"""
runner = _remote_runner(cloud)
try:
runner.run_repeat_on_errors(cmd)
except remote_runner.RemoteExecutionError as e:
return e.message
def find_dir(self, position, paths, v):
"""
        Find the file for volume v in paths.
:return: path to the file
"""
volume_filename = self.storage[position].volume_name_template + v['id']
LOG.debug('Looking for %s in %s', volume_filename, repr(paths))
if not paths:
return None
for p in paths:
cmd = 'ls -1 %s' % p
lst = self._run_cmd(self.clouds[position], cmd)
if lst and not isinstance(lst, list):
lst = [lst]
if volume_filename in lst:
LOG.debug('Found %s in %s', volume_filename, p)
return '%s/%s' % (p, volume_filename)
def run_transfer(self, src, dst):
"""Run repeating remote commmand.
:return: True on success (or False otherwise)
"""
data = {'host_src': self.clouds[SRC][CFG].get(HOST),
'path_src': src,
'host_dst': self.clouds[DST][CFG].get(HOST),
'path_dst': os.path.join(dst, os.path.basename(src)),
'gateway': self.clouds[SRC][CFG].get(SSH_HOST)}
copier = base.get_copier(self.clouds[SRC][CLOUD],
self.clouds[DST][CLOUD],
data)
try:
copier.transfer(data)
return True
except (remote_runner.RemoteExecutionError,
                local.LocalExecutionFailed) as e:
LOG.debug(e, exc_info=True)
LOG.warning("Failed copying to %s from %s", dst, src)
return False
def volume_size(self, cloud, path):
"""
        Get the size of the volume file at `path` in bytes.
:return: int
"""
runner = _remote_runner(cloud)
return files.remote_file_size(runner, path)
def free_space(self, cloud, path):
"""
Get free space available on `path` in bytes.
:return: int
"""
cmd = (
'df -k "'
"%s"
'" | '
"awk 'FNR == 2 {print $4}'"
) % path
size = self._run_cmd(cloud, cmd)
# KB -> B
return int(size) * 1024
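    # Illustration of the command above (a hypothetical `df -k` output, added
    # for clarity and not part of the original module):
    #   Filesystem      1K-blocks     Used Available Use% Mounted on
    #   nfs:/share       52403200 10485760  41917440  21% /var/lib/cinder/mnt/x
    # `awk 'FNR == 2 {print $4}'` picks the "Available" field of the second
    # line (41917440 KB here), which free_space() then converts to bytes.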
def _clean(self, cloud, filepath):
cmd = (
'rm -f %s'
) % filepath
LOG.info("Delete volume %s", filepath)
self.run_repeat_on_errors(cloud, cmd)
def transfer_if_enough_space(self, size, src, dst):
"""Copy if enough space.
:return: True on success (or False otherwise)
"""
LOG.info('Calculate free space on the destination cloud.')
dst_free_space = self.free_space(self.clouds[DST], dst)
if dst_free_space > size:
LOG.info("Enough space found on %s", dst)
LOG.info('Start copying volume.')
return self.run_transfer(src, dst)
LOG.warning("No enough space on %s", dst)
def checksum(self, cloud, path):
"""
        Get the MD5 checksum of the file at `path`.
:return: str
"""
runner = _remote_runner(cloud)
return files.remote_md5_sum(runner, path)
def _transfer(self, src, dstpaths, volume, src_size):
LOG.debug("Trying transfer file for volume: %s[%s]",
volume.get('display_name', None), volume['id'])
dstfile = self.find_dir(DST, dstpaths, volume)
LOG.debug("Source file size = %d", src_size)
LOG.debug("Searching for space for volume: %s[%s]",
volume.get('display_name', None), volume['id'])
if dstfile:
LOG.info("File found on destination: %s", dstfile)
dst_size = self.volume_size(self.clouds[DST], dstfile)
LOG.debug("Destination file (%s) size = %d", dstfile, dst_size)
dst = os.path.dirname(dstfile)
            LOG.info('Calculating and comparing volume checksums on the '
                     'source and destination clouds.')
if src_size == dst_size:
src_md5 = self.checksum(self.clouds[SRC], src)
dst_md5 = self.checksum(self.clouds[DST], dstfile)
if src_md5 == dst_md5:
LOG.info("Destination file %s is up-to-date. "
"Sizes and checksums are matched.", dstfile)
return dst, 0
LOG.info('Checksums are different. Start copying volume %s(%s)',
volume.get('display_name', ''),
volume['id'])
start_time = time.time()
if self.transfer_if_enough_space(src_size - dst_size, src, dst):
elapsed_time = time.time() - start_time
return dst, elapsed_time
else:
LOG.info('Copying volume %s(%s) failed. '
'Volume will be deleted.',
volume.get('display_name', ''),
volume['id'])
self._clean(self.clouds[DST], dstfile)
for dst in dstpaths:
start_time = time.time()
res = self.transfer_if_enough_space(src_size, src, dst)
elapsed_time = time.time() - start_time
if res:
return dst, elapsed_time
raise exception.AbortMigrationError('No space found for %s on %s' % (
str(volume), str(dstpaths)))
def _mount_output_all(self, position, dirs_only=False):
if position in self.mount_all \
and dirs_only in self.mount_all[position]:
return self.mount_all[position][dirs_only]
self.mount_all[position] = {}
self.mount_all[position][dirs_only] = {}
cmd = (
"awk -F'[ =\t]+' '/^enabled_backends/{res=$2} END{print res}' \""
"%s"
"\" | tr ',' '\n'"
) % (self.storage[position].conf)
backend_blocks = self._run_cmd(self.clouds[position], cmd)
if backend_blocks and not isinstance(backend_blocks, list):
backend_blocks = [backend_blocks]
for backend_block in backend_blocks:
cmd = (
r"awk -F'[ =\t]+' '/^\["
"%s"
r"\]/{i=1}"
" i && /^volume_backend_name/ {res=$2}"
" res && i &&"
r" /^\[.*\]/{exit}"
" END{print res}"
"'"
" '%s'"
) % (backend_block, self.storage[position].conf)
backend = self._run_cmd(self.clouds[position], cmd)
vtid = None
if backend:
vtids = [sp['volume_type_id']
for sp in
self.data[position]['volume_type_extra_specs']
if sp['key'] == 'volume_backend_name' and
sp['value'] == backend]
vtid = vtids[0] if vtids else None
print_cmd = ("{print $3}}'; done" if dirs_only
else "{print $3\"%s\"$1}}'; done" % MOUNT_DELIM)
cmd = (
"for exp in "
r"$(awk -F'[ =\t]+' '/^\["
"%s"
r"\]/{i=1} i && "
) % (backend_block)
cmd += AWK_GET_MOUNTED_NFS_SHARES % self.storage[position].conf
cmd += print_cmd
nfs_shares = self._run_cmd(self.clouds[position], cmd)
if not isinstance(nfs_shares, list):
nfs_shares = [nfs_shares]
fld = vtid if vtid else DEFAULT
if fld not in self.mount_all[position][dirs_only]:
self.mount_all[position][dirs_only][fld] = set([])
self.mount_all[position][dirs_only][fld].update(nfs_shares)
return self.mount_all[position][dirs_only]
def _mount_output(self, position, vt=None, dirs_only=False):
if dirs_only:
print_cmd = "{print $3}}'; done"
else:
print_cmd = "{print $3\"%s\"$1}}'; done" % MOUNT_DELIM
res = None
if vt:
res = self._mount_output_all(
position, dirs_only=dirs_only).get(vt['id'], None)
if not res:
res = self._mount_output_all(
position, dirs_only=dirs_only).get(DEFAULT, None)
if not res:
# default nfs_shares_config
cmd = (
"for exp in "
"$(awk -F'[ =\t]+' '"
)
cmd += \
AWK_GET_MOUNTED_LAST_NFS_SHARES % self.storage[position].conf
cmd += print_cmd
res = self._run_cmd(self.clouds[position], cmd)
res = set(res if isinstance(res, list) else [res])
if not res:
raise exception.AbortMigrationError(
'No NFS share found on "%s"' % position)
return res
def mount_dirs(self, position, vt=None):
"""
Get shares from mount output.
:return: list of paths
"""
return self._mount_output(position, vt=vt, dirs_only=True)
def _vt_map(self):
        # src volume_type_id -> dst volume_type_id map
# cached property
if self.dst_volume_types is None:
self.dst_volume_types = _volume_types_map(self.data[DST])
res = dict(
[(vt['id'], self.dst_volume_types[vt['name']])
for vt in _volume_types(self.data[SRC])
if vt['name'] in self.dst_volume_types])
return res
def _dst_host(self, vtid=None):
# vtid -> dst_host
# cached property
if self.dst_hosts is None:
self.dst_hosts = \
[i.host for i in
self.clouds[DST][RES].cinder_client.services.list(
binary=CINDER_VOLUME) if i.state == 'up']
# cached property
if self.dst_volume_types is None:
self.dst_volume_types = _volume_types_map(self.data[DST])
host_map = {}
for h in self.dst_hosts:
if '@' in h:
_, t = h.split('@')
if t in self.dst_volume_types:
host_map[self.dst_volume_types[t]] = h
host = host_map.get(vtid, self.dst_hosts[0])
return host
def _dst_mount_info(self):
# cached property
if self.dst_mount is None:
self.dst_mount = {}
if not _volume_types(self.data[DST]):
self.dst_mount[DEFAULT] = set([
tuple(line.split(MOUNT_DELIM))
for line in self._mount_output(DST)
if line
])
for vt in _volume_types(self.data[DST]):
self.dst_mount[vt['id']] = set([])
output = self._mount_output(DST, vt=vt)
for line in output:
if line:
self.dst_mount[vt['id']].add(
tuple(line.split(MOUNT_DELIM)))
return self.dst_mount
def _dir_to_provider(self, dst):
# cached property
if self.dst_dir_to_provider is None:
mount_info = self._dst_mount_info()
if _volume_types(self.data[DST]):
self.dst_dir_to_provider = \
dict([t for vt in self.data[DST]['volume_types']
for t in mount_info[vt['id']]])
else:
self.dst_dir_to_provider = \
dict([t for t in mount_info[DEFAULT]])
return self.dst_dir_to_provider[dst]
def _provider_to_vtid(self, provider):
# cached property
if self.dst_provider_to_vtid is None:
mount_info = self._dst_mount_info()
if _volume_types(self.data[DST]):
self.dst_provider_to_vtid = \
dict([(t[1], vt['id'])
for vt in self.data[DST]['volume_types']
for t in mount_info[vt['id']]])
else:
self.dst_provider_to_vtid = \
dict([(t[1], None) for t in mount_info[DEFAULT]])
return self.dst_provider_to_vtid[provider]
def _path_map(self):
paths = {SRC: {'all': set([])}, DST: {'all': set([])}}
paths[SRC][BY_VTID] = {}
if not _volume_types(self.data[SRC]):
paths[SRC][ALL] = self.mount_dirs(SRC)
for vt in _volume_types(self.data[SRC]):
paths[SRC][BY_VTID][vt['id']] = self.mount_dirs(SRC, vt)
paths[DST][BY_VTID] = {}
mount_info = self._dst_mount_info()
if not _volume_types(self.data[DST]):
for t in mount_info.get(DEFAULT):
paths[DST][ALL].add(t[0])
for vt in _volume_types(self.data[DST]):
paths[DST][BY_VTID][vt['id']] = set(
t[0] for t in mount_info[vt['id']])
for i in self.clouds:
for sd in sorted(paths[i][BY_VTID].values()):
paths[i][ALL].update(sd)
return paths
def _paths(self, position, vtid=None):
# cached property
if self.path_map is None:
self.path_map = self._path_map()
if vtid:
res = self.path_map[position][BY_VTID][vtid]
if res:
return res
return self.path_map[position][ALL]
def _volumes_size_map(self):
LOG.info('Calculate size of each volume.')
volumes_size_map = {}
for position in self.clouds:
for v in self.data[position]['volumes']:
LOG.debug('Calculating size of volume %s on %s cloud',
v['id'], position)
volume_type_id = v.get('volume_type_id', None)
srcpaths = self._paths(position, volume_type_id)
src = self.find_dir(position, srcpaths, v)
vol_size = self.volume_size(self.clouds[position], src)
volumes_size_map[v['id']] = vol_size
LOG.info('Volume %s(%s) size is %s.',
v.get('display_name', ''),
v['id'],
sizeof_format.sizeof_fmt(vol_size))
return volumes_size_map
def _try_copy_volumes(self):
vt_map = self._vt_map()
failed = []
volumes_size_map = self._volumes_size_map()
view = cinder_storage_view.CinderStorageMigrationProgressView(
self.data[SRC]['volumes'],
self.data[DST]['volumes'],
volumes_size_map
)
view.show_stats()
for v in self.data[SRC]['volumes']:
LOG.info('Start migrate volume %s(%s)',
v.get('display_name', ''), v['id'])
volume_type_id = v.get('volume_type_id', None)
srcpaths = self._paths(SRC, volume_type_id)
LOG.debug('srcpaths: %s', str(srcpaths))
if volume_type_id in vt_map:
# src -> dst
v['volume_type_id'] = vt_map.get(volume_type_id, None)
else:
v['volume_type_id'] = None
LOG.debug('Vt map: %s', str(vt_map))
dstpaths = self._paths(DST, v['volume_type_id'])
if not dstpaths:
err_msg = 'No mount found on DST Cloud'
if v['volume_type_id']:
err_msg += ' for volume type: %s' % v['volume_type_id']
raise exception.AbortMigrationError(err_msg)
LOG.debug('dstpaths: %s', str(dstpaths))
src = self.find_dir(SRC, srcpaths, v)
if not src:
raise exception.AbortMigrationError(
'No SRC volume file found for %s[%s]'
% (v.get('display_name', None), v['id']))
dst, elapsed_time = self._transfer(src, dstpaths, v,
volumes_size_map[v['id']])
if dst:
v['provider_location'] = self._dir_to_provider(dst)
vtid = self._provider_to_vtid(v['provider_location'])
v[HOST] = self._dst_host(vtid)
view.sync_migrated_volumes_info(v, elapsed_time)
else:
failed.append(v)
view.sync_failed_volumes_info(v)
view.show_progress()
if failed:
LOG.error(
'Migration failed for volumes: %s',
', '.join([
"%s(%s)" % (v['display_name'], v['id'])
for v in failed])
)
self.data[SRC]['volumes'] = [
v for v in self.data[SRC]['volumes'] if v not in failed
]
return failed
|
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.framework as framework
import numpy as np
import configargparse
import os
import sys
import datetime
from fnst import vgg, losses, image_transform_net
# This file implements operations necessary to train the feedforward CNN for Fast Style Transfer.
def vgg_preprocessing(input_img_data):
ret = tf.to_float(input_img_data)
return ret - tf.constant([123.68, 116.779, 103.939]) # Subtract mean pixel values
def decode_image_pipeline(input_filedata, input_filename):
fnparts = os.path.splitext(input_filename)
ext = fnparts[1]
if ext == '.png':
return tf.image.decode_png(input_filedata, channels=3)
elif ext == '.jpg' or ext == '.jpeg':
return tf.image.decode_jpeg(input_filedata, channels=3)
# Get layer Tensors from the VGG-19 output collection dictionary
# The batch dimension is kept; callers slice it as needed (see build_training_network)
def get_style_layers(output_dict, scope_name='vgg_19'):
#print(str(output_dict))
return [
output_dict[scope_name+'/conv1/conv1_1'],
output_dict[scope_name+'/conv2/conv2_1'],
output_dict[scope_name+'/conv3/conv3_1'],
output_dict[scope_name+'/conv4/conv4_1'],
output_dict[scope_name+'/conv5/conv5_1'],
]
# Ditto for the content representation layer
def get_content_layer(output_dict, scope_name='vgg_19'):
return output_dict[scope_name+'/conv4/conv4_2']
# Constructs the VGG network graph with the appropriate settings
def get_vgg_layers(input_tensor, args, reuse=True):
vgg_device = None
if args.distributed:
vgg_device = '/job:worker/task:{:d}'.format(args.task_index)
with tf.device(vgg_device): # Always keep VGG pinned locally
#with slim.arg_scope([slim.variable, slim.model_variable], device='/cpu:0'):
with slim.arg_scope([slim.conv2d], reuse=reuse): # Reuse VGG weights with precompute and training compute
_, layers = vgg.vgg_19(input_tensor, is_training=False, spatial_squeeze=False)
return layers
def init_vgg_model(args, session, vgg_saver):
print('Loading VGG model weights from {}...'.format(args.vgg_19_checkpoint))
sys.stdout.flush()
vgg_saver.restore(session, args.vgg_19_checkpoint)
print('VGG model weights loaded!')
sys.stdout.flush()
# Computes the Gram matrices for the constant style images.
# 'filenames' should be a list of paths to the style images.
def precompute_gram_matrices(args, filenames, session_target=''):
print('Building precompute graph...')
sys.stdout.flush()
image_tensors = []
for filename in filenames:
print('Loading file: {}'.format(filename))
handle = open(filename, 'rb')
data = handle.read()
handle.close()
fd_tensor = tf.constant(data, dtype=tf.string, name='data-'+os.path.basename(filename))
decoded_tensor = decode_image_pipeline(fd_tensor, filename)
model_input = vgg_preprocessing(decoded_tensor)
model_input = tf.image.resize_bicubic([model_input], args.image_size)
model_input = tf.squeeze(model_input) #tf.reshape(model_input, [args.image_size[0], args.image_size[1], 3])
image_tensors.append(model_input)
stacked_images = tf.parallel_stack(image_tensors)
activations = get_vgg_layers(stacked_images, args, reuse=False)
#_, activations = vgg.vgg_19(stacked_images, is_training=False, spatial_squeeze=False)
style_layers = get_style_layers(activations)
n_style_images = len(filenames)
image_gram_matrices = []
for layer_n, batched_layer in enumerate(style_layers):
stacked_gram_matrices = losses.batched_gram_matx(args, batched_layer)
image_gram_matrices.append(stacked_gram_matrices)
vgg_model_vars = slim.get_model_variables(scope='vgg_19')
precompute_vgg_saver = tf.train.Saver(vgg_model_vars)
print("Launching precompute graph...")
sys.stdout.flush()
# Even in a distributed environment, we can just run this locally
# -- it's not as expensive as running training.
with tf.Session() as stage1:
print('initializing global variables...')
sys.stdout.flush()
stage1.run(tf.global_variables_initializer())
init_vgg_model(args, stage1, precompute_vgg_saver)
#precompute_vgg_saver.restore(stage1, args.vgg_19_checkpoint)
print('Precomputing style image activations...')
sys.stdout.flush()
actual_gram_matrices = stage1.run(image_gram_matrices)
return actual_gram_matrices, precompute_vgg_saver
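# Reference sketch (an assumption about what losses.batched_gram_matx computes,
# since that helper lives elsewhere): for one activation map of shape
# [height, width, channels], the style Gram matrix is the channels x channels
# matrix of channel inner products, typically normalized by the number of
# spatial positions.
def _example_gram_matrix(activation):
    h, w, c = activation.shape
    flat = np.reshape(activation, (h * w, c))      # positions x channels
    return np.matmul(flat.T, flat) / float(h * w)  # C x C Gram matrix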
# Builds the model network and other ops for training.
# Inputs:
# - args: a configargparse Namespace object.
# - content_input: a Tensor containing batched input images (4D, shape must be known at construction time)
# - style_gram_matrices: Precomputed style Gram matrices (returned from the above function)
# - style_image_weights: Per-file weights for each style image.
# Outputs:
# - transformed_content: The image transform network to use for training.
# - total_loss: The overall loss to use for training.
def build_training_network(args, content_input, style_gram_matrices, style_image_weights, reuse=False):
print("Building main compute graph...")
sys.stdout.flush()
transformed_content = image_transform_net.transform_net(content_input, args, reuse=reuse)
# Add ops to compute losses:
# First run everything through VGG-19:
batch_sz = content_input.shape.as_list()[0]
vgg_input = tf.concat([transformed_content, content_input], axis=0)
vgg_input = vgg_preprocessing(vgg_input)
print("VGG input shape: {}".format(vgg_input.shape))
sys.stdout.flush()
# Reuse VGG model weights from precompute
vgg_layers = get_vgg_layers(vgg_input, args, reuse=True)
#_, vgg_layers = vgg.vgg_19(vgg_input, is_training=False, spatial_squeeze=False)
# Now get the layers of interest (preserving the batch dimension)
style_layers = get_style_layers(vgg_layers, scope_name='vgg_19_1')
content_layer = get_content_layer(vgg_layers, scope_name='vgg_19_1')
# Compute content losses:
transformed_content_layers = tf.slice(content_layer, [0,0,0,0],[batch_sz,-1,-1,-1])
original_content_layers = tf.slice(content_layer, [batch_sz,0,0,0],[-1,-1,-1,-1])
batched_content_loss = losses.content_loss(original_content_layers, transformed_content_layers)
batched_content_loss *= args.content_loss_weight
# Subcomponents of overall style loss
style_loss_components = []
# Compute style loss subcomponents for each transformed image, style image, and actual layer
for layer_idx, vgg_layer in enumerate(style_layers):
transformed_input_layer = tf.slice(vgg_layer, [0, 0, 0, 0], [batch_sz, -1, -1, -1])
style_losses = losses.batched_layer_style_loss(args, transformed_input_layer, style_gram_matrices[layer_idx])
style_loss_components.append(style_losses)
content_loss = tf.reduce_sum(batched_content_loss)
style_loss = tf.reduce_sum(style_loss_components)
return transformed_content, content_loss, style_loss
# Builds ops for the input pipeline:
def build_input_ops(args):
filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(args.content_image, name='input-filenames'), name='filename-producer')
imreader = tf.WholeFileReader(name='image-reader')
filename, filedata = imreader.read(filename_queue)
#filedata = tf.Print(filedata, [filename], 'Processing as content: ')
imdata = tf.image.decode_image(filedata, channels=3)
imdata = tf.image.convert_image_dtype(imdata, tf.float32)
# Enforce image size constraints (also catch stray GIFs)
imdata = tf.image.resize_bicubic(tf.expand_dims(imdata, 0), args.image_size)
imdata = tf.reshape(imdata, args.image_size+[3])
training_batch = tf.train.shuffle_batch(
[imdata],
batch_size=args.batch_size,
capacity=args.input_queue_capacity,
min_after_dequeue=args.batch_size*4,
num_threads=args.input_threads,
shared_name='training-input-queue'
)
#print("Batch output Op shape: {}".format(str(training_batch.shape)))
#sys.stdout.flush()
return training_batch
# Builds ops for optimization, checkpointing, and other miscellaneous things.
def build_auxillary_ops(args, is_chief, loss):
# Model Save/Load:
transform_model_vars = image_transform_net.network_parameters(args)
transform_model_saver = tf.train.Saver(transform_model_vars)
# Optimization ops
global_step = framework.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(
learning_rate=args.learning_rate,
epsilon=args.adam_epsilon
)
train_step = optimizer.minimize(loss, global_step, transform_model_vars)
scaffold = tf.train.Scaffold(saver=transform_model_saver)
summary_writer = tf.summary.FileWriter(args.logdir + '-{}-{}'.format(args.job, args.task_index), tf.get_default_graph(), flush_secs=15)
hooks = [
tf.train.NanTensorHook(loss),
tf.train.SummarySaverHook(save_secs=args.summary_save_frequency, scaffold=scaffold, summary_writer=summary_writer),
]
chief_hooks=[
tf.train.CheckpointSaverHook(args.checkpoint_dir, save_secs=600, scaffold=scaffold),
tf.train.StepCounterHook(summary_writer=summary_writer),
]
return train_step, scaffold, hooks, chief_hooks, summary_writer
# 'losses' is actually a list of (deviceSpec, lossTensor) pairs
def multigpu_auxillary_ops(args, losses):
# Model Save/Load:
transform_model_vars = image_transform_net.network_parameters(args)
transform_model_saver = tf.train.Saver(transform_model_vars)
# Optimization ops
global_step = framework.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(
learning_rate=args.learning_rate,
epsilon=args.adam_epsilon
)
# compute gradients on each tower
tower_gvs = []
tower_losses = []
for device, loss in losses:
with tf.device(device):
grads_and_vars = optimizer.compute_gradients(loss, var_list=transform_model_vars)
tower_gvs.append(grads_and_vars)
tower_losses.append(loss)
grads = {} # dict mapping variables to gradients
for tower in tower_gvs:
for grad, var in tower:
grad = tf.expand_dims(grad, 0) # create a 'tower' dimension
if var in grads:
grads[var].append(grad)
else:
grads[var] = [grad]
with tf.device('/cpu:0'):
applied_gradients = []
for var, tower_grads in grads.items():
tower_grads = tf.concat(tower_grads, axis=0) # 5D-tensor (shape [tower, batch, height, width, depth])
avg_grad = tf.reduce_mean(tower_grads, axis=0) # 4D-tensor (tower dimension removed)
applied_gradients.append( (avg_grad, var) )
train_step = optimizer.apply_gradients(applied_gradients, global_step=global_step)
avg_loss = tf.reduce_mean(tower_losses)
tf.summary.scalar('Total Loss', avg_loss)
#train_step = optimizer.minimize(loss, global_step, transform_model_vars)
scaffold = tf.train.Scaffold(saver=transform_model_saver)
summary_writer = tf.summary.FileWriter(args.logdir + '-{}-{}'.format(args.job, args.task_index), tf.get_default_graph(), flush_secs=15)
hooks = [
tf.train.NanTensorHook(avg_loss),
tf.train.SummarySaverHook(save_secs=args.summary_save_frequency, scaffold=scaffold, summary_writer=summary_writer),
]
chief_hooks=[
tf.train.CheckpointSaverHook(args.checkpoint_dir, save_secs=600, scaffold=scaffold),
tf.train.StepCounterHook(summary_writer=summary_writer),
]
return train_step, scaffold, hooks, chief_hooks, summary_writer
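# Plain-numpy illustration (not used by the training code): the tower-averaging
# pattern above stacks each variable's per-tower gradients along a new leading
# dimension and takes the mean over that dimension.
def _example_average_tower_gradients(per_tower_grads):
    # per_tower_grads: list of numpy arrays, one gradient per tower, same shape
    stacked = np.stack(per_tower_grads, axis=0)  # [tower, ...original shape]
    return np.mean(stacked, axis=0)              # tower dimension removed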
# Runs precompute and builds the model + aux. ops.
# Can be run in a tf.Device context if necessary.
def setup_training(args, is_chief=True, server=None, cluster=None):
session_target=''
if server is not None:
session_target = server.target
style_image_paths = []
style_image_weights = []
for style_img_spec in args.style_image:
p = style_img_spec.split(':')
style_image_paths.append(p[0])
style_image_weights.append(float(p[1]))
input_device = None # Device for input ops
precompute_device = None # Device for Gram matrix precomputation
compute_device = None # Device for main network computations
if args.distributed:
# Input processing is always pinned to the chief worker.
# Each node's precomputations are always pinned to that node--
# this should prevent conflicts when restoring the VGG model.
# Compute is distributed over each server using the typical Parameter Server / Worker model.
input_device = '/job:worker/task:0'
precompute_device = '/job:worker/task:{:d}'.format(args.task_index)
compute_device = tf.train.replica_device_setter(
cluster=cluster,
worker_device='/job:worker/task:{:d}'.format(args.task_index)
)
else:
input_device = '/cpu:0'
with tf.device(precompute_device):
gram_matrices, vgg_saver = precompute_gram_matrices(args, style_image_paths, session_target)
with tf.device(input_device):
train_input = build_input_ops(args)
if args.n_gpus <= 1:
# No or single-GPU case:
with tf.device(compute_device):
transform_out, content_loss, style_loss = build_training_network(args, train_input, gram_matrices, style_image_weights)
variation_loss = tf.reduce_sum(tf.image.total_variation(transform_out)) * args.variation_loss_weight
loss = content_loss + style_loss + variation_loss
tf.summary.scalar('Content Loss', content_loss)
tf.summary.scalar('Variation Loss', variation_loss)
tf.summary.scalar('Style Loss', style_loss)
tf.summary.scalar('Total Loss', loss)
tf.summary.image('Transform Network Output', transform_out, max_outputs=3)
train_step, scaffold, hooks, chief_hooks, summary_writer = build_auxillary_ops(args, is_chief, loss)
else:
# Multi-GPU training:
per_gpu_losses = []
per_gpu_content_losses = []
per_gpu_style_losses = []
per_gpu_variation_losses = []
per_gpu_total_losses = []
per_gpu_transforms = []
for gpu_id in range(args.n_gpus):
device = '/gpu:{:d}'.format(gpu_id)
with tf.device(device):
reuse=True
if gpu_id == 0:
reuse=False
transform_out, content_loss, style_loss = build_training_network(args, train_input, gram_matrices, style_image_weights, reuse=reuse)
per_gpu_style_losses.append(style_loss)
per_gpu_content_losses.append(content_loss)
                # compute variation loss for this tower:
variation_loss = tf.reduce_sum(tf.image.total_variation(transform_out)) * args.variation_loss_weight
per_gpu_variation_losses.append(variation_loss)
loss = content_loss + style_loss + variation_loss
per_gpu_total_losses.append(loss)
per_gpu_losses.append( (device, loss) )
per_gpu_transforms.append(transform_out)
tf.summary.scalar('Content Loss', tf.reduce_mean(per_gpu_content_losses))
tf.summary.scalar('Variation Loss', tf.reduce_mean(per_gpu_variation_losses))
tf.summary.scalar('Style Loss', tf.reduce_mean(per_gpu_style_losses))
tf.summary.scalar('Total Loss', tf.reduce_mean(per_gpu_total_losses))
        # concatenate per-tower transform outputs along batch dimension
all_transforms_out = tf.concat(per_gpu_transforms, axis=0)
tf.summary.image('Transform Network Output', all_transforms_out, max_outputs=3)
train_step, scaffold, hooks, chief_hooks, summary_writer = multigpu_auxillary_ops(args, per_gpu_losses)
return train_step, scaffold, hooks, chief_hooks, summary_writer, vgg_saver
# Adds training-specific parameters to an ArgumentParser.
def add_training_args(parser):
parser.add_argument('--training', action='store_true', help='Perform transform model training.')
parser.add_argument('--learning-rate', type=float, default=0.001, help='Transformation network learning rate.')
parser.add_argument('--adam-epsilon', type=float, default=1e-08, help='Epsilon value for Adam optimizer.')
parser.add_argument('--content-loss-weight', default=8e-4, type=float, help='Alpha parameter for loss calculation.')
parser.add_argument('--variation-loss-weight', default=1e-5, type=float, help='Weighting factor for total variation in losses.')
parser.add_argument('--image-size', required=True, type=int, nargs=2, help='Height and width of ALL images used for training (both style and content)')
parser.add_argument('--content-image', required=True, action='append', help='File pattern (glob) matching training input (content) images. All inputs must be either PNGs or JPEGs.')
parser.add_argument('--style-image', required=True, action='append', help='An input style image path:weight pair (i.e. my_image.jpg:1.0).')
parser.add_argument('--vgg-19-checkpoint', default='vgg_19.ckpt', help='Checkpoint file containing VGG-19 model weights')
parser.add_argument('--session-trace-frequency', type=int, default=20, help='Trace and output session information every N training steps.')
parser.add_argument('--console-output-frequency', type=int, default=20, help='Print step information every N training steps.')
parser.add_argument('--summary-save-frequency', type=int, default=25, help='Save summaries every N seconds.')
parser.add_argument('--logdir', default='nst-logdir', help='Directory to save summaries to')
parser.add_argument('--input-queue-capacity', type=int, default=10000, help='Maximum number of images to keep prefetched in the input image queue.')
parser.add_argument('--batch-size', type=int, default=10, help='Training batch size.')
parser.add_argument('--training-epochs', type=int, default=None, help='Number of training epochs to perform')
parser.add_argument('--input-threads', type=int, default=4, help='Number of threads for input prefetching.')
parser.add_argument('--n-gpus', type=int, default=0, help='Number of GPUs available on this system')
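# Minimal usage sketch (a hypothetical invocation, not part of the original
# entry point): build a parser with the flags above and parse a small command
# line for a 256x256 training run. The file paths are placeholders.
def _example_parse_args():
    parser = configargparse.ArgumentParser()
    add_training_args(parser)
    return parser.parse_args([
        '--training',
        '--image-size', '256', '256',
        '--content-image', 'data/content/*.jpg',
        '--style-image', 'data/style/starry_night.jpg:1.0',
    ])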
|
|
# Copyright 2011 Department of Defence
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import couchdb
import urlparse
import json
import urllib2
import threading
import re
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.model import LRNode as sourceLRNode, \
NodeServiceModel, ResourceDataModel, LRNodeModel, defaultCouchServer, appConfig
from lr.lib.base import BaseController, render
from lr.lib import helpers as h
import base64
import pprint
import Queue
log = logging.getLogger(__name__)
class DistributeController(BaseController):
__TARGET_NODE_INFO = 'taget_node_info'
__OK = 'ok'
__ERROR = 'error'
def __before__(self):
self.resource_data = appConfig['couchdb.db.resourcedata']
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('distribute', 'distribute')
def destination(self):
"""GET /destination: return node information"""
# url('distribute')
response = {self.__OK: True}
try:
response[self.__TARGET_NODE_INFO] = sourceLRNode.distributeInfo
except Exception as ex:
log.exception(ex)
response["error":"Internal error"]
log.info("received distribute request...returning: \n"+pprint.pformat(response, 4))
return json.dumps(response)
def _getDistinationInfo(self, connection):
        # Make sure we only have one slash in the url path. More than one
        # confuses the pylons routing library.
destinationURL = urlparse.urljoin(connection.destination_node_url.strip(),
"destination")
request = urllib2.Request(destinationURL)
credential = sourceLRNode.getDistributeCredentialFor(destinationURL)
if credential is not None:
base64string = base64.encodestring('%s:%s' % (credential['username'],credential['password'])).replace("\n", "")
request.add_header("Authorization", "Basic %s" % base64string)
log.info("\n\nAccess destination node at: "+pprint.pformat(request.__dict__))
return json.load(urllib2.urlopen(request))
def _canDistributeTo(self, connection, sourceNodeInfo):
if not connection.active:
return {self.__OK: False,
'connection_id': connection.connection_id,
self.__ERROR: 'Inactive connection'}
result={self.__OK:True, 'connection_id': connection.connection_id }
sourceNodeInfo = h.dictToObject(sourceNodeInfo)
try:
destinationNodeInfo = h.dictToObject(self._getDistinationInfo(connection)[self.__TARGET_NODE_INFO])
result['destinationNodeInfo'] = destinationNodeInfo
            # Don't bother going through all the filter-out rules if the source and
            # destination nodes are on the same community and network.
if((sourceNodeInfo.community_id == destinationNodeInfo.community_id) and
(sourceNodeInfo.network_id == destinationNodeInfo.network_id) and
not (sourceNodeInfo.gateway_node and destinationNodeInfo.gateway_node)):
pass
elif sourceNodeInfo.node_id == destinationNodeInfo.node_id:
result[self.__ERROR] = "Source and destination node must be different node."
elif ((sourceNodeInfo.gateway_node or destinationNodeInfo.gateway_node) != connection.gateway_connection):
result[self.__ERROR] = " 'gateway_connection' mismatch between nodes and connection data"
elif ((sourceNodeInfo.community_id != destinationNodeInfo.community_id) and
((not sourceNodeInfo.social_community) or (not destinationNodeInfo.social_community))):
result[self.__ERROR] = 'cannot distribute across non social communities'
elif ((sourceNodeInfo.network_id != destinationNodeInfo.network_id) and
((not sourceNodeInfo.gateway_node)or(not destinationNodeInfo.gateway_node))):
result[self.__ERROR] = 'cannot distribute across networks (or communities) unless gateway'
elif ((sourceNodeInfo.gateway_node and destinationNodeInfo.gateway_node)
and (sourceNodeInfo.network_id == destinationNodeInfo.network_id)):
result[self.__ERROR] = 'gateways must only distribute across different networks'
elif (sourceNodeInfo.gateway_node and not destinationNodeInfo.gateway_node):
result[self.__ERROR] = 'gateways can only distribute to gateways'
except urllib2.URLError as ex:
log.exception(ex)
result[self.__ERROR] = "Cannot reach destination node. "+str(ex.reason)
except Exception as ex:
log.exception(ex)
result[self.__ERROR] = "Internal error. Cannot process destination node info"
if result.has_key(self.__ERROR):
result[self.__OK] = False
return result
def _getDistributeDestinations(self):
""""Method to test the connections and returns a list of destionation node
if the connections are valid"""
gatewayConnectionList = []
connectionsStatusInfo = {self.__OK:True, 'connections':[]}
for connection in sourceLRNode.connections:
# Make sure that the connection is active
connectionsStatusInfo['connections'].append(self._canDistributeTo(connection, sourceLRNode.distributeInfo))
if (connectionsStatusInfo['connections'][-1][self.__OK] and
sourceLRNode.distributeInfo['gateway_node'] and
connectionsStatusInfo['connections'][-1]['destinationNodeInfo'].gateway_node and
connection.gateway_connection):
gatewayConnectionList.append(connection)
            # Only one gateway connection is allowed; more than one indicates a faulty network description
if len(gatewayConnectionList) > 1:
log.info("***Abort distribution. More than one gateway node connection")
connectionsStatusInfo[self.__ERROR] ="only one active gateway connection is allowed, faulty network description"
break
if len (sourceLRNode.connections) == 0:
connectionsStatusInfo[self.__ERROR] ="No connection present for distribution"
if connectionsStatusInfo.has_key(self.__ERROR) :
connectionsStatusInfo[self.__OK] = False
return connectionsStatusInfo
def create(self):
"""POST / distribute start distribution"""
distributeResults = Queue.Queue()
def doDistribution(connectionInfo, server, sourceUrl):
            # Always use the replication filter function so that only
            # distributable docs are replicated and every other document type
            # is filtered out. The query arguments are filled in below, once we
            # know whether the destination defines a filter.
replicationOptions={'filter':ResourceDataModel.REPLICATION_FILTER,
'source':sourceUrl,
'connection_id': connectionInfo['connection_id'],
'query_params': None}
            # If the destination node uses a filter and it is not custom, use it
            # as the query params for the filter function
if ((connectionInfo['destinationNodeInfo'].filter_description is not None ) and
(connectionInfo['destinationNodeInfo'].filter_description.get('custom_filter') == False)):
replicationOptions['query_params'] =connectionInfo['destinationNodeInfo'].filter_description
#if distinationNode['distribute service'] .service_auth["service_authz"] is not None:
#log.info("Destination node '{}' require authentication".format(destinationUrl))
#Try to get the user name and password the url
#destinationUrl = connectionInfo['destinationNodeInfo'].resource_data_url
destinationUrl = connectionInfo['destinationNodeInfo'].incoming_url
credential = sourceLRNode.getDistributeCredentialFor(destinationUrl)
if credential is not None:
parsedUrl = urlparse.urlparse(destinationUrl)
destinationUrl = destinationUrl.replace(parsedUrl.netloc, "{0}:{1}@{2}".format(
credential['username'], credential['password'], parsedUrl.netloc))
if replicationOptions['query_params'] is None:
del replicationOptions['query_params']
replicationOptions['target'] = destinationUrl
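            # Illustrative sketch (an assumption for clarity, not taken verbatim from
            # this codebase): the replication document POSTed to CouchDB's _replicator
            # database now has roughly this shape:
            #     {"source": "<source resource_data URL>",
            #      "target": "<destination incoming URL, possibly with credentials>",
            #      "filter": <value of ResourceDataModel.REPLICATION_FILTER>,
            #      "connection_id": "<connection id>",
            #      "query_params": {<destination filter description>}}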
            authz_header = h.getBasicAuthHeaderFromURL(appConfig['couchdb.url.dbadmin'])
            authz_header.update({'Content-Type': 'application/json'})
request = urllib2.Request(urlparse.urljoin(appConfig['couchdb.url'], '_replicator'),
headers=authz_header,
data = json.dumps(replicationOptions))
log.info("\n\nReplication started\nSource:{0}\nDestionation:{1}\nArgs:{2}".format(
sourceUrl, destinationUrl, pprint.pformat(replicationOptions)))
results = json.load(urllib2.urlopen(request))
connectionInfo['replication_results'] = results
distributeResults.put(connectionInfo)
log.debug("Replication results: " + pprint.pformat(results))
log.info("Distribute.......\n")
        # Check if the distribute service is available on the node.
#if(sourceLRNode.isServiceAvailable(NodeServiceModel.DISTRIBUTE) == False):
#log.info("Distribute not available on node ")
#return
if((sourceLRNode.connections is None) or
(len(sourceLRNode.connections) ==0)):
log.info("No connection present for distribution")
            return json.dumps({self.__ERROR: 'No connection present for distribution'})
log.info("Connections: \n{0}\n"+pprint.pformat([c.specData for c in sourceLRNode.connections]))
connectionsStatusInfo = self._getDistributeDestinations()
log.debug("\nSource Node Info:\n{0}".format(pprint.pformat(sourceLRNode.distributeInfo)))
log.debug("\n\n Distribute connections:\n{0}\n\n".format(pprint.pformat(connectionsStatusInfo)))
for connectionStatus in connectionsStatusInfo['connections']:
            if self.__ERROR in connectionsStatusInfo or self.__ERROR in connectionStatus:
distributeResults.put(connectionStatus)
else:
replicationArgs = (connectionStatus, defaultCouchServer, self.resource_data )
# Use a thread to do the actual replication.
replicationThread = threading.Thread(target=doDistribution, args=replicationArgs)
replicationThread.start()
replicationThread.join()
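                # Note: joining immediately after start() waits for this replication
                # request to complete before the next connection is processed, so the
                # distributions effectively run one at a time.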
log.debug("\n\n\n---------------------distribute threads end--------------------\n\n\n")
log.debug("\n\n\n----------Queue results Completed size: {0}--------------\n\n\n".format(distributeResults.qsize()))
connectionsStatusInfo['connections'] = []
        while not distributeResults.empty():
connectionsStatusInfo['connections'].append(distributeResults.get())
log.debug("\n\n======== DISTRIBUTE RESULTS ============\n\n")
log.debug(pprint.pformat(connectionsStatusInfo))
return json.dumps(connectionsStatusInfo, indent=4)
#!/usr/bin/env python3.5
#
# tools.py
#
# (C) The James Hutton Institute 2016
# Author: Leighton Pritchard
"""
tools.py
This module provides helper functions used in the supplementary information
notebooks and scripts for the Holmes et al. (2017) paper.
"""
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import pickle
import random
import scipy.stats
import scipy.stats.mstats
import seaborn as sns
from collections import defaultdict
from Bio import SeqIO
# PRNG seed
SEED = 123456789
def corrfunc(x, y, **kws):
"""Return a matplotlib axis with text describing the Spearman
correlation coefficient for x and y
This function is written to support plot_correlation
"""
coeff, _ = scipy.stats.spearmanr(x, y)
ax = plt.gca()
ax.annotate("r = {:.3f}".format(coeff),
xy=(.3, .5), size=15,
xycoords=ax.transAxes)
def plot_correlation(df, title=None):
"""Render Seaborn PairGrid of columns in df, with Pearson correlation
coefficients in the upper triangle, and KDE plots on the diagonal.
"""
g = sns.PairGrid(df)
g.map_lower(plt.scatter)
g.map_diag(sns.kdeplot, legend=False)
g.map_upper(corrfunc)
g.set(xticklabels=[])
g.set(title=title or '')
return g
def quantile_norm(df, columns=None):
"""Normalise the columns of df to each have the same distribution"""
df_matrix = df.as_matrix(columns=columns)
quantiles = np.mean(np.sort(df_matrix, axis=0), axis=1)
ranks = scipy.stats.mstats.rankdata(df_matrix, axis=0).astype(int) - 1
norm_matrix = quantiles[ranks]
    return pd.DataFrame(data=norm_matrix, index=df.index,
                        columns=columns or df.columns)
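# Illustrative sketch (not part of the original SI): a minimal example of what
# quantile_norm is expected to do, assuming a small toy DataFrame with three
# replicate columns. After normalisation every column contains the same set of
# values (the row-wise means of the sorted columns), assigned by rank.
def _example_quantile_norm():
    """Demonstrate quantile_norm on a toy DataFrame (illustrative only)."""
    toy = pd.DataFrame({'input.1': [5.0, 2.0, 3.0],
                        'input.2': [4.0, 1.0, 4.0],
                        'input.3': [3.0, 4.0, 6.0]})
    # Each column of the result shares the same distribution of values.
    return quantile_norm(toy)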
def plot_normalised(ctl_in, ctl_out, trt_in, trt_out):
"""Return violin plots of input/output control/treatment distributions"""
fig, axes = plt.subplots(2, 2, figsize=(12,6))
fig.subplots_adjust(hspace=.25)
axes = axes.ravel()
for ttl, arr, ax in zip(("control input", "control output",
"treatment input", "treatment output"),
(ctl_in, ctl_out, trt_in, trt_out),
axes):
ax.set_title(ttl)
sns.violinplot(np.log(arr), ax=ax)
def wide_to_long_df(df, stage):
"""Convert wide dataframe to long
This function is brittle, and only for Holmes et al SI
"""
if not stage:
stagestr = 'input'
else:
stagestr = 'output'
df.reset_index(level=0, inplace=True) # make probes a column
df = pd.melt(df, id_vars=['Systematic'],
value_vars=['{0}.1'.format(stagestr),
'{0}.2'.format(stagestr),
'{0}.3'.format(stagestr)])
df.columns = ['probe', 'class', stagestr]
df.loc[:, 'replicate'] = df['class'].astype(str).str[-1].astype(np.int64)
df = df[['probe', 'replicate', stagestr]]
df.set_index(['probe', 'replicate'], inplace=True)
return df
def wide_to_long_join(df_in, df_out, treatment):
"""Convert two wide dataframes to long and join on common index
This function is brittle and only for Holmes et al SI
"""
if treatment:
treatval = 1
else:
treatval = 0
df = pd.merge(wide_to_long_df(df_in, 0), wide_to_long_df(df_out, 1),
left_index=True, right_index=True)
df['treatment'] = treatval
df.reset_index(inplace=True)
return df
def wide_to_long(ctl_in, ctl_out, trt_in, trt_out):
"""Convert four dataframes from wide to long format
This function returns a dataframe with columns:
* probe
* replicate
* treatment
* repXtrt (combination of replicate and treatment)
* input
* output
* log_input
* log_output
"""
ctl_long = wide_to_long_join(ctl_in, ctl_out, treatment=False)
trt_long = wide_to_long_join(trt_in, trt_out, treatment=True)
data = ctl_long.append(trt_long, ignore_index=True)
data['log_input'] = np.log(data['input'])
data['log_output'] = np.log(data['output'])
data['repXtrt'] = 'rep' + data['replicate'].map(str) +\
'trt' + data['treatment'].map(str)
data = data[['probe',
'replicate', 'treatment', 'repXtrt',
'input', 'output',
'log_input', 'log_output']]
return data
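# Illustrative sketch (not part of the original SI): the wide input dataframes
# are expected to be indexed by probe ID under the name 'Systematic', with one
# column per replicate named '<stage>.1', '<stage>.2' and '<stage>.3'. The toy
# values below are made up purely to show the expected layout.
def _example_wide_to_long():
    """Demonstrate wide_to_long on toy control/treatment dataframes."""
    def _toy(stage):
        # One column per replicate, e.g. 'input.1', 'input.2', 'input.3'
        return pd.DataFrame({'{0}.{1}'.format(stage, rep): [1.0, 2.0, 3.0]
                             for rep in (1, 2, 3)},
                            index=pd.Index(['p1', 'p2', 'p3'],
                                           name='Systematic'))
    return wide_to_long(_toy('input'), _toy('output'),
                        _toy('input'), _toy('output'))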
def plot_input_output_violin(data):
"""Plot Seaborn violin plot of log input and output data"""
input_v_output = pd.melt(data,
id_vars=['probe', 'replicate', 'treatment'],
value_vars=['log_input', 'log_output'])
input_v_output.columns = ['probe', 'replicate', 'treatment',
'stage', 'log_intensity']
g = sns.violinplot(data=input_v_output, x="treatment", y="log_intensity",
hue="stage", split=True)
g.set_xticklabels(['control', 'treatment'])
g.set_ylabel("log(intensity)")
g.set_xlabel("")
g.set_title("log(intensity) distribution by treatment and input/output")
def unique_probe_matches(blastfiles):
"""Returns a dataframe of unique queries and their unique matches"""
    # Columns in a BLAST+ blastn '-outfmt 6' file
blast_columns = ['probe', 'match', 'identity', 'length', 'mismatch',
'gapopen', 'qstart', 'qend', 'sstart', 'send',
'evalue', 'bitscore']
df = None
for bfile in blastfiles:
if df is None:
df = pd.read_csv(bfile, sep="\t", names=blast_columns)
else:
df = df.append(pd.read_csv(bfile, sep="\t",
names=blast_columns))
df = df.drop_duplicates('probe') # Drop rows with repeated probes
return df
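# Illustrative usage sketch (filenames below are placeholders, not from the
# original SI): each input file is expected to be tab-separated BLAST+ blastn
# '-outfmt 6' output, e.g.
#     matches = unique_probe_matches(['probes_vs_genomeA.blasttab',
#                                     'probes_vs_genomeB.blasttab'])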
def annotate_seqdata(df, seqfiles):
"""Returns the passed dataframe, annotated with locus tags"""
ids = []
locus_tags = []
for seqfile in seqfiles:
for seq in SeqIO.parse(seqfile, 'fasta'):
labels = seq.description.split(' ')
for label in labels:
if label.startswith('[locus_tag'):
ids.append(seq.id)
locus_tags.append(label.split('=')[-1][:-1])
seqdf = pd.DataFrame({'match': ids, 'locus_tag': locus_tags})
return pd.merge(df, seqdf, 'inner', ['match'])
def index_column(df, colname):
"""Return the dataframe, with an index column for 'probe's"""
col_ids = df[colname].unique()
nvals = len(col_ids)
col_lookup = dict(zip(col_ids, range(nvals)))
df['{0}_index'.format(colname)] = df[colname].replace(col_lookup).values
return df
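# Illustrative usage sketch (not part of the original SI): index_column adds a
# zero-based integer index for each unique value of the named column, which is
# how probes and locus tags are referred to downstream, e.g.
#     data = index_column(data, 'probe')      # adds a 'probe_index' column
#     data = index_column(data, 'locus_tag')  # adds a 'locus_tag_index' column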
def reduce_dataset(df, colname, n=2000, seed=True):
"""Returns the passed dataframe, with a reduced set of rows"""
if seed:
random.seed(SEED) # for reproducibility of random choice
col_ids = df[colname].unique()
nvals = len(col_ids)
indices = [random.randint(0, nvals) for i in range(n)]
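    # Note: random.randint(0, nvals) draws inclusively and with replacement, so
    # the reduced set may contain slightly fewer than n distinct column values.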
reduced = df.loc[df['{0}_index'.format(colname)].isin(indices)]
# create indices and values for probes
new_ids = reduced[colname].unique()
nvals = len(new_ids)
new_lookup = dict(zip(new_ids, range(nvals)))
# add data column with probe index from probe_lookup
reduced['{0}_index'.format(colname)] =\
reduced[colname].replace(new_lookup).values
return reduced
def reduce_dataset_by_column_value(df, colname, values):
"""Returns the passed dataframe, with only the passed column values"""
col_ids = df[colname].unique()
nvals = len(col_ids)
# Reduce dataset
reduced = df.loc[df['locus_tag'].isin(values)]
# create indices and values for probes
new_ids = reduced[colname].unique()
nvals = len(new_ids)
new_lookup = dict(zip(new_ids, range(nvals)))
# add data column with probe index from probe_lookup
reduced['{0}_index'.format(colname)] =\
reduced[colname].replace(new_lookup).values
return reduced
def extract_fit_variable_summary(fit, varname, index=None):
"""Returns summary information for a variable in the passed Stan fit object
    Calculates mean, std, median, and 2.5%, 25%, 75% and 97.5% percentiles
for the passed variable, returning them as a dataframe.
"""
# Using Pandas methods
mean = pd.Series(fit[varname].mean(0), index=index)
se = pd.Series(fit[varname].std(0), index=index)
# Need to use numpy functions
median = pd.Series(np.median(fit[varname], 0), index=index)
perc_2_5 = pd.Series(np.percentile(fit[varname], 2.5, 0), index=index)
perc_25 = pd.Series(np.percentile(fit[varname], 25, 0), index=index)
perc_75 = pd.Series(np.percentile(fit[varname], 75, 0), index=index)
perc_97_5 = pd.Series(np.percentile(fit[varname], 97.5, 0), index=index)
return pd.DataFrame({'%s_mean' % varname: mean,
'%s_sem' % varname: se,
'%s_median' % varname: median,
'%s_2.5pc' % varname: perc_2_5,
'%s_97.5pc' % varname: perc_97_5,
'%s_25pc' % varname: perc_25,
'%s_75pc' % varname: perc_75})
def extract_df_variable_summary(df, varname, index=None):
"""Returns summary information for a variable in the passed datframe object
This function expects a dataframe of pickled fit information
Calculates mean, std, median, and 5%, 25%, 75% and 95% percentiles
for the passed variable, returning them as a dataframe.
"""
# Using Pandas methods
mean = pd.Series(df[varname][0].mean(0), index=index)
se = pd.Series(df[varname][0].std(0), index=index)
# Need to use numpy functions
median = pd.Series(np.median(df[varname][0], 0), index=index)
perc_2_5 = pd.Series(np.percentile(df[varname][0], 2.5, 0), index=index)
perc_25 = pd.Series(np.percentile(df[varname][0], 25, 0), index=index)
perc_75 = pd.Series(np.percentile(df[varname][0], 75, 0), index=index)
perc_97_5 = pd.Series(np.percentile(df[varname][0], 97.5, 0), index=index)
return pd.DataFrame({'%s_mean' % varname: mean,
'%s_sem' % varname: se,
'%s_median' % varname: median,
'%s_2.5pc' % varname: perc_2_5,
'%s_97.5pc' % varname: perc_97_5,
'%s_25pc' % varname: perc_25,
'%s_75pc' % varname: perc_75})
def extract_variable_summaries(obj, otype='fit',
varnames=['a', 'b', 'g', 'd'],
indices=None,
data=None):
"""Return dataframe of parameter estimate summaries
For this modelling there is a specific issue with estimating variables on
arrays (length 6), and estimating them on probes (length around 6000),
and having to combine them.
The calls to extract_*_variable_summary() return a dataframe for each
variable. We broadcast values for a and g across the probe dataset, and
join values for b and d directly.
"""
# Choice of function depends on object being passed
functions = {'fit': extract_fit_variable_summary,
'df': extract_df_variable_summary}
# Get dataframes for each fitted variable summary, keyed by variable name
dfdict = defaultdict()
for varname, index in zip(varnames, indices):
dfdict[varname] = functions[otype](obj, varname, index)
dfdict[varname].reset_index(inplace=True)
# Broadcast parameter estimates across probes
df = pd.merge(data, dfdict['a'],
left_on='repXtrt', right_on='index')
df = pd.merge(df, dfdict['b'],
left_on='locus_tag', right_on='index')
df = pd.merge(df, dfdict['g'],
left_on='repXtrt', right_on='index')
df = pd.merge(df, dfdict['d'],
left_on='locus_tag', right_on='index')
# Broadcast parameter estimates across locus tags
lt = pd.DataFrame(data['locus_tag'].unique())
lt.columns = ['locus_tag']
lt = pd.merge(lt, dfdict['b'],
left_on='locus_tag', right_on='index')
lt = pd.merge(lt, dfdict['d'],
left_on='locus_tag', right_on='index')
df.drop('index_x', 1, inplace=True)
df.drop('index_y', 1, inplace=True)
lt.drop('index_x', 1, inplace=True)
lt.drop('index_y', 1, inplace=True)
lt.sort_values('locus_tag', inplace=True)
return df, lt
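# Illustrative usage sketch (argument values below are placeholders inferred
# from the docstring, not copied from the original notebooks):
#     estimates, lt_estimates = extract_variable_summaries(
#         fit, otype='fit',
#         varnames=['a', 'b', 'g', 'd'],
#         indices=[repXtrt_levels, locus_tags, repXtrt_levels, locus_tags],
#         data=data)
# where a/g are indexed by replicate-by-treatment level and b/d by locus tag.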
def boxplot_medians(estimates, varnames=['a', 'b', 'g', 'd']):
"""Plot 2x2 boxplot of parameter median estimates"""
fig, axes = plt.subplots(int(len(varnames)/2), 2,
figsize=(12, 2 * len(varnames)))
axes = axes.ravel()
fig.subplots_adjust(hspace=0.3)
for idx, varname in enumerate(varnames):
sns.boxplot(estimates['{0}_median'.format(varname)],
ax=axes[idx])
axes[idx].set_title("Median {0}".format(varname))
def split_estimates(df, org):
"""Split the passed dataframe into either Sakai or DH10B subsets"""
if org == 'dh10b':
subset = df.loc[df['locus_tag'].str.startswith('ECDH10B')]
else:
subset = df.loc[~df['locus_tag'].str.startswith('ECDH10B')]
return subset
def plot_treatment_vs_control(df):
"""Plot median treatment vs control parameters"""
fig, axes = plt.subplots(1, 2, figsize=(12, 8))
axes = axes.ravel()
fig.subplots_adjust(hspace=0.3)
for idx, xvar, yvar, ax in zip(range(2),
['a_median', 'a_median',
'b_median', 'b_median'],
['g_median', 'd_median',
'g_median', 'd_median'],
axes):
ax.scatter(df[xvar], df[yvar], alpha=0.2)
ax.set_xlabel(xvar)
ax.set_ylabel(yvar)
def label_positive_effects(df):
"""Label the locus tags as having positive effects on treatment, control,
or both.
"""
df['trt_pos'] = df['d_25pc'] > 0
df['ctl_pos'] = df['b_25pc'] > np.percentile(df['b_median'], 97.5)
df['combined'] = df['trt_pos'] & df['ctl_pos']
return df
def plot_parameter(df, ax, varname, thresh, annotations=None, label=None, ylabel=None):
"""Plot the estimated parameter median, and 50% CI, in locus tag order on
the passed matplotlib axis
    Credibility intervals are coloured blue if they include the threshold,
    magenta (median below threshold) or green (median above threshold) otherwise.
    annotations expects a dictionary where the key is the annotation text, and
    the value is an (x_start, x_end, y) tuple giving the extent and height at
    which the label and its bracket are drawn
"""
vals = df['{0}_median'.format(varname)]
cilo = df['{0}_25pc'.format(varname)]
cihi = df['{0}_75pc'.format(varname)]
ax.scatter(range(len(df)), vals, c='k', marker='.')
for idx, val, vlo, vhi in zip(range(len(df)),
vals, cilo, cihi):
if vlo < thresh < vhi:
color = 'b-'
elif val < thresh:
color = 'm-'
elif val > thresh:
color = 'g-'
else:
color = 'k-'
ax.plot([idx, idx], [vlo, vhi], color, alpha=0.4)
# Add box annotations, if requested
y0, y1 = ax.get_ylim()
max_y_ann = y1
if annotations is not None:
bbox_props = dict(boxstyle="square,pad=0.3", color="w")
for k, v in annotations.items():
# Text box
t = ax.text(0.5 * (v[0] + v[1]), v[2], k,
ha="center", va="center", bbox=bbox_props)
# Marker
offset = 0.075 * (y1 - y0)
ax.plot([v[0], v[0]], [v[2] - offset, v[2] - 0.5 * offset], 'k-')
ax.plot([v[1], v[1]], [v[2] - offset, v[2] - 0.5 * offset], 'k-')
ax.plot([v[0], v[1]], [v[2] - 0.75 * offset,
v[2] - 0.75 * offset], 'k-')
# Max ylim
max_y_ann = max(v[2] * 1.1, max_y_ann)
# Set x and y limits
ax.set_ylim(y0, max_y_ann)
ax.set_xlim(-1, len(df) + 1)
# Don't show x-axis ticks
ax.get_xaxis().set_visible(False)
# Draw label if asked
y0, y1 = ax.get_ylim()
bbox_props = dict(boxstyle="square,pad=0.3", color="w")
if label:
ax.text(-1, (y1 + (y1 - y0) * 0.01), label,
va="bottom", ha="left", bbox=bbox_props,
size="x-large")
# Draw y-axis label
if ylabel:
ax.set_ylabel(ylabel)
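# Illustrative sketch (coordinates below are made up for demonstration): the
# annotations argument of plot_parameter maps a text label to an
# (x_start, x_end, y) tuple, e.g.
#     annotations = {"region of interest": (120, 180, 8.5)}
# which centres the label between x=120 and x=180 at height y=8.5 and draws a
# bracket beneath it.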
# Get index of locus tag for plotting
def get_lt_index(locus_tag, df):
return list(df['locus_tag']).index(locus_tag)
def get_annotation(tag, anndict):
try:
return anndict[tag]
except KeyError:
return None
def annotate_locus_tags(df, gbfilepath):
"""Add gene product annotations from gbfiles to passed dataframe
The annotations are added/placed in a column called "annotation", and are
identified on the basis of the "locus_tag" column
"""
products = dict()
startpos = defaultdict(int)
for record in SeqIO.parse(gbfilepath, 'genbank'):
products.update({ft.qualifiers['locus_tag'][0]:ft.qualifiers['product'][0]
for ft in record.features if
(ft.type == 'CDS' and
'product' in ft.qualifiers)})
startpos.update({ft.qualifiers['locus_tag'][0]:
int(ft.location.nofuzzy_start)
for ft in record.features if
ft.type == 'gene'})
df['annotation'] = df['locus_tag'].apply(get_annotation,
args=(products,))
df['startpos'] = df['locus_tag'].apply(get_annotation,
args=(startpos,))
return df
def parse_full_fit(picklefilename, datafilename):
"""Parses the full model fit into a Pandas dataframe which is returned
The returned dataframe has columns for mean, SEM, median, and 2.5, 25,
75, 97.5 percentiles
"""
# Load fit
with open(picklefilename, 'rb') as ifh:
fit = pickle.load(ifh)
indata = pd.read_csv(datafilename, sep="\t")
locus_tags = indata['locus_tag'].unique()
# Get dataframes for each fitted variable summary, and join them
dflist = []
for varname in ['a', 'b', 'g', 'd']:
        dflist.append(extract_fit_variable_summary(fit, varname, locus_tags))
return pd.concat(dflist, axis=1)
def plot_errors(df):
"""Plot distributions of absolute and relative error in crossvalidation"""
fig, axes = plt.subplots(1, 2, figsize=(12,4))
fig.subplots_adjust(hspace=.25)
axes = axes.ravel()
for ttl, col, ax in zip(("absolute error", "relative error"),
("y_pred_abs_error", "y_pred_rel_error"),
axes):
ax.set_title(ttl)
sns.boxplot(df[col], ax=ax)
def plot_error_vs_column(df, colname):
    """Plot absolute and relative crossvalidation error against the passed column"""
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes = axes.ravel()
for ttl, col, ax in zip(("absolute error", "relative error"),
("y_pred_abs_error", "y_pred_rel_error"),
axes):
ax.set_title("{0} v {1}".format(ttl, colname))
ax.set_xlabel(colname)
ax.set_ylabel(ttl)
ax.scatter(df[colname], df[col], alpha=0.05)
def plot_probe_predictions(locustag, df):
"""Plot prediction range and measured value for a specific gene"""
ltdata = df.loc[df['locus_tag'] == locustag].sort_values(['probe',
'treatment',
'replicate'])
plt.scatter(range(len(ltdata)), ltdata['log_output'], color='k')
for idx, obs, plo, pmd, phi in zip(range(len(ltdata)),
ltdata['log_output'],
ltdata['y_pred_5pc'],
ltdata['y_pred_median'],
ltdata['y_pred_95pc']):
if plo < obs < phi:
lcolor = 'b-'
pcolor = 'b.'
else:
lcolor = 'r-'
pcolor = 'r.'
plt.plot([idx, idx], [plo, phi], lcolor)
plt.plot([idx, idx], [pmd, pmd], pcolor)
plt.xticks(range(len(ltdata)), ltdata['probe'], rotation=90)
plt.xlim(-1, len(ltdata))
plt.title("Probe predictions: {0}, delta: {1}".format(locustag,
ltdata['d_median'].unique()))
def plot_locustag_predictions(df, tag):
"""Plot prediction range and measured output for a locus tag
Produce one axis per probe
"""
ltdata = df.loc[df['locus_tag'] == tag].sort_values(['treatment',
'probe',
'replicate'])
#print(ltdata)
probes = list(ltdata['probe'].unique())
numprobes = len(probes)
fig, axes = plt.subplots(1, numprobes, figsize=(6 * numprobes, 6))
try:
axes = axes.ravel()
except AttributeError:
axes = (axes,)
for ttl, arr, ax in zip(probes,
[ltdata[ltdata['probe'] == p] for p in probes],
axes):
# Plot input (grey) and output (black) measurements
ax.scatter(range(len(arr)), arr['log_input'], color='k', alpha=0.2)
ax.scatter(range(len(arr)), arr['log_output'], color='k')
        # Plot prediction errors
for idx, obs, trt, plo, pmd, phi in zip(range(len(arr)),
arr['log_output'],
arr['treatment'],
arr['y_pred_5pc'],
arr['y_pred_median'],
arr['y_pred_95pc']):
if plo < obs < phi:
if trt == 1:
lcolor = 'b-'
pcolor = 'b.'
else:
lcolor = 'y-'
pcolor = 'y.'
else:
if trt == 1:
lcolor = 'r-'
pcolor = 'r.'
else:
lcolor = 'g-'
pcolor = 'g.'
ax.plot([idx, idx], [plo, phi], lcolor)
ax.plot([idx, idx], [pmd, pmd], pcolor)
ax.set_title("{2} probe predictions: {0}, delta: {1}".format(ttl,
arr['d_median'].unique(),
tag))