# -*- coding: utf-8 -*-
"""Tests for classes representing blacklist service clients."""
from __future__ import unicode_literals

from dns.resolver import NXDOMAIN
from future.moves.urllib.parse import urlparse, parse_qs
from nose_parameterized import parameterized
from requests.exceptions import HTTPError

from spam_lists.exceptions import UnathorizedAPIKeyError, UnknownCodeError
from spam_lists.clients import (
    DNSBL, GoogleSafeBrowsing, HpHosts, BitmaskingDNSBL
)
from test.compat import unittest, Mock, patch
from test.unit.common_definitions import (
    HostListTestMixin, host_list_host_factory, URLTesterTestMixin
)


class DNSQuerySideEffects(object):
    """A class providing a side effect for a mock of query function."""

    def __init__(self, expected_query_names, last_octet=2):
        """Initialize a new instance.

        :param expected_query_names: domain names for which the mock is
            expected to return a mock of a DNS answer object
        :param last_octet: a number to be used as last octet of an IP
            address provided by the answer object
        """
        self.expected_query_names = expected_query_names
        self.last_octet = last_octet

    def __call__(self, query_name):
        """Query for a DNS name.

        :param query_name: a domain for which the mock is being queried
        :returns: a list containing a DNS answer mock
        :raises NXDOMAIN: if query_name is not included in the
            preconfigured list
        """
        if query_name in self.expected_query_names:
            dns_answer_mock = Mock()
            return_value = '121.0.0.{}'.format(self.last_octet)
            dns_answer_mock.to_text.return_value = return_value
            return [dns_answer_mock]
        raise NXDOMAIN


class DNSBLTestMixin(HostListTestMixin):
    """Tests for DNSBL client classes.

    :cvar query_domain_str: a string used as a suffix for DNS queries
        to a service
    :cvar host_with_unknown_code: a host value to be used during tests
    :ivar host_factory_mock: a mocked implementation of host factory
        used by tested instance. Uses host_list_host_factory as its
        implementation
    :ivar dnsbl_factory: constructor of instance of tested class
    :ivar tested_instance: an instance of tested class
    :ivar dns_query_patcher: an object used for patching query function
        used by DNSBL instance
    :ivar dns_query_mock: a mocked implementation of the query function
    """

    query_domain_str = 'test.query.domain'
    host_with_unknown_code = 'hostwithunknowncode.com'

    def setUp(self):
        self.host_factory_mock = Mock()
        self.host_factory_mock.side_effect = host_list_host_factory
        classification_map = {}
        for i, k in enumerate(self.classification, 1):
            classification_map[2**i] = k
        self.tested_instance = self.dnsbl_factory(
            'test_service',
            self.query_domain_str,
            classification_map,
            self.host_factory_mock
        )
        self.dns_query_patcher = patch('spam_lists.clients.query')
        self.dns_query_mock = self.dns_query_patcher.start()
        self.dns_query_mock.side_effect = DNSQuerySideEffects([])

    def tearDown(self):
        self.dns_query_patcher.stop()

    def _set_matching_hosts(self, hosts):
        host_objects = [self.host_factory_mock(h) for h in hosts]
        query_names = [h.relative_domain.derelativize()
                       for h in host_objects]
        self.dns_query_mock.side_effect.expected_query_names = query_names

    @parameterized.expand([
        ('lookup', host_with_unknown_code),
        (
            'lookup_matching',
            ['http://'+host_with_unknown_code]
        )
    ])
    def test_code_error_raised_by(self, function_name, tested_value):
        """Test if UnknownCodeError is raised for tested value.

        The error is expected to be raised when a DNSBL service returns
        a code whose value was not recognized by the client. Occurrence
        of such an error means that there is probably a new return code
        provided by the service and that the client provided by this
        library must be updated to take this value into account.

        :param function_name: a name of a method to be tested
        :param tested_value: a host value to be used for the test
        """
        self._set_matching_hosts([self.host_with_unknown_code])
        self.dns_query_mock.side_effect.last_octet = 14

        def function(hosts):
            func = getattr(self.tested_instance, function_name)
            return list(func(hosts))

        self.assertRaises(UnknownCodeError, function, tested_value)


class DNSBLTest(DNSBLTestMixin, unittest.TestCase):
    """Tests for DNSBL class."""

    # pylint: disable=too-many-public-methods
    dnsbl_factory = DNSBL


class BitmaskingDNSBLTest(DNSBLTestMixin, unittest.TestCase):
    """Tests for BitmaskingDNSBL class."""

    # pylint: disable=too-many-public-methods
    dnsbl_factory = BitmaskingDNSBL


def create_hp_hosts_get(classification, listed_hosts):
    """Get a function to replace the get function used by HpHosts.

    :param classification: a classification for given hosts
    :param listed_hosts: listed hosts for generating responses
    :returns: a function providing side effects of Mock instance
        for the get function
    """
    class_str = ','.join(classification)

    def hp_hosts_get(url):
        """Get mock representing a response for a GET request.

        :param url: a request address
        :returns: a Mock instance representing response object expected
            by HpHosts
        """
        query_string = urlparse(url).query
        query_data = parse_qs(query_string)
        content = 'Not Listed'
        host = query_data['s'][0]
        if host in listed_hosts:
            content = 'Listed,{}'.format(class_str)
        response = Mock()
        response.text = content
        return response
    return hp_hosts_get


class HpHostsTest(HostListTestMixin, unittest.TestCase):
    """Tests for HpHosts client class.

    :cvar tested_instance: an instance of tested class
    :ivar listed_hosts: a list of host values assumed to be listed for
        tests
    :ivar get_patcher: an object used for patching get function used
        by a HpHosts instance
    :ivar get_mock: a mocked implementation of the get function. Uses
        a function returned by create_hp_hosts_get for given
        classification and list of hosts
    :ivar host_factory_mock: a mocked implementation of host factory
        used by tested instance. Uses host_list_host_factory as its
        implementation.
    """

    # pylint: disable=too-many-public-methods
    @classmethod
    def setUpClass(cls):
        cls.tested_instance = HpHosts('spam_lists_test_suite')

    def setUp(self):
        self.listed_hosts = []
        self.get_patcher = patch('spam_lists.clients.get')
        self.get_mock = self.get_patcher.start()
        self.get_mock.side_effect = create_hp_hosts_get(
            self.classification,
            []
        )
        self.host_factory_mock = Mock()
        self.tested_instance = HpHosts('spam_lists_test_suite')
        self.tested_instance._host_factory = self.host_factory_mock
        self.host_factory_mock.side_effect = host_list_host_factory

    def tearDown(self):
        self.get_patcher.stop()

    def _set_matching_hosts(self, hosts):
        side_effect = create_hp_hosts_get(
            self.classification,
            hosts
        )
        self.get_mock.side_effect = side_effect


def create_gsb_post(expected_401, spam_urls, classification):
    """Get mock for post function used by GoogleSafeBrowsing.

    :param expected_401: if True, the code of response mock returned
        by the returned function will be 401
    :param spam_urls: a list of URLs to be recognized as spam
    :param classification: a classification used for spam URLs
    :returns: mocked implementation of post function
    """
    def post(_, body):
        """Get mock of a response to a POST query to GSB Lookup API.

        :param body: a request body
        :returns: a Mock instance representing the response. Properties
            of the object depend on external values provided by the
            creator of the method: expected_401, spam_urls and
            classification
        """
        response = Mock()
        if expected_401:
            response.status_code = 401
            response.raise_for_status.side_effect = HTTPError
        else:
            urls = body.splitlines()[1:]
            classes = ['ok' if u not in spam_urls else
                       ','.join(classification) for u in urls]
            response.text = '\n'.join(classes)
            code = 200 if spam_urls else 204
            response.status_code = code
        return response
    return post


class GoogleSafeBrowsingTest(URLTesterTestMixin, unittest.TestCase):
    """Tests for GoogleSafeBrowsing class.

    This class adds an additional test method to the ones provided by
    URLTesterTestMixin: test_unathorized_query_with. This method is
    used to test methods of GoogleSafeBrowsing class for expected
    behaviour while calling Google Safe Browsing lookup API with an
    unauthorized API key.

    :cvar tested_instance: an instance of tested class
    :ivar post_patcher: an object used for patching post function used
        by GoogleSafeBrowsing instance
    :ivar mocked_post: a mocked implementation of the post function for
        the tested instance. Uses a function returned by
        create_gsb_post function as its implementation.
    """

    # pylint: disable=too-many-public-methods
    def _get_expected_items_for_urls(self, urls):
        return self._get_expected_items(urls)

    @classmethod
    def setUpClass(cls):
        cls.tested_instance = GoogleSafeBrowsing(
            'test_client',
            '0.1',
            'test_key'
        )

    def _set_up_post_mock(self, spam_urls, error_401_expected=False):
        side_effect = create_gsb_post(
            error_401_expected,
            spam_urls,
            self.classification
        )
        self.mocked_post.side_effect = side_effect

    def setUp(self):
        self.post_patcher = patch('spam_lists.clients.post')
        self.mocked_post = self.post_patcher.start()

    def tearDown(self):
        self.post_patcher.stop()

    def _set_matching_urls(self, urls):
        self._set_up_post_mock(urls)

    @parameterized.expand([
        ('any_match'),
        ('lookup_matching'),
        ('filter_matching')
    ])
    def test_unathorized_query_with(self, function_name):
        """Test if a method raises an UnathorizedAPIKeyError.

        :param function_name: a name of a method to be tested
        """
        tested_function = getattr(self.tested_instance, function_name)

        def called_function(urls):
            """Call the function and return its return value.

            This function ensures generators are fully executed, too.

            :param urls: URL values to be used during the test
            """
            return list(tested_function(urls))

        self._set_up_post_mock([], error_401_expected=True)
        self.assertRaises(
            UnathorizedAPIKeyError,
            called_function,
            self.valid_urls
        )


if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
import datetime
from typing import List

import pandas as pd
import pytest
from ruamel.yaml import YAML

import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
    Batch,
    BatchDefinition,
    BatchSpec,
    RuntimeBatchRequest,
)
from great_expectations.core.batch_spec import (
    PathBatchSpec,
    RuntimeDataBatchSpec,
    RuntimeQueryBatchSpec,
    S3BatchSpec,
)
from great_expectations.core.id_dict import IDDict
from great_expectations.datasource.data_connector import RuntimeDataConnector

yaml = YAML()


def test_self_check(basic_datasource):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    assert test_runtime_data_connector.self_check() == {
        "class_name": "RuntimeDataConnector",
        "data_asset_count": 0,
        "data_assets": {},
        "example_data_asset_names": [],
        "example_unmatched_data_references": [],
        "note": "RuntimeDataConnector will not have data_asset_names until they are "
        "passed in through RuntimeBatchRequest",
        "unmatched_data_reference_count": 0,
    }


def test_error_checking_unknown_datasource(basic_datasource):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # Test for an unknown datasource
    with pytest.raises(ValueError):
        # noinspection PyUnusedLocal
        batch_definition_list: List[
            BatchDefinition
        ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name="non_existent_datasource",
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers={"airflow_run_id": "first"},
            )
        )


def test_error_checking_unknown_data_connector(basic_datasource):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # Test for an unknown data_connector
    with pytest.raises(ValueError):
        # noinspection PyUnusedLocal
        batch_definition_list: List[
            BatchDefinition
        ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=basic_datasource.name,
                data_connector_name="non_existent_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers={"airflow_run_id": "first"},
            )
        )


def test_error_checking_missing_runtime_parameters(basic_datasource):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # test for missing runtime_parameters arg
    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal, PyArgumentList
        batch_definition_list: List[
            BatchDefinition
        ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=basic_datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                batch_identifiers={"pipeline_stage_name": "munge"},
            )
        )


def test_error_checking_too_many_runtime_parameters(basic_datasource):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # test for too many runtime_parameters keys
    with pytest.raises(ge_exceptions.InvalidBatchRequestError):
        # noinspection PyUnusedLocal
        batch_definition_list: List[
            BatchDefinition
        ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=basic_datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df, "path": "my_path"},
                batch_identifiers={"pipeline_stage_name": "munge"},
            )
        )


def test_batch_identifiers_and_batch_identifiers_success_all_keys_present(
    basic_datasource,
):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    batch_identifiers: dict
    batch_identifiers = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {"batch_data": test_df},
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    batch_definition_list: List[
        BatchDefinition
    ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    assert len(batch_definition_list) == 1


def test_batch_identifiers_and_batch_identifiers_error_illegal_keys(
    basic_datasource,
):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    batch_identifiers: dict
    batch_identifiers = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
        "custom_key_1": "custom_value_1",
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # Ensure that keys in batch_identifiers["batch_identifiers"] that are not among
    # batch_identifiers declared in configuration are not accepted.
    # In this test, all legal keys plus a single illegal key are present.
    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset_name",
        "runtime_parameters": {"batch_data": test_df},
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_definition_list: List[
            BatchDefinition
        ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=batch_request
        )

    batch_identifiers = {"batch_identifiers": {"unknown_key": "some_value"}}

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # Ensure that keys in batch_identifiers["batch_identifiers"] that are not among
    # batch_identifiers declared in configuration are not accepted.
    # In this test, a single illegal key is present.
    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {"batch_data": test_df},
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_definition_list: List[
            BatchDefinition
        ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=batch_request
        )


def test_get_available_data_asset_names(basic_datasource):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    expected_available_data_asset_names: List[str] = []

    available_data_asset_names: List[
        str
    ] = test_runtime_data_connector.get_available_data_asset_names()

    assert available_data_asset_names == expected_available_data_asset_names


def test_get_available_data_asset_names_updating_after_batch_request(basic_datasource):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    # empty if data_connector has not been used
    assert test_runtime_data_connector.get_available_data_asset_names() == []

    batch_identifiers = {
        "airflow_run_id": 1234567890,
    }

    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset_1",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    # run with my_data_asset_1
    test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    # updated to my_data_asset_1
    assert test_runtime_data_connector.get_available_data_asset_names() == [
        "my_data_asset_1"
    ]

    batch_identifiers = {
        "airflow_run_id": 1234567890,
    }

    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset_2",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    # run with my_data_asset_2
    test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    # updated to my_data_asset_1 and my_data_asset_2
    assert test_runtime_data_connector.get_available_data_asset_names() == [
        "my_data_asset_1",
        "my_data_asset_2",
    ]


def test_data_references_cache_updating_after_batch_request(
    basic_datasource,
):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    # empty if data_connector has not been used
    assert test_runtime_data_connector.get_available_data_asset_names() == []

    batch_identifiers = {
        "airflow_run_id": 1234567890,
    }

    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset_1",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    # run with my_data_asset_1
    test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    assert test_runtime_data_connector._data_references_cache == {
        "my_data_asset_1": {
            "1234567890": [
                BatchDefinition(
                    datasource_name="my_datasource",
                    data_connector_name="test_runtime_data_connector",
                    data_asset_name="my_data_asset_1",
                    batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
                )
            ],
        }
    }

    # update with test_df_new
    test_df_new: pd.DataFrame = pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]})

    batch_identifiers = {
        "airflow_run_id": 987654321,
    }

    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset_1",
        "runtime_parameters": {
            "batch_data": test_df_new,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    # run with the same data_asset but a new batch
    test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    assert test_runtime_data_connector._data_references_cache == {
        "my_data_asset_1": {
            "1234567890": [
                BatchDefinition(
                    datasource_name="my_datasource",
                    data_connector_name="test_runtime_data_connector",
                    data_asset_name="my_data_asset_1",
                    batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
                )
            ],
            "987654321": [
                BatchDefinition(
                    datasource_name="my_datasource",
                    data_connector_name="test_runtime_data_connector",
                    data_asset_name="my_data_asset_1",
                    batch_identifiers=IDDict({"airflow_run_id": 987654321}),
                )
            ],
        },
    }

    # new data_asset_name
    test_df_new_asset: pd.DataFrame = pd.DataFrame(
        data={"col1": [9, 10], "col2": [11, 12]}
    )

    batch_identifiers = {
        "airflow_run_id": 5555555,
    }

    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset_2",
        "runtime_parameters": {
            "batch_data": test_df_new_asset,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    # run with a new data_asset and a new batch
    test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    assert test_runtime_data_connector._data_references_cache == {
        "my_data_asset_1": {
            "1234567890": [
                BatchDefinition(
                    datasource_name="my_datasource",
                    data_connector_name="test_runtime_data_connector",
                    data_asset_name="my_data_asset_1",
                    batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
                )
            ],
            "987654321": [
                BatchDefinition(
                    datasource_name="my_datasource",
                    data_connector_name="test_runtime_data_connector",
                    data_asset_name="my_data_asset_1",
                    batch_identifiers=IDDict({"airflow_run_id": 987654321}),
                )
            ],
        },
        "my_data_asset_2": {
            "5555555": [
                BatchDefinition(
                    datasource_name="my_datasource",
                    data_connector_name="test_runtime_data_connector",
                    data_asset_name="my_data_asset_2",
                    batch_identifiers=IDDict({"airflow_run_id": 5555555}),
                )
            ]
        },
    }

    assert test_runtime_data_connector.get_available_data_asset_names() == [
        "my_data_asset_1",
        "my_data_asset_2",
    ]

    assert test_runtime_data_connector.get_data_reference_list_count() == 3


def test_get_batch_definition_list_from_batch_request_length_one(
    basic_datasource,
):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    batch_identifiers: dict = {
        "airflow_run_id": 1234567890,
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {"batch_data": test_df},
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    expected_batch_definition_list: List[BatchDefinition] = [
        BatchDefinition(
            datasource_name="my_datasource",
            data_connector_name="test_runtime_data_connector",
            data_asset_name="my_data_asset",
            batch_identifiers=IDDict(batch_identifiers),
        )
    ]

    batch_definition_list: List[
        BatchDefinition
    ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    assert batch_definition_list == expected_batch_definition_list


def test_get_batch_definition_list_from_batch_request_with_and_without_data_asset_name(
    basic_datasource,
):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    batch_identifiers = {
        "airflow_run_id": 1234567890,
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # data_asset_name is missing
    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }

    with pytest.raises(TypeError):
        # noinspection PyUnusedLocal
        batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    # test that name can be set as "my_data_asset"
    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    batch_definition_list: List[
        BatchDefinition
    ] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
        batch_request=batch_request
    )

    assert len(batch_definition_list) == 1
    # check that default value has been set
    assert batch_definition_list[0]["data_asset_name"] == "my_data_asset"


def test__get_data_reference_list(basic_datasource):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    expected_data_reference_list: List[str] = []

    # noinspection PyProtectedMember
    data_reference_list: List[
        str
    ] = test_runtime_data_connector._get_data_reference_list()

    assert data_reference_list == expected_data_reference_list


def test_refresh_data_references_cache(basic_datasource):
    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )
    assert len(test_runtime_data_connector._data_references_cache) == 0


def test__generate_batch_spec_parameters_from_batch_definition(
    basic_datasource,
):
    batch_identifiers = {
        "custom_key_0": "staging",
        "airflow_run_id": 1234567890,
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    expected_batch_spec_parameters: dict = {"data_asset_name": "my_data_asset"}

    # noinspection PyProtectedMember
    batch_spec_parameters: dict = test_runtime_data_connector._generate_batch_spec_parameters_from_batch_definition(
        batch_definition=BatchDefinition(
            datasource_name="my_datasource",
            data_connector_name="test_runtime_data_connector",
            data_asset_name="my_data_asset",
            batch_identifiers=IDDict(batch_identifiers),
        )
    )

    assert batch_spec_parameters == expected_batch_spec_parameters


def test__build_batch_spec(basic_datasource):
    batch_identifiers = {
        "custom_key_0": "staging",
        "airflow_run_id": 1234567890,
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    batch_definition = BatchDefinition(
        datasource_name="my_datasource",
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        batch_identifiers=IDDict(batch_identifiers),
    )

    batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
        batch_definition=batch_definition,
        runtime_parameters={
            "batch_data": pd.DataFrame({"x": range(10)}),
        },
    )
    assert type(batch_spec) == RuntimeDataBatchSpec
    assert set(batch_spec.keys()) == {"batch_data", "data_asset_name"}
    assert batch_spec["batch_data"].shape == (10, 1)

    batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
        batch_definition=batch_definition,
        runtime_parameters={
            "query": "my_query",
        },
    )
    assert type(batch_spec) == RuntimeQueryBatchSpec

    batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
        batch_definition=batch_definition, runtime_parameters={"path": "my_path"}
    )
    assert type(batch_spec) == PathBatchSpec

    batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
        batch_definition=batch_definition,
        runtime_parameters={"path": "s3://my.s3.path"},
    )
    assert type(batch_spec) == S3BatchSpec

    batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
        batch_definition=batch_definition,
        runtime_parameters={"path": "s3a://my.s3.path"},
    )
    assert type(batch_spec) == S3BatchSpec


def test__get_data_reference_name(basic_datasource):
    data_connector_query: dict = {
        "batch_filter_parameters": {
            "airflow_run_id": 1234567890,
        }
    }
    batch_identifiers = IDDict(data_connector_query["batch_filter_parameters"])

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    assert (
        test_runtime_data_connector._get_data_reference_name(batch_identifiers)
        == "1234567890"
    )

    data_connector_query: dict = {
        "batch_filter_parameters": {
            "run_id_1": 1234567890,
            "run_id_2": 1111111111,
        }
    }
    batch_identifiers = IDDict(data_connector_query["batch_filter_parameters"])

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    assert (
        test_runtime_data_connector._get_data_reference_name(batch_identifiers)
        == "1234567890-1111111111"
    )


def test_batch_identifiers_datetime(
    basic_datasource,
):
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})

    batch_identifiers: dict
    batch_identifiers = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": datetime.datetime.utcnow(),
    }

    test_runtime_data_connector: RuntimeDataConnector = (
        basic_datasource.data_connectors["test_runtime_data_connector"]
    )

    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers
    batch_request: dict = {
        "datasource_name": basic_datasource.name,
        "data_connector_name": test_runtime_data_connector.name,
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {"batch_data": test_df},
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)

    batch_definition = (
        test_runtime_data_connector.get_batch_definition_list_from_batch_request(
            batch_request=batch_request
        )[0]
    )

    batch = Batch(
        data=test_df, batch_request=batch_request, batch_definition=batch_definition
    )

    try:
        _ = batch.id
    except TypeError:
        pytest.fail()
# -*- coding: utf-8 -*-
'''
Create adjacency matrices and analyse terms dynamically
'''
print('Create document-term ESOMs')
#--------------------------------------------
# run create_Info_Files.py before running this
#--------------------------------------------
import pickle, time, igraph, glob, os, somoclu, collections
import itertools, codecs, seaborn, math, pprint, random, re
from matplotlib import rc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive
import matplotlib.colors as colors
from scipy.spatial import distance
import seaborn as sns
import sklearn.cluster as clusterAlgs
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import dendrogram
import scipy.cluster.hierarchy as sch
#--------------------------------------------

print(time.asctime(time.localtime(time.time())))
t = time.time()

readDataPath = './data/artworks_tmp/artworks_doc_term'
statsWritePath = './data/artworks_stats'
figWritePath = './data/artworks_figs/doc_TermFigs'
splitsMergesWritePath = './data/splitsMerges/doc_Term'
if not os.path.exists(figWritePath):
    os.makedirs(figWritePath)

LVLs = ['lvl1', 'lvl2', 'lvl3', 'lvlA']  # 'lvl1','lvl2','lvl3',
yearPeriods = ['2000s']  # '1800s',
trueYearsIni = [1964]  # 1800,
leaffont = [14, 11, 7, 4]  # 12,7,6,

for idy, years in enumerate(yearPeriods):
    print(years)
    for lIdx, lvl in enumerate(LVLs):
        print(lvl)
        persistTerms = [x.strip() for x in open(statsWritePath+'/'+years+lvl+'_unique_persistent_terms.txt', 'r').readlines()]
        dataDict = {'uniquePersistentTerms': persistTerms}
        for pIdx in range(10):
            periodIdx = str(pIdx)
            dataDict[periodIdx] = {}

            '''set up SOM'''  # --------------------------------------------------------------------
            # n_columns, n_rows = 100, 60
            n_columns, n_rows = 150, 90
            # n_columns, n_rows = 200, 120
            lablshift = 1
            #-------------
            # if lvl == 'lvl1':
            #     n_columns, n_rows = 20, 12
            #     lablshift = .2
            # elif lvl == 'lvl2':
            #     n_columns, n_rows = 40, 24
            #     lablshift = .3
            # elif lvl == 'lvl3':
            #     n_columns, n_rows = 50, 30
            #     lablshift = .4
            # elif lvl == 'lvlA':
            #     n_columns, n_rows = 60, 40
            #     lablshift = .5
            #------------
            som = somoclu.Somoclu(n_columns, n_rows, maptype="toroid", initialization="pca")
            savefig = True
            SOMdimensionsString = 'x'.join([str(x) for x in [n_columns, n_rows]])
            if pIdx < 1:
                print('SOM dimension is: %s' % SOMdimensionsString)
            #--------------------------------------------------------------------------------

            #------------------------------------------------------------------------------------------------------------------------
            '''SOM data extraction from here on------------------------------------------------------------------------------------'''
            #------------------------------------------------------------------------------------------------------------------------
            '''Extract Self Organizing Maps of undirected weighted adj mats'''
            df = pd.read_table(readDataPath+'/binMat'+years+lvl+'_'+periodIdx+'.tsv', sep="\t", header=0, index_col=0)
            df = df.transpose()
            dfmax = df.max()
            dfmax[dfmax == 0] = 1
            df = df / dfmax
            labels = df.index.tolist()
            nodes = df.index.tolist()
            som.update_data(df.values)
            epochs = 10
            radius0 = 0
            scale0 = 0.1
            som.train(epochs=epochs, radius0=radius0, scale0=scale0)

            '''----------------------clustering params-----------'''
            clusterAlgLabel = 'AffinityPropagation'  # KMeans8, SpectralClustering, AffinityPropagation, Birch
            if clusterAlgLabel == 'Birch':
                algorithm = clusterAlgs.Birch()
            elif clusterAlgLabel == 'AffinityPropagation':
                original_shape = som.codebook.shape
                som.codebook.shape = (som._n_columns*som._n_rows, som.n_dim)
                init = -np.max(distance.pdist(som.codebook, 'euclidean'))
                som.codebook.shape = original_shape
                algorithm = clusterAlgs.AffinityPropagation(preference=init, damping=0.9)
            elif clusterAlgLabel == 'KMeans8':
                algorithm = None

            print('Clustering algorithm employed: %s' % clusterAlgLabel)
            som.cluster(algorithm=algorithm)
            '''----------------------------------------------------'''

            colors = []
            for bm in som.bmus:
                colors.append(som.clusters[bm[1], bm[0]])
            areas = [200]*len(som.bmus)

            xDimension, yDimension = [], []
            for x in som.bmus:
                xDimension.append(x[0])
                yDimension.append(x[1])

            if not os.path.exists(figWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString):
                os.makedirs(figWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString)

            fig, ax = plt.subplots()
            colMap = 'Spectral_r'
            plt.imshow(som.umatrix, cmap=colMap, aspect='auto')
            ax.scatter(xDimension, yDimension, s=areas, c=colors)
            doneLabs = set([''])
            for label, x, y in zip(labels, xDimension, yDimension):
                lblshiftRatio = 1
                labFinshift = ''
                while labFinshift in doneLabs:
                    potentialPositions = [(x, y+lblshiftRatio*lablshift), (x, y-lblshiftRatio*lablshift)]
                    # ,(x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
                    # (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift),
                    # (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
                    for pP in potentialPositions:
                        labFinshift = pP
                        if labFinshift not in doneLabs:
                            break
                    lblshiftRatio += 1
                doneLabs.add(labFinshift)
                plt.annotate(label, xy=(x, y), xytext=labFinshift, textcoords='data', ha='center', va='center', fontsize=8,
                             bbox=dict(boxstyle='round,pad=0.1', fc='white', alpha=0.4))
                # ,arrowprops=dict(arrowstyle='-', connectionstyle='arc3,rad=0'))

            plt.xlim(0, n_columns)
            plt.ylim(0, n_rows)
            plt.gca().invert_yaxis()
            plt.xlabel('ESOM')
            mng = plt.get_current_fig_manager()
            mng.window.state('zoomed')
            interactive(True)
            plt.show()
            fig.savefig(figWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString+'/SOM_binMat'+years+lvl+'_'+periodIdx+'.png', bbox_inches='tight')
            plt.close()
            interactive(False)
            #-----------------------------------------------------------------------------------------------

            #-----------------------------------------------------------------------------------------------
            '''extract matrix and distance matrix from panda format'''
            # data_array = np.matrix(df)
            # data_array_shape = data_array.shape
            # data_array_Trans = data_array.T
            # data_array_Trans_shape = data_array_Trans.shape
            # # data_array = data_array.T
            # data_dist = pdist(data_array,'euclidean')  # computing the distance
            # data_dist_Trans = pdist(data_array_Trans,'euclidean')

            '''Create dendrograms'''
            # if not os.path.exists(figWritePath+'/hierarchicalClustering'):
            #     os.makedirs(figWritePath+'/hierarchicalClustering')
            # plt.figure()
            # plt.xlabel('Term')
            # plt.ylabel('Mean')
            # plt.title('euclidean - farthest neighbor HCA')
            # dendrogram(sch.linkage(data_dist, method='complete'),labels=list(df.index),leaf_rotation=90.,leaf_font_size=leaffont[lIdx],orientation = 'top',truncate_mode = 'none',show_leaf_counts=True)#leaf_font_size=leaffont[lIdx]
            # ax = plt.gca()
            # ax.set_ylim(-0.01,ax.get_ylim()[1])
            # mng = plt.get_current_fig_manager()
            # mng.window.state('zoomed')
            # interactive(True)
            # plt.show()
            # plt.savefig(figWritePath+'/hierarchicalClustering/dendroCategory'+years+lvl+'_'+periodIdx+'.pdf',bbox_inches='tight')
            # plt.close()
            # interactive(False)

            # plt.figure()
            # plt.xlabel('Artwork')
            # plt.ylabel('Mean')
            # plt.title('euclidean - farthest neighbor HCA')
            # dendrogram(sch.linkage(data_dist_Trans, method='complete'),labels=list(df.columns),leaf_rotation=90.,orientation = 'top',truncate_mode = 'none',show_leaf_counts=True)#leaf_font_size=leaffont[lIdx]
            # ax = plt.gca()
            # ax.set_ylim(-0.01,ax.get_ylim()[1])
            # mng = plt.get_current_fig_manager()
            # mng.window.state('zoomed')
            # interactive(True)
            # plt.show()
            # plt.savefig(figWritePath+'/hierarchicalClustering/dendroArtwork'+years+lvl+'_'+periodIdx+'.pdf',bbox_inches='tight')
            # plt.close()
            # interactive(False)

            '''Create dendroheatmap'''
            # # Compute and plot first dendrogram.
            # fig = plt.figure()#figsize=(8,8)
            # ax1 = fig.add_axes([0.09,0.1,0.2,0.6])
            # side_dendrogram = sch.linkage(data_array_Trans, method='complete')
            # Z1 = sch.dendrogram(side_dendrogram, orientation='left',truncate_mode = 'none',show_leaf_counts=False,no_labels = True)
            # ax1.set_ylim(-0.01,ax1.get_ylim()[1])
            # ax1.set_xticks([])
            # ax1.set_yticks([])
            # # Compute and plot second dendrogram.
            # ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
            # top_dendrogram = sch.linkage(data_dist, method='complete')
            # Z2 = sch.dendrogram(top_dendrogram, truncate_mode = 'none',show_leaf_counts=False,no_labels = True)
            # ax2.set_ylim(-0.01,ax2.get_ylim()[1])
            # ax2.set_xticks([])
            # ax2.set_yticks([])
            # # Plot distance matrix.
            # axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
            # idx1 = Z1['leaves']
            # idx2 = Z2['leaves']
            # data_array = data_array[:,idx1]
            # data_array = data_array[idx2,:]
            # ylabels = df.columns[idx1]
            # xlabels = df.index[idx2]
            # xlabels = [x.replace('_',' ') for x in xlabels]
            # newylabels = []
            # for yl in ylabels:
            #     yl = yl.strip().split(' ')
            #     if len(yl)>5:
            #         yl.insert(3,'\n')
            #     yl = ' '.join(yl)
            #     newylabels.append(yl)
            # axmatrix.matshow(data_array.T, aspect='auto', cmap=plt.cm.Spectral_r, origin='lower', vmin=0, vmax=1)
            # axmatrix.xaxis.tick_bottom()
            # axmatrix.set_xticks(np.arange(len(df.index)))
            # axmatrix.yaxis.tick_right()
            # axmatrix.set_yticks(np.arange(len(df.columns)))
            # axmatrix.set_xticklabels(xlabels, fontsize=10, rotation = 90)
            # axmatrix.set_yticklabels(newylabels,fontsize=10)#minor=False,
            # axmatrix.grid(True,color='gray')
            # mng = plt.get_current_fig_manager()
            # mng.window.state('zoomed')
            # interactive(True)
            # fig.show()
            # fig.savefig(figWritePath+'/hierarchicalClustering/dendroHeatmap'+years+lvl+'_'+periodIdx+'.pdf',bbox_inches='tight')
            # plt.close()
            # interactive(False)
            #-----------------------------------------------------------------------------------------------

            '''Check for merges and splits in Greenwich files'''#-------------------------------------------
            #-----------------------------------------------------------------------------------------------
            if int(periodIdx) > 0:
                if not os.path.exists(splitsMergesWritePath+'/'+SOMdimensionsString):
                    os.makedirs(splitsMergesWritePath+'/'+SOMdimensionsString)
                tmpStrClusters = [','.join([str(y) for y in x]) for x in som.bmus]
                strClustDict[periodIdx] = {}
                for idx, sC in enumerate(tmpStrClusters):
                    if sC in strClustDict[periodIdx]:
                        strClustDict[periodIdx][sC].append(nodes[idx])
                    else:
                        strClustDict[periodIdx][sC] = [nodes[idx]]
                tmpSameBMUsNodes = list(strClustDict[periodIdx].values())
                invStrClustDict[periodIdx] = {','.join(v): k for k, v in strClustDict[periodIdx].items()}
                dataDict[periodIdx]['bmuNodes'] = tmpSameBMUsNodes
                tmpsplits, tmpmerges = 0, 0
                with open(splitsMergesWritePath+'/'+SOMdimensionsString+'/changes'+years+lvl+'_'+periodIdx+'.txt', 'w') as f:
                    for tsbn in tmpSameBMUsNodes:
                        if tsbn not in dataDict[str(int(periodIdx)-1)]['bmuNodes']:
                            oldbmucoords = []
                            for ts in tsbn:
                                for ots in dataDict[str(int(periodIdx)-1)]['bmuNodes']:
                                    if ts in ots:
                                        oldbmucoords.append(invStrClustDict[str(int(periodIdx)-1)][','.join(ots)])
                            if len(set(oldbmucoords)) < 2:
                                f.write('Terms %s at %s were split from %s \n' % (','.join(tsbn), invStrClustDict[periodIdx][','.join(tsbn)], '|'.join(oldbmucoords)))
                                if len(tsbn) <= len(strClustDict[str(int(periodIdx)-1)][oldbmucoords[0]])/2:
                                    tmpsplits += len(tsbn)
                                    termDislocation['splits'].extend(tsbn)
                                    termDislocation['both'].extend(tsbn)
                            else:
                                f.write('Terms %s at %s were merged from %s \n' % (','.join(tsbn), invStrClustDict[periodIdx][','.join(tsbn)], '|'.join(oldbmucoords)))
                                for tmpclusts in [strClustDict[str(int(periodIdx)-1)][x] for x in set(oldbmucoords)]:
                                    tmpclustIntersect = set(tmpclusts).intersection(set(tsbn))
                                    if len(tmpclustIntersect) <= len(tsbn)/2:
                                        tmpmerges += len(tmpclustIntersect)
                                        termDislocation['merges'].extend(tmpclustIntersect)
                                        termDislocation['both'].extend(tmpclustIntersect)
                                        # termDislocation['both'].extend(tsbn)
                dislocationDict['merges'].append(100*tmpmerges/len(dataDict['uniquePersistentTerms']))
                dislocationDict['splits'].append(100*tmpsplits/len(dataDict['uniquePersistentTerms']))
                dislocationDict['both'].append(100*(tmpmerges+tmpsplits)/len(dataDict['uniquePersistentTerms']))
            else:
                tmpStrClusters = [','.join([str(y) for y in x]) for x in som.bmus]
                strClustDict = {periodIdx: {}}
                for idx, sC in enumerate(tmpStrClusters):
                    if sC in strClustDict[periodIdx]:
                        strClustDict[periodIdx][sC].append(nodes[idx])
                    else:
                        strClustDict[periodIdx][sC] = [nodes[idx]]
                dataDict[periodIdx]['bmuNodes'] = list(strClustDict[periodIdx].values())
                invStrClustDict = {periodIdx: {','.join(v): k for k, v in strClustDict[periodIdx].items()}}
                dislocationDict = {'merges': [], 'splits': [], 'both': []}
                termDislocation = {'merges': [], 'splits': [], 'both': []}
            #-------------------------------------------------------------------------------------------------------------------------------------

elapsed = time.time() - t
print('Total time Elapsed: %.2f seconds' % elapsed)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""learn_main tests."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os

import tensorflow as tf

from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn import run_config


class TestExperiment(tf.contrib.learn.Experiment):

  def __init__(self, default=None, config=None):
    self.default = default
    self.config = config

  @property
  def estimator(self):
    class Estimator(object):
      config = self.config
    return Estimator()

  def local_run(self):
    return "local_run"

  def train(self):
    return "train"

  def run_std_server(self):
    return "run_std_server"

  def train_and_evaluate(self):
    return "train_and_evaluate"

  def simple_task(self):
    return "simple_task, default=%s." % self.default


# pylint: disable=unused-argument
def build_experiment(output_dir):
  tf.logging.info("In default build_experiment.")
  return TestExperiment()


def build_non_experiment(output_dir):
  return "Ceci n'est pas un Experiment."
# pylint: enable=unused-argument


def build_distributed_cluster_spec():
  return tf.train.ClusterSpec(
      {"ps": ["localhost:1234", "localhost:1235"],
       "worker": ["localhost:1236", "localhost:1237"],
       "master": ["localhost:1238"],
       "foo_has_no_default_schedule": ["localhost:1239"]})


def build_non_distributed_cluster_spec():
  return tf.train.ClusterSpec({"foo": ["localhost:1234"]})


class MainTest(tf.test.TestCase):

  def setUp(self):
    # Ensure the TF_CONFIG environment variable is unset for all tests.
    os.environ.pop("TF_CONFIG", None)

  def test_run_with_custom_schedule(self):
    self.assertEqual(
        "simple_task, default=None.",
        learn_runner.run(build_experiment,
                         output_dir="/tmp",
                         schedule="simple_task"))

  def test_run_with_explicit_local_run(self):
    self.assertEqual(
        "local_run",
        learn_runner.run(build_experiment,
                         output_dir="/tmp",
                         schedule="local_run"))

  def test_schedule_from_tf_config(self):
    os.environ["TF_CONFIG"] = json.dumps(
        {"cluster": build_distributed_cluster_spec().as_dict(),
         "task": {"type": "worker"}})
    # RunConfig constructor will set job_name from TF_CONFIG.
    config = run_config.RunConfig()
    self.assertEqual(
        "train",
        learn_runner.run(lambda output_dir: TestExperiment(config=config),
                         output_dir="/tmp"))

  def test_schedule_from_manually_specified_job_name(self):
    config = run_config.RunConfig(
        job_name="worker",
        cluster_spec=build_distributed_cluster_spec())
    self.assertEqual(
        "train",
        learn_runner.run(lambda output_dir: TestExperiment(config=config),
                         output_dir="/tmp"))

  def test_schedule_from_config_runs_train_and_evaluate_on_master(self):
    config = run_config.RunConfig(
        job_name="master",
        cluster_spec=build_distributed_cluster_spec(),
        task=0,
        is_chief=True)
    self.assertEqual(
        "train_and_evaluate",
        learn_runner.run(lambda output_dir: TestExperiment(config=config),
                         output_dir="/tmp"))

  def test_schedule_from_config_runs_serve_on_ps(self):
    config = run_config.RunConfig(
        job_name="ps",
        cluster_spec=build_distributed_cluster_spec())
    self.assertEqual(
        "run_std_server",
        learn_runner.run(lambda output_dir: TestExperiment(config=config),
                         output_dir="/tmp"))

  def test_schedule_from_config_runs_train_on_worker(self):
    config = run_config.RunConfig(
        job_name="worker",
        cluster_spec=build_distributed_cluster_spec())
    self.assertEqual(
        "train",
        learn_runner.run(lambda output_dir: TestExperiment(config=config),
                         output_dir="/tmp"))

  def test_fail_no_output_dir(self):
    self.assertRaisesRegexp(ValueError,
                            "Must specify an output directory",
                            learn_runner.run,
                            build_experiment,
                            "",
                            "simple_task")

  def test_no_schedule_and_no_config_runs_train_and_evaluate(self):
    self.assertEqual(
        "train_and_evaluate",
        learn_runner.run(build_experiment, output_dir="/tmp"))

  def test_no_schedule_and_non_distributed_runs_train_and_evaluate(self):
    config = run_config.RunConfig(
        cluster_spec=build_non_distributed_cluster_spec())
    self.assertEqual(
        "train_and_evaluate",
        learn_runner.run(lambda output_dir: TestExperiment(config=config),
                         output_dir="/tmp"))

  def test_fail_job_name_with_no_default_schedule(self):
    config = run_config.RunConfig(
        job_name="foo_has_no_default_schedule",
        cluster_spec=build_distributed_cluster_spec())
    create_experiment_fn = lambda output_dir: TestExperiment(config=config)
    self.assertRaisesRegexp(ValueError,
                            "No default schedule",
                            learn_runner.run,
                            create_experiment_fn,
                            "/tmp")

  def test_fail_non_callable(self):
    self.assertRaisesRegexp(TypeError,
                            "Experiment builder .* is not callable",
                            learn_runner.run,
                            "not callable",
                            "/tmp",
                            "simple_test")

  def test_fail_not_experiment(self):
    self.assertRaisesRegexp(
        TypeError,
        "Experiment builder did not return an Experiment",
        learn_runner.run,
        build_non_experiment,
        "/tmp",
        "simple_test")

  def test_fail_non_existent_task(self):
    self.assertRaisesRegexp(ValueError,
                            "Schedule references non-existent task",
                            learn_runner.run,
                            build_experiment,
                            "/tmp",
                            "mirage")

  def test_fail_non_callable_task(self):
    self.assertRaisesRegexp(TypeError,
                            "Schedule references non-callable member",
                            learn_runner.run,
                            build_experiment,
                            "/tmp",
                            "default")

  def test_fail_schedule_from_config_with_no_job_name(self):
    config = run_config.RunConfig(
        job_name=None,
        cluster_spec=build_distributed_cluster_spec())
    self.assertRaisesRegexp(
        ValueError,
        "Must specify a schedule",
        learn_runner.run,
        lambda output_dir: TestExperiment(config=config),
        output_dir="/tmp")


if __name__ == "__main__":
  tf.test.main()
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import requests

from neutron.common import exceptions as n_exc
from neutron.i18n import _LE
from neutron.i18n import _LI
from oslo_log import log as logging
from oslo_serialization import jsonutils


LOG = logging.getLogger(__name__)

CONTENT_TYPE_HEADER = 'Content-type'
ACCEPT_HEADER = 'Accept'
AUTH_HEADER = 'Cookie'
DRIVER_HEADER = 'X-OpenStack-LBaaS'
TENANT_HEADER = 'X-Tenant-ID'
JSON_CONTENT_TYPE = 'application/json'
DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas'
NITRO_LOGIN_URI = 'nitro/v2/config/login'


class NCCException(n_exc.NeutronException):
    """Represents exceptions thrown by NSClient."""

    CONNECTION_ERROR = 1
    REQUEST_ERROR = 2
    RESPONSE_ERROR = 3
    UNKNOWN_ERROR = 4

    def __init__(self, error, status=requests.codes.SERVICE_UNAVAILABLE):
        self.message = _("NCC Error %d") % error
        super(NCCException, self).__init__()
        self.error = error
        self.status = status

    def is_not_found_exception(self):
        if int(self.status) == requests.codes.NOT_FOUND:
            return True


class NSClient(object):
    """Client to operate on REST resources of NetScaler Control Center."""

    def __init__(self, service_uri, username, password,
                 ncc_cleanup_mode="False"):
        if not service_uri:
            LOG.exception(_LE("No NetScaler Control Center URI specified. "
                              "Cannot connect."))
            raise NCCException(NCCException.CONNECTION_ERROR)
        self.service_uri = service_uri.strip('/')
        self.auth = None
        self.cleanup_mode = False
        if username and password:
            self.username = username
            self.password = password
        if ncc_cleanup_mode.lower() == "true":
            self.cleanup_mode = True

    def create_resource(self, tenant_id, resource_path, object_name,
                        object_data):
        """Create a resource of NetScaler Control Center."""
        return self._resource_operation('POST', tenant_id,
                                        resource_path,
                                        object_name=object_name,
                                        object_data=object_data)

    def is_login(self, resource_uri):
        if 'login' in resource_uri.lower():
            return True
        else:
            return False

    def login(self):
        """Get session based login."""
        login_obj = {"username": self.username, "password": self.password}

        msg = "NetScaler driver login:" + repr(login_obj)
        LOG.info(msg)
        resp_status, result = self.create_resource("login", NITRO_LOGIN_URI,
                                                   "login", login_obj)
        LOG.info(_LI("Response: status : %(status)s %(result)s"),
                 {"status": resp_status, "result": result['body']})
        result_body = jsonutils.loads(result['body'])

        session_id = None
        if result_body and "login" in result_body:
            logins = result_body["login"]
            if isinstance(logins, list):
                login = logins[0]
            else:
                login = logins
            if login and "sessionid" in login:
                session_id = login["sessionid"]

        if session_id:
            LOG.info(_LI("Response: %(result)s"), {"result": result['body']})
            LOG.info(
                _LI("Session_id = %(session_id)s") %
                {"session_id": session_id})
            # Update session_id in auth
            self.auth = "SessId=%s" % session_id
        else:
            raise NCCException(NCCException.RESPONSE_ERROR)

    def retrieve_resource(self, tenant_id, resource_path,
                          parse_response=True):
        """Retrieve a resource of NetScaler Control Center."""
        return self._resource_operation('GET', tenant_id, resource_path)

    def update_resource(self, tenant_id, resource_path, object_name,
                        object_data):
        """Update a resource of the NetScaler Control Center."""
        return self._resource_operation('PUT', tenant_id,
                                        resource_path,
                                        object_name=object_name,
                                        object_data=object_data)

    def remove_resource(self, tenant_id, resource_path, parse_response=True):
        """Remove a resource of NetScaler Control Center."""
        if self.cleanup_mode:
            return True
        else:
            return self._resource_operation('DELETE', tenant_id,
                                            resource_path)

    def _resource_operation(self, method, tenant_id, resource_path,
                            object_name=None, object_data=None):
        resource_uri = "%s/%s" % (self.service_uri, resource_path)
        if not self.auth and not self.is_login(resource_uri):
            # Creating a session for the first time
            self.login()
        headers = self._setup_req_headers(tenant_id)
        request_body = None
        if object_data:
            if isinstance(object_data, str):
                request_body = object_data
            else:
                obj_dict = {object_name: object_data}
                request_body = jsonutils.dumps(obj_dict)

        try:
            response_status, resp_dict = (
                self._execute_request(method, resource_uri, headers,
                                      body=request_body))
        except NCCException as e:
            if e.status == requests.codes.NOT_FOUND and method == 'DELETE':
                return 200, {}
            else:
                raise e

        return response_status, resp_dict

    def _is_valid_response(self, response_status):
        # when status is less than 400, the response is fine
        return response_status < requests.codes.bad_request

    def _setup_req_headers(self, tenant_id):
        headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE,
                   CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE,
                   DRIVER_HEADER: DRIVER_HEADER_VALUE,
                   TENANT_HEADER: tenant_id,
                   AUTH_HEADER: self.auth}
        return headers

    def _get_response_dict(self, response):
        response_dict = {'status': int(response.status_code),
                         'body': response.text,
                         'headers': response.headers}
        if self._is_valid_response(int(response.status_code)):
            if response.text:
                response_dict['dict'] = response.json()
        return response_dict

    def _execute_request(self, method, resource_uri, headers, body=None):
        service_uri_dict = {"service_uri": self.service_uri}
        try:
            response = requests.request(method, url=resource_uri,
                                        headers=headers, data=body)
        except requests.exceptions.SSLError:
            LOG.exception(_LE("SSL error occurred while connecting "
                              "to %(service_uri)s"),
                          service_uri_dict)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except requests.exceptions.ConnectionError:
            LOG.exception(_LE("Connection error occurred while connecting "
                              "to %(service_uri)s"),
                          service_uri_dict)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except requests.exceptions.Timeout:
            LOG.exception(
                _LE("Request to %(service_uri)s timed out"), service_uri_dict)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except (requests.exceptions.URLRequired,
                requests.exceptions.InvalidURL,
                requests.exceptions.MissingSchema,
                requests.exceptions.InvalidSchema):
            LOG.exception(_LE("Request did not specify a valid URL"))
            raise NCCException(NCCException.REQUEST_ERROR)
        except requests.exceptions.TooManyRedirects:
            LOG.exception(_LE("Too many redirects occurred for request"))
            raise NCCException(NCCException.REQUEST_ERROR)
        except requests.exceptions.RequestException:
            LOG.exception(
                _LE("A request error while connecting to %(service_uri)s"),
                service_uri_dict)
            raise NCCException(NCCException.REQUEST_ERROR)
        except Exception:
            LOG.exception(
                _LE("An unknown error occurred during request to "
                    "%(service_uri)s"),
                service_uri_dict)
            raise NCCException(NCCException.UNKNOWN_ERROR)
        resp_dict = self._get_response_dict(response)
        resp_body = resp_dict['body']
        LOG.info(_LI("Response: %(resp_body)s"), {"resp_body": resp_body})
        response_status = resp_dict['status']
        if response_status == requests.codes.unauthorized:
            LOG.exception(_LE("Unable to login. Invalid credentials passed "
                              "for: %s"),
                          self.service_uri)
            if not self.is_login(resource_uri):
                # Session expired, re-login and retry the operation
                self.login()
                headers.update({AUTH_HEADER: self.auth})
                self._execute_request(method, resource_uri, headers, body)
            else:
                raise NCCException(NCCException.RESPONSE_ERROR)
        if not self._is_valid_response(response_status):
            response_msg = resp_body
            response_dict = {"method": method,
                             "url": resource_uri,
                             "response_status": response_status,
                             "response_msg": response_msg}
            LOG.exception(_LE("Failed %(method)s operation on %(url)s "
                              "status code: %(response_status)s "
                              "message: %(response_msg)s"),
                          response_dict)
            raise NCCException(NCCException.RESPONSE_ERROR, response_status)
        return response_status, resp_dict
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # The MIT License (MIT) # # Copyright (c) 2015 NetCharm <netcharm@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ############################################################################### from __future__ import unicode_literals from __future__ import division import os import sys import math import cmath import StringIO import codecs from utils.common import * from pymeshio import pmx from pymeshio.pmx import reader as pmxReader def pmxLoad(f_pmx): model = None if os.path.isfile(f_pmx): model = pmxReader.read_from_file(f_pmx) return(model) def pmx2egg(pmx): modelname = os.path.basename(pmx.path) modelpath = os.path.dirname(pmx.path) lines = [] # # make egg header # # lines.append('<CoordinateSystem> { Z-Up }') lines.append('<CoordinateSystem> { Y-up-left }') lines.append('') lines.append('<Comment> {') lines.append(' "%s"' % pmx.path) lines.append('}') lines.append('') # # load texture list # tex_mode = dict() for m in pmx.materials: tmode = ('Modulate', None) if m.texture_index >= 0: tex_mode[pmx.textures[m.texture_index]] = tmode if m.sphere_texture_index>=0: if m.sphere_mode > 0: if m.sphere_mode == 1: tmode = ('ModulateGlow', m.name+'_sphere') elif m.sphere_mode == 2: tmode = ('Add', m.name+'_sphere') elif m.sphere_mode == 3: tmode = ('Replace', m.name+'_sphere') tex_mode[pmx.textures[m.sphere_texture_index]] = tmode if m.toon_texture_index>=0: if m.toon_sharing_flag > 0: tmode = ('Glow', m.name+'_toon') tex_mode[pmx.textures[m.toon_texture_index]] = tmode # # load shared toon textures # idx = 0 textures = [] for tex in pmx.textures: if tex in tex_mode: tmode = tex_mode[tex] else: tmode = 'REPLACE' tex = os.path.basename(bmp2png(os.path.join(modelpath, tex))) lines.append('<Texture> tex_%04d {' % (idx)) lines.append(' "%s"' % (tex)) lines.append(' <Scalar> minfilter { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> magfilter { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> magfilteralpha { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> magfiltercolor { NEAREST_MIPMAP_NEAREST }') if tmode[1]: lines.append(' <Scalar> format { RGB }') lines.append(' <Scalar> wrapu { CLAMP }') lines.append(' <Scalar> envtype { %s }' % tmode[0]) lines.append(' <Scalar> stage-name { %s }' % tmode[1]) lines.append(' <Scalar> tex-gen { EYE_SPHERE_MAP}') lines.append(' <Scalar> alpha { ON }') else: lines.append(' <Scalar> format { RGBA }') lines.append(' <Scalar> envtype 
{ ModulateGloss }') lines.append(' <Scalar> alpha { ON }') lines.append(' <Scalar> draw-order { 10000 }') lines.append(' <Transform> { <Scale> { 1 -1 1 } }') lines.append('}') textures.append(u'tex_%04d' % (idx)) idx += 1 lines.append('') idx = 0 for toon in ['toon0.bmp', 'toon01.bmp', 'toon02.bmp', 'toon03.bmp', 'toon04.bmp', 'toon05.bmp', 'toon06.bmp', 'toon07.bmp', 'toon08.bmp', 'toon09.bmp', 'toon10.bmp']: tmode = ('Glow', 'ts_toon_%02d' % idx) tex = os.path.basename(bmp2png(os.path.join(modelpath, toon))) lines.append('<Texture> toon_%02d {' % (idx)) lines.append(' "%s"' % (toon)) lines.append(' <Scalar> minfilter { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> magfilter { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> magfilteralpha { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> magfiltercolor { NEAREST_MIPMAP_NEAREST }') lines.append(' <Scalar> format { RGB }') lines.append(' <Scalar> wrapu { CLAMP }') lines.append(' <Scalar> envtype { %s }' % tmode[0]) lines.append(' <Scalar> stage-name { %s }' % tmode[1]) lines.append(' <Scalar> tex-gen { EYE_SPHERE_MAP}') lines.append(' <Scalar> alpha { ON }') lines.append(' <Scalar> draw-order { 10000 }') lines.append(' <Transform> { <Scale> { 1 -1 1 } }') lines.append('}') textures.append(u'tex_%04d' % (idx)) idx += 1 lines.append('') # # load materials # idx = 0 for m in pmx.materials: lines.append('<Material> "%s" {' % m.name) lines.append(' <Scalar> ambientr { %.8f }' % m.ambient_color.r) lines.append(' <Scalar> ambientg { %.8f }' % m.ambient_color.g) lines.append(' <Scalar> ambientb { %.8f }' % m.ambient_color.b) lines.append(' <Scalar> ambienta { %.8f }' % m.alpha) lines.append(' <Scalar> diffr { %.8f }' % m.diffuse_color.r) lines.append(' <Scalar> diffg { %.8f }' % m.diffuse_color.g) lines.append(' <Scalar> diffb { %.8f }' % m.diffuse_color.b) lines.append(' <Scalar> diffa { %.8f }' % m.alpha) lines.append(' <Scalar> emissionr { %.8f }' % m.edge_color.r) lines.append(' <Scalar> emissiong { %.8f }' % m.edge_color.g) lines.append(' <Scalar> emissionb { %.8f }' % m.edge_color.b) lines.append(' <Scalar> emissiona { %.8f }' % m.edge_color.a) lines.append(' <Scalar> specr { %.8f }' % m.specular_color.r) lines.append(' <Scalar> specg { %.8f }' % m.specular_color.g) lines.append(' <Scalar> specb { %.8f }' % m.specular_color.b) lines.append(' <Scalar> speca { %.8f }' % m.alpha) lines.append(' <Scalar> shininess { %.8f }' % m.specular_factor ) lines.append(' <Scalar> local { 0 }') lines.append('}') lines.append('') # # load morph data # dxyz = dict() for morph in pmx.morphs: if morph.morph_type == 1: for idx in xrange(len(morph.offsets)): offset = morph.offsets[idx] v = pmx.vertices[offset.vertex_index].position o = offset.position_offset i = offset.vertex_index if i in dxyz: dxyz[i].append((morph.name, o)) else: dxyz[i] = [(morph.name, o)] # # load vertices # idx = 0 lines.append('<VertexPool> "%s" {' % pmx.name) for v in pmx.vertices: p = v.position uv = v.uv n = v.normal lines.append(' <Vertex> %d {' % idx) # lines.append(' %.8f %.8f %.8f' % (p.x, p.z, p.y)) lines.append(' %.8f %.8f %.8f' % (p.x, p.y, p.z)) lines.append(' <UV> { %.8f %.8f }' % (uv.x, uv.y)) lines.append(' <Normal> { %.8f %.8f %.8f }' % (n.x, n.y, n.z)) lines.append(' <RGBA> { 1 1 1 1 }') if idx in dxyz: for vm in dxyz[idx]: lines.append(' <Dxyz> "%s" { %.8f %.8f %.8f }' % (vm[0], vm[1].x, vm[1].y, vm[1].z)) lines.append(' }') idx += 1 lines.append('}') lines.append('') # # load face polygon # vIndex = 0 lines.append('<Group> "%s" {' % pmx.name) lines.append(' 
<Dart> { 1 }')
    lines.append(' <Group> "Body" {')
    for m in pmx.materials:
        lines.append(' <Group> "%s" {' % m.name)
        for idx in xrange(vIndex, vIndex+m.vertex_count, 3):
            lines.append(' <Polygon> {')
            lines.append(' <VertexRef> { %d %d %d <Ref> { %s } }' % (
                pmx.indices[idx], pmx.indices[idx+1], pmx.indices[idx+2],
                pmx.name))
            lines.append(' <MRef> { "%s" }' % (m.name))
            if m.flag & 0x00000001:
                lines.append(' <BFace> { 1 }')
            if m.texture_index >= 0:
                lines.append(' <TRef> { "%s" }' % (textures[m.texture_index]))
            if m.sphere_texture_index >= 0:
                lines.append(' <TRef> { "%s" }' % (textures[m.sphere_texture_index]))
            if m.toon_sharing_flag > 0:
                if m.toon_texture_index >= 0:
                    lines.append(' <TRef> { "%s" }' % (textures[m.toon_texture_index]))
            else:
                if m.toon_texture_index > 0:
                    lines.append(' <TRef> { "%s" }' % (u'toon_%02d' % m.toon_texture_index))
                else:
                    lines.append(' <TRef> { "%s" }' % (u'toon_00'))
            lines.append(' }')
        lines.append(' }')
        vIndex += m.vertex_count
    lines.append(' }')

    #
    # load bone data
    #
    idx = 0
    depth = 0
    boneRelative = dict()
    # Group bones by their parent index so a <Joint> hierarchy could be
    # emitted later; bone.parent_index is assumed to be the pymeshio
    # attribute (the commented-out sketch below uses the same name).
    for bone in pmx.bones:
        parent_index = bone.parent_index
        if parent_index in boneRelative:
            boneRelative[parent_index].append(bone)
        else:
            boneRelative[parent_index] = [bone]
    # if bone.parent_index == -1:
    #     depth = 1
    #     lines.append(' '*depth+'<Joint> %s {' % bone.name)
    #     if bone.parent_index in boneRelative:
    #         boneRelative[bone.parent_index].append()
    #     else:
    #         boneRelative[bone.parent_index] = dict()
    #     lines.append(' '*depth+'}')
    #     pass

    lines.append('}')
    lines.append('')

    #
    # make animation data
    #
    idx = 0
    lines.append('<Table> expressions {')
    for morph in pmx.morphs:
        lines.append(' <Bundle> "%s" {' % pmx.name)
        lines.append(' <Table> morph {')
        lines.append(' <S$Anim> "%s" {' % morph.name)
        lines.append(' <Scalar> fps { 5 }')
        lines.append(' <V> { 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0 }')
        lines.append(' }')
        lines.append(' }')
        lines.append(' }')
        idx += 1
    lines.append('}')
    lines.append('')

    #
    #
    #
    lines.append('')
    lines.append('')
    lines.append('')
    lines.append('')
    lines.append('')

    return(lines)


if __name__ == '__main__':
    pmxfile = 'models/meiko/meiko.pmx'
    pmxmodel = pmxLoad(pmxfile)
    egg = pmx2egg(pmxmodel)
    with codecs.open('egg_test.egg', 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(egg))
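# ---------------------------------------------------------------------------
# The converter above relies on a bmp2png() helper pulled in via
# ``from utils.common import *``; utils.common is not shown in this file.
# Below is a minimal sketch of what such a helper could look like, assuming
# the Pillow/PIL library is available.  It only illustrates the expected
# behaviour (write a PNG next to the source image and return its path); it is
# not the project's actual implementation.
def _bmp2png_sketch(path):
    """Convert an image to PNG beside the original and return the new path.

    If the file is already a PNG, or cannot be read/converted, the original
    path is returned unchanged so the caller can still reference a texture.
    """
    import os
    base, ext = os.path.splitext(path)
    if ext.lower() == '.png' or not os.path.isfile(path):
        return path
    try:
        from PIL import Image
    except ImportError:
        return path
    png_path = base + '.png'
    Image.open(path).save(png_path)
    return png_path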
""" desisim.pixsim ============== Tools for DESI pixel level simulations using specter """ from __future__ import absolute_import, division, print_function import sys import os import os.path import random from time import asctime import socket import astropy.units as u import numpy as np import desimodel.io import desispec.io from desispec.image import Image import desispec.cosmics from . import obs, io from desiutil.log import get_logger log = get_logger() # Inhibit download of IERS-A catalog, even from a good server. # Note that this is triggered by a call to astropy.time.Time(), # which is subsequently used to compute sidereal_time(). # It's the initialization of astropy.time.Time() itself that makes the call. from desiutil.iers import freeze_iers from astropy.time import Time def simulate_exposure(simspecfile, rawfile, cameras=None, ccdshape=None, simpixfile=None, addcosmics=None, comm=None, **kwargs): """ Simulate frames from an exposure, including I/O Args: simspecfile: input simspec format file with spectra rawfile: output raw data file to write Options: cameras: str or list of str, e.g. b0, r1, .. z9 ccdshape: (npix_y, npix_x) primarily used to limit memory while testing simpixfile: output file for noiseless truth pixels addcosmics: if True (must be specified via command input), add cosmics from real data comm: MPI communicator object Additional keyword args are passed to pixsim.simulate() For a lower-level pixel simulation interface that doesn't perform I/O, see pixsim.simulate() Note: call desi_preproc or desispec.preproc.preproc to pre-process the output desi*.fits file for overscan subtraction, noise estimation, etc. """ #- Split communicator by nodes; each node processes N frames #- Assumes / requires equal number of ranks per node if comm is not None: rank, size = comm.rank, comm.size num_nodes = mpi_count_nodes(comm) comm_node, node_index, num_nodes = mpi_split_by_node(comm, 1) node_rank = comm_node.rank node_size = comm_node.size else: log.debug('Not using MPI') rank, size = 0, 1 comm_node = None node_index = 0 num_nodes = 1 node_rank = 0 node_size = 1 if rank == 0: log.debug('Starting simulate_exposure at {}'.format(asctime())) if cameras is None: if rank == 0: from astropy.io import fits fibermap = fits.getdata(simspecfile, 'FIBERMAP') cameras = io.fibers2cameras(fibermap['FIBER']) log.debug('Found cameras {} in input simspec file'.format(cameras)) if len(cameras) % num_nodes != 0: raise ValueError('Number of cameras {} should be evenly divisible by number of nodes {}'.format( len(cameras), num_nodes)) if comm is not None: cameras = comm.bcast(cameras, root=0) #- Fail early if camera alreaady in output file if rank == 0 and os.path.exists(rawfile): from astropy.io import fits err = False with fits.open(rawfile) as fx: for camera in cameras: if camera in fx: log.error('Camera {} already in {}'.format(camera, rawfile)) err = True if err: raise ValueError('Some cameras already in output file') #- Read simspec input; I/O layer handles MPI broadcasting if rank == 0: log.debug('Reading simspec at {}'.format(asctime())) mycameras = cameras[node_index::num_nodes] if node_rank == 0: log.info("Assigning cameras {} to comm_exp node {}".format(mycameras, node_index)) simspec = io.read_simspec(simspecfile, cameras=mycameras, readflux=False, comm=comm) night = simspec.header['NIGHT'] expid = simspec.header['EXPID'] if rank == 0: log.debug('Reading PSFs at {}'.format(asctime())) psfs = dict() #need to initialize previous channel previous_channel = 'a' for camera in mycameras: #- Note: 
current PSF object can't be pickled and thus every #- rank must read it instead of rank 0 read + bcast channel = camera[0] if channel not in psfs: log.info('Reading {} PSF at {}'.format(channel, asctime())) psfs[channel] = desimodel.io.load_psf(channel) #- Trim effective CCD size; mainly to limit memory for testing if ccdshape is not None: psfs[channel].npix_y, psfs[channel].npix_x = ccdshape psf = psfs[channel] cosmics=None #avoid re-broadcasting cosmics if we can if previous_channel != channel: if (addcosmics is True) and (node_rank == 0): cosmics_file = io.find_cosmics(camera, simspec.header['EXPTIME']) log.info('Reading cosmics templates {} at {}'.format( cosmics_file, asctime())) shape = (psf.npix_y, psf.npix_x) cosmics = io.read_cosmics(cosmics_file, expid, shape=shape) if (addcosmics is True) and (comm_node is not None): if node_rank == 0: log.info('Broadcasting cosmics at {}'.format(asctime())) cosmics = comm_node.bcast(cosmics, root=0) else: log.debug("Cosmics not requested") if node_rank == 0: log.info("Starting simulate for camera {} on node {}".format(camera,node_index)) image, rawpix, truepix = simulate(camera, simspec, psf, comm=comm_node, preproc=False, cosmics=cosmics, **kwargs) #- Use input communicator as barrier since multiple sub-communicators #- will write to the same output file if rank == 0: log.debug('Writing outputs at {}'.format(asctime())) tmprawfile = rawfile + '.tmp' if comm is not None: for i in range(comm.size): if (i == comm.rank) and (comm_node.rank == 0): desispec.io.write_raw(tmprawfile, rawpix, image.meta, camera=camera) if simpixfile is not None: io.write_simpix(simpixfile, truepix, camera=camera, meta=image.meta) comm.barrier() else: desispec.io.write_raw(tmprawfile, rawpix, image.meta, camera=camera) if simpixfile is not None: io.write_simpix(simpixfile, truepix, camera=camera, meta=image.meta) if rank == 0: log.info('Wrote {}'.format(rawfile)) log.debug('done at {}'.format(asctime())) previous_channel = channel #- All done; rename temporary raw file to final location if comm is None or comm.rank == 0: os.rename(tmprawfile, rawfile) def simulate(camera, simspec, psf, nspec=None, ncpu=None, cosmics=None, wavemin=None, wavemax=None, preproc=True, comm=None): """Run pixel-level simulation of input spectra Args: camera (string) : b0, r1, .. z9 simspec : desispec.io.SimSpec object from desispec.io.read_simspec() psf : subclass of specter.psf.psf.PSF, e.g. from desimodel.io.load_psf() Options: nspec (int): number of spectra to simulate ncpu (int): number of CPU cores to use in parallel cosmics (desispec.image.Image): e.g. 
from desisim.io.read_cosmics() wavemin (float): minimum wavelength range to simulate wavemax (float): maximum wavelength range to simulate preproc (boolean, optional) : also preprocess raw data (default True) Returns: (image, rawpix, truepix) tuple, where image is the preproc Image object (only header is meaningful if preproc=False), rawpix is a 2D ndarray of unprocessed raw pixel data, and truepix is a 2D ndarray of truth for image.pix """ freeze_iers() if (comm is None) or (comm.rank == 0): log.info('Starting pixsim.simulate camera {} at {}'.format(camera, asctime())) #- parse camera name into channel and spectrograph number channel = camera[0].lower() ispec = int(camera[1]) assert channel in 'brz', \ 'unrecognized channel {} camera {}'.format(channel, camera) assert 0 <= ispec < 10, \ 'unrecognized spectrograph {} camera {}'.format(ispec, camera) assert len(camera) == 2, \ 'unrecognized camera {}'.format(camera) #- Load DESI parameters params = desimodel.io.load_desiparams() #- this is not necessarily true, the truth in is the fibermap nfibers = params['spectro']['nfibers'] phot = simspec.cameras[camera].phot if simspec.cameras[camera].skyphot is not None: phot += simspec.cameras[camera].skyphot if nspec is not None: phot = phot[0:nspec] else: nspec = phot.shape[0] #- Trim wavelengths if needed wave = simspec.cameras[camera].wave if wavemin is not None: ii = (wave >= wavemin) phot = phot[:, ii] wave = wave[ii] if wavemax is not None: ii = (wave <= wavemax) phot = phot[:, ii] wave = wave[ii] #- Project to image and append that to file if (comm is None) or (comm.rank == 0): log.info('Starting {} projection at {}'.format(camera, asctime())) # The returned true pixel values will only exist on rank 0 in the # MPI case. Otherwise it will be None. truepix = parallel_project(psf, wave, phot, ncpu=ncpu, comm=comm) if (comm is None) or (comm.rank == 0): log.info('Finished {} projection at {}'.format(camera, asctime())) image = None rawpix = None if (comm is None) or (comm.rank == 0): #- Start metadata header header = simspec.header.copy() header['CAMERA'] = camera header['DOSVER'] = 'SIM' header['FEEVER'] = 'SIM' header['DETECTOR'] = 'SIM' #- Add cosmics from library of dark images ny = truepix.shape[0] // 2 nx = truepix.shape[1] // 2 if cosmics is not None: # set to zeros values with mask bit 0 (= dead column or hot pixels) cosmics_pix = cosmics.pix*((cosmics.mask&1)==0) pix = np.random.poisson(truepix) + cosmics_pix try: #- cosmics templates >= v0.3 rdnoiseA = cosmics.meta['OBSRDNA'] rdnoiseB = cosmics.meta['OBSRDNB'] rdnoiseC = cosmics.meta['OBSRDNC'] rdnoiseD = cosmics.meta['OBSRDND'] except KeyError: #- cosmics templates <= v0.2 print(cosmic.meta) rdnoiseA = cosmics.meta['RDNOISE0'] rdnoiseB = cosmics.meta['RDNOISE1'] rdnoiseC = cosmics.meta['RDNOISE2'] rdnoiseD = cosmics.meta['RDNOISE3'] else: pix = truepix readnoise = params['ccd'][channel]['readnoise'] rdnoiseA = rdnoiseB = rdnoiseC = rdnoiseD = readnoise #- data already has noise if cosmics were added noisydata = (cosmics is not None) #- Split by amplifier and expand into raw data nprescan = params['ccd'][channel]['prescanpixels'] if 'overscanpixels' in params['ccd'][channel]: noverscan = params['ccd'][channel]['overscanpixels'] else: noverscan = 50 #- Reproducibly random overscan bias level offsets across diff exp assert channel in 'brz' if channel == 'b': irand = ispec elif channel == 'r': irand = 10 + ispec elif channel == 'z': irand = 20 + ispec seeds = np.random.RandomState(0).randint(2**32-1, size=30) rand = 
np.random.RandomState(seeds[irand]) nyraw = ny nxraw = nx + nprescan + noverscan rawpix = np.empty( (nyraw*2, nxraw*2), dtype=np.int32 ) gain = params['ccd'][channel]['gain'] #- Amp A/1 Lower Left rawpix[0:nyraw, 0:nxraw] = \ photpix2raw(pix[0:ny, 0:nx], gain, rdnoiseA, readorder='lr', nprescan=nprescan, noverscan=noverscan, offset=rand.uniform(100, 200), noisydata=noisydata) #- Amp B/2 Lower Right rawpix[0:nyraw, nxraw:nxraw+nxraw] = \ photpix2raw(pix[0:ny, nx:nx+nx], gain, rdnoiseB, readorder='rl', nprescan=nprescan, noverscan=noverscan, offset=rand.uniform(100, 200), noisydata=noisydata) #- Amp C/3 Upper Left rawpix[nyraw:nyraw+nyraw, 0:nxraw] = \ photpix2raw(pix[ny:ny+ny, 0:nx], gain, rdnoiseC, readorder='lr', nprescan=nprescan, noverscan=noverscan, offset=rand.uniform(100, 200), noisydata=noisydata) #- Amp D/4 Upper Right rawpix[nyraw:nyraw+nyraw, nxraw:nxraw+nxraw] = \ photpix2raw(pix[ny:ny+ny, nx:nx+nx], gain, rdnoiseD, readorder='rl', nprescan=nprescan, noverscan=noverscan, offset=rand.uniform(100, 200), noisydata=noisydata) def xyslice2header(xyslice): ''' convert 2D slice into IRAF style [a:b,c:d] header value e.g. xyslice2header(np.s_[0:10, 5:20]) -> '[6:20,1:10]' ''' yy, xx = xyslice value = '[{}:{},{}:{}]'.format(xx.start+1, xx.stop, yy.start+1, yy.stop) return value #- Amp order from DESI-1964 (previously 1-4 instead of A-D) #- C D #- A B xoffset = nprescan+nx+noverscan header['PRESECA'] = xyslice2header(np.s_[0:nyraw, 0:0+nprescan]) header['DATASECA'] = xyslice2header(np.s_[0:nyraw, nprescan:nprescan+nx]) header['BIASSECA'] = xyslice2header(np.s_[0:nyraw, nprescan+nx:nprescan+nx+noverscan]) header['CCDSECA'] = xyslice2header(np.s_[0:ny, 0:nx]) header['PRESECB'] = xyslice2header(np.s_[0:nyraw, xoffset+noverscan+nx:xoffset+noverscan+nx+nprescan]) header['DATASECB'] = xyslice2header(np.s_[0:nyraw, xoffset+noverscan:xoffset+noverscan+nx]) header['BIASSECB'] = xyslice2header(np.s_[0:nyraw, xoffset:xoffset+noverscan]) header['CCDSECB'] = xyslice2header(np.s_[0:ny, nx:2*nx]) header['PRESECC'] = xyslice2header(np.s_[nyraw:2*nyraw, 0:0+nprescan]) header['DATASECC'] = xyslice2header(np.s_[nyraw:2*nyraw, nprescan:nprescan+nx]) header['BIASSECC'] = xyslice2header(np.s_[nyraw:2*nyraw, nprescan+nx:nprescan+nx+noverscan]) header['CCDSECC'] = xyslice2header(np.s_[ny:2*ny, 0:nx]) header['PRESECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset+noverscan+nx:xoffset+noverscan+nx+nprescan]) header['DATASECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset+noverscan:xoffset+noverscan+nx]) header['BIASSECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset:xoffset+noverscan]) header['CCDSECD'] = xyslice2header(np.s_[ny:2*ny, nx:2*nx]) #- Add additional keywords to mimic real raw data header['INSTRUME'] = 'DESI' header['PROCTYPE'] = 'RAW' header['PRODTYPE'] = 'image' header['EXPFRAME'] = 0 header['REQTIME'] = simspec.header['EXPTIME'] header['TIMESYS'] = 'UTC' #- DATE-OBS format YEAR-MM-DDThh:mm:ss.sss -> OBSID kpnoYEARMMDDthhmmss header['OBSID']='kp4m'+header['DATE-OBS'][0:19].replace('-','').replace(':','').lower() header['TIME-OBS'] = header['DATE-OBS'].split('T')[1] header['DELTARA'] = 0.0 header['DELTADEC'] = 0.0 header['SPECGRPH'] = ispec header['CCDNAME'] = 'CCDS' + str(ispec) + str(channel).upper() header['CCDPREP'] = 'purge,clear' header['CCDSIZE'] = '{},{}'.format(rawpix.shape[0], rawpix.shape[1]) header['CCDTEMP'] = 850.0 header['CPUTEMP'] = 63.7 header['CASETEMP'] = 62.8 header['CCDTMING'] = 'sim_timing.txt' header['CCDCFG'] = 'sim.cfg' header['SETTINGS'] = 'sim_detectors.json' 
header['VESSEL'] = 7 #- I don't know what this is header['FEEBOX'] = 'sim097' header['PGAGAIN'] = 5 header['OCSVER'] = 'SIM' header['CONSTVER'] = 'SIM' header['BLDTIME'] = 0.35 header['DIGITIME'] = 61.9 #- Remove some spurious header keywords from upstream if 'BUNIT' in header and header['BUNIT'] == 'Angstrom': del header['BUNIT'] if 'MJD' in header and 'MJD-OBS' not in header: header['MJD-OBS'] = header['MJD'] del header['MJD'] for key in ['RA', 'DEC']: if key in header: del header[key] #- Drive MJD-OBS from DATE-OBS if needed if 'MJD-OBS' not in header: header['MJD-OBS'] = Time(header['DATE-OBS']).mjd #- from http://www-kpno.kpno.noao.edu/kpno-misc/mayall_params.html kpno_longitude = -(111. + 35/60. + 59.6/3600) * u.deg #- Convert DATE-OBS to sexigesimal (sigh) Local Sidereal Time #- Use mean ST as close enough for sims to avoid nutation calc t = Time(header['DATE-OBS']) # This calculation can raise a non-catastrophic "overflow encountered in # double_scalars" error in astropy (see # https://github.com/desihub/specsim/pull/120); catch it here. with np.errstate(all='ignore'): st = t.sidereal_time('mean', kpno_longitude).to('deg').value hour = st/15 minute = (hour % 1)*60 second = (minute % 1)*60 header['ST'] = '{:02d}:{:02d}:{:0.3f}'.format( int(hour), int(minute), second) if preproc: log.debug('Running preprocessing at {}'.format(asctime())) image = desispec.preproc.preproc(rawpix, header, primary_header=simspec.header) else: log.debug('Skipping preprocessing') image = Image(np.zeros(truepix.shape), np.zeros(truepix.shape), meta=header) if (comm is None) or (comm.rank == 0): log.info('Finished pixsim.simulate for camera {} at {}'.format(camera, asctime())) return image, rawpix, truepix def photpix2raw(phot, gain=1.0, readnoise=3.0, offset=None, nprescan=7, noverscan=50, readorder='lr', noisydata=True): ''' Add prescan, overscan, noise, and integerization to an image Args: phot: 2D float array of mean input photons per pixel gain (float, optional): electrons/ADU readnoise (float, optional): CCD readnoise in electrons offset (float, optional): bias offset to add nprescan (int, optional): number of prescan pixels to add noverscan (int, optional): number of overscan pixels to add readorder (str, optional): 'lr' or 'rl' to indicate readout order 'lr' : add prescan on left and overscan on right of image 'rl' : add prescan on right and overscan on left of image noisydata (boolean, optional) : if True, don't add noise, e.g. because input signal already had noise from a cosmics image Returns 2D integer ndarray: image = int((poisson(phot) + offset + gauss(readnoise))/gain) Integerization happens twice: the mean photons are poisson sampled into integers, but then offets, readnoise, and gain are applied before resampling into ADU integers This is intended to be used per-amplifier, not for an entire CCD image. 
''' ny = phot.shape[0] nx = phot.shape[1] + nprescan + noverscan #- reading from right to left is effectively swapping pre/overscan counts if readorder.lower() in ('rl', 'rightleft'): nprescan, noverscan = noverscan, nprescan img = np.zeros((ny, nx), dtype=float) img[:, nprescan:nprescan+phot.shape[1]] = phot if offset is None: offset = np.random.uniform(100, 200) if noisydata: #- Data already has noise; just add offset and noise to pre/overscan img += offset img[0:ny, 0:nprescan] += np.random.normal(scale=readnoise, size=(ny, nprescan)) ix = phot.shape[1] + nprescan img[0:ny, ix:ix+noverscan] += np.random.normal(scale=readnoise, size=(ny, noverscan)) img /= gain else: #- Add offset and noise to everything noise = np.random.normal(loc=offset, scale=readnoise, size=img.shape) img = np.random.poisson(img) + noise img /= gain return img.astype(np.int32) #- Helper function for multiprocessing parallel project def _project(args): """ Helper function to project photons onto a subimage Args: tuple/array of [psf, wave, phot, specmin] Returns (xyrange, subimage) such that xmin, xmax, ymin, ymax = xyrange image[ymin:ymax, xmin:xmax] += subimage """ try: psf, wave, phot, specmin = args nspec = phot.shape[0] if phot.shape[-1] != wave.shape[-1]: raise ValueError('phot.shape {} vs. wave.shape {} mismatch'.format(phot.shape, wave.shape)) xyrange = psf.xyrange( [specmin, specmin+nspec], wave ) img = psf.project(wave, phot, specmin=specmin, xyrange=xyrange) return (xyrange, img) except Exception as e: if os.getenv('UNITTEST_SILENT') is None: import traceback print('-'*60) print('ERROR in _project', psf.wmin, psf.wmax, wave[0], wave[-1], phot.shape, specmin) traceback.print_exc() print('-'*60) raise e #- Move this into specter itself? def parallel_project(psf, wave, phot, specmin=0, ncpu=None, comm=None): """ Using psf, project phot[nspec, nw] vs. 
wave[nw] onto image Return 2D image """ img = None if comm is not None: # MPI version # Get a smaller communicator if not enough spectra nspec = phot.shape[0] if nspec < comm.size: keep = int(comm.rank < nspec) comm = comm.Split(color=keep) if not keep: return None specs = np.arange(phot.shape[0], dtype=np.int32) myspecs = np.array_split(specs, comm.size)[comm.rank] nspec = phot.shape[0] iispec = np.linspace(specmin, nspec, int(comm.size+1)).astype(int) args = list() if comm.rank == 0: for i in range(comm.size): if iispec[i+1] > iispec[i]: args.append( [psf, wave, phot[iispec[i]:iispec[i+1]], iispec[i]] ) args=comm.scatter(args,root=0) #now that all ranks have args, we can call _project xy_subimg=_project(args) #_project calls project calls spotgrid etc xy_subimg=comm.gather(xy_subimg,root=0) if comm.rank ==0: #now all the data should be back at rank 0 # use same technique as multiprocessing to recombine the data img = np.zeros( (psf.npix_y, psf.npix_x) ) for xyrange, subimg in xy_subimg: xmin, xmax, ymin, ymax = xyrange img[ymin:ymax, xmin:xmax] += subimg #end of mpi section else: import multiprocessing as mp if ncpu is None: # Avoid hyperthreading ncpu = mp.cpu_count() // 2 if ncpu <= 1: #- Serial version log.debug('Not using multiprocessing (ncpu={})'.format(ncpu)) img = psf.project(wave, phot, specmin=specmin) else: #- multiprocessing version #- Split the spectra into ncpu groups log.debug('Using multiprocessing (ncpu={})'.format(ncpu)) nspec = phot.shape[0] iispec = np.linspace(specmin, nspec, ncpu+1).astype(int) args = list() for i in range(ncpu): if iispec[i+1] > iispec[i]: #- can be false if nspec < ncpu args.append( [psf, wave, phot[iispec[i]:iispec[i+1]], iispec[i]] ) #- Create pool of workers to do the projection using _project #- xyrange, subimg = _project( [psf, wave, phot, specmin] ) pool = mp.Pool(ncpu) xy_subimg = pool.map(_project, args) #print("xy_subimg from pool") #print(xy_subimg) #print(len(xy_subimg)) img = np.zeros( (psf.npix_y, psf.npix_x) ) for xyrange, subimg in xy_subimg: xmin, xmax, ymin, ymax = xyrange img[ymin:ymax, xmin:xmax] += subimg #- Prevents hangs of Travis tests pool.close() pool.join() return img def get_nodes_per_exp(nnodes,nexposures,ncameras,user_nodes_per_comm_exp=None): """ Calculate how many nodes to use per exposure Args: nnodes: number of nodes in MPI COMM_WORLD (not number of ranks) nexposures: number of exposures to process ncameras: number of cameras per exposure user_nodes_per_comm_exp (int, optional): user override of number of nodes to use; used to check requirements Returns number of nodes to include in sub-communicators used to process individual exposures Notes: * Uses the largest number of nodes per exposure that will still result in efficient node usage * requires that (nexposures*ncameras) / nnodes = int * the derived nodes_per_comm_exp * nexposures / nodes = int * See desisim.test.test_pixsim.test_get_nodes_per_exp() for examples * if user_nodes_per_comm_exp is given, requires that GreatestCommonDivisor(nnodes, ncameras) / user_nodes_per_comm_exp = int """ from math import gcd import desiutil.log as logging log = logging.get_logger() log.setLevel(logging.INFO) #check if nframes is evenly divisible by nnodes nframes = ncameras*nexposures if nframes % nnodes !=0: ### msg=("nframes {} must be evenly divisible by nnodes {}, try again".format(nframes, nnodes)) ### raise ValueError(msg) msg=("nframes {} is not evenly divisible by nnodes {}; packing will be inefficient".format(nframes, nnodes)) log.warning(msg) else: log.debug("nframes 
{} is evenly divisible by nnodes {}, check passed".format(nframes, nnodes)) #find greatest common divisor between nnodes and ncameras #greatest common divisor = greatest common factor #we use python's built in gcd greatest_common_factor=gcd(nnodes,ncameras) #the greatest common factor must be greater than one UNLESS we are on one node if nnodes > 1: if greatest_common_factor == 1: msg=("greatest common factor {} between nnodes {} and nframes {} must be larger than one, try again".format(greatest_common_factor, nnodes, nframes)) raise ValueError(msg) else: log.debug("greatest common factor {} between nnodes {} and nframes {} is greater than one, check passed".format(greatest_common_factor, nnodes, nframes)) #check to make sure the user hasn't specified a really asinine value of user_nodes_per_comm_exp if user_nodes_per_comm_exp is not None: if greatest_common_factor % user_nodes_per_comm_exp !=0: msg=("user-specified value of user_nodes_per_comm_exp {} is bad, try again".format(user_nodes_per_comm_exp)) raise ValueError(msg) else: log.debug("user-specified value of user_nodes_per_comm_exp {} is good, check passed".format(user_nodes_per_comm_exp)) nodes_per_comm_exp=user_nodes_per_comm_exp #if the user didn't specify anything, use the greatest common factor if user_nodes_per_comm_exp is None: nodes_per_comm_exp=greatest_common_factor #finally check to make sure exposures*gcf/nnodes is an integer to avoid inefficient node use if (nexposures*nodes_per_comm_exp) % nnodes != 0: ### msg=("nexposures {} * nodes_per_comm_exp {} does not divide evenly into nnodes {}, try again".format(nexposures, nodes_per_comm_exp, nnodes)) ### raise ValueError(msg) msg=("nexposures {} * nodes_per_comm_exp {} does not divide evenly into nnodes {}; packing will be inefficient".format(nexposures, nodes_per_comm_exp, nnodes)) log.warning(msg) else: log.debug("nexposures {} * nodes_per_comm_exp {} divides evenly into nnodes {}, check passed".format(nexposures, nodes_per_comm_exp, nnodes)) return nodes_per_comm_exp #------------------------------------------------------------------------- #- MPI utility functions #- These functions assist with splitting a communicator across node boundaries. 
#- That constraint isn't required by MPI, but can be convenient for humans #- thinking about "I want to process one camera with one node" or "I want to #- process 6 exposures with 20 nodes using 10 nodes per exposure" def mpi_count_nodes(comm): ''' Return the number of nodes in this communicator ''' nodenames = comm.allgather(socket.gethostname()) num_nodes=len(set(nodenames)) return num_nodes def mpi_split_by_node(comm, nodes_per_communicator): ''' Split an MPI communicator into sub-communicators with integer numbers of nodes per communicator Args: comm: MPI communicator nodes_per_communicator: number of nodes per sub-communicator Returns: MPI sub-communicator, node_index, total_num_nodes Notes: * total number of nodes in original communicator must be an integer multiple of nodes_per_communicator * if comm is split into N sub-communicators, node_index is the index of which of the N is returned for this rank * total_num_nodes = number of nodes in original communicator ''' num_nodes = mpi_count_nodes(comm) if comm.size % num_nodes != 0: raise ValueError('Variable number of ranks per node') if num_nodes % nodes_per_communicator != 0: raise ValueError('Input number of nodes {} must be divisible by nodes_per_communicator {}'.format( num_nodes, nodes_per_communicator)) ranks_per_communicator = comm.size // (num_nodes // nodes_per_communicator) node_index = comm.rank // ranks_per_communicator comm_node = comm.Split(color = node_index) return comm_node, node_index, num_nodes
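# ---------------------------------------------------------------------------
# A small usage sketch (not part of the module) exercising two helpers
# defined above; it assumes desisim and its dependencies are importable.
#
#   * photpix2raw() turns an (ny, nx) photon image into raw amplifier data of
#     shape (ny, nx + nprescan + noverscan) in integer ADU.
#   * get_nodes_per_exp() reproduces the "6 exposures with 20 nodes using
#     10 nodes per exposure" packing mentioned in the comments above:
#     gcd(20 nodes, 30 cameras) = 10, and 6*10 divides evenly by 20.
if __name__ == '__main__':
    import numpy as np
    from desisim.pixsim import photpix2raw, get_nodes_per_exp

    phot = np.full((500, 400), 100.0)          # mean photons per pixel
    raw = photpix2raw(phot, gain=1.0, readnoise=3.0,
                      nprescan=7, noverscan=50, noisydata=False)
    assert raw.shape == (500, 400 + 7 + 50)    # prescan + data + overscan
    assert raw.dtype == np.int32

    assert get_nodes_per_exp(nnodes=20, nexposures=6, ncameras=30) == 10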
import httplib import StringIO import socket from unittest import TestCase from test import test_support HOST = test_support.HOST class FakeSocket: def __init__(self, text, fileclass=StringIO.StringIO): self.text = text self.fileclass = fileclass self.data = '' def sendall(self, data): self.data += data def makefile(self, mode, bufsize=None): if mode != 'r' and mode != 'rb': raise httplib.UnimplementedFileMode() return self.fileclass(self.text) class NoEOFStringIO(StringIO.StringIO): """Like StringIO, but raises AssertionError on EOF. This is used below to test that httplib doesn't try to read more from the underlying file than it should. """ def read(self, n=-1): data = StringIO.StringIO.read(self, n) if data == '': raise AssertionError('caller tried to read past EOF') return data def readline(self, length=None): data = StringIO.StringIO.readline(self, length) if data == '': raise AssertionError('caller tried to read past EOF') return data class HeaderTests(TestCase): def test_auto_headers(self): # Some headers are added automatically, but should not be added by # .request() if they are explicitly set. import httplib class HeaderCountingBuffer(list): def __init__(self): self.count = {} def append(self, item): kv = item.split(':') if len(kv) > 1: # item is a 'Key: Value' header string lcKey = kv[0].lower() self.count.setdefault(lcKey, 0) self.count[lcKey] += 1 list.append(self, item) for explicit_header in True, False: for header in 'Content-length', 'Host', 'Accept-encoding': conn = httplib.HTTPConnection('example.com') conn.sock = FakeSocket('blahblahblah') conn._buffer = HeaderCountingBuffer() body = 'spamspamspam' headers = {} if explicit_header: headers[header] = str(len(body)) conn.request('POST', '/', body, headers) self.assertEqual(conn._buffer.count[header.lower()], 1) class BasicTest(TestCase): def test_status_lines(self): # Test HTTP status lines body = "HTTP/1.1 200 Ok\r\n\r\nText" sock = FakeSocket(body) resp = httplib.HTTPResponse(sock) resp.begin() self.assertEqual(resp.read(), 'Text') self.assertTrue(resp.isclosed()) body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText" sock = FakeSocket(body) resp = httplib.HTTPResponse(sock) self.assertRaises(httplib.BadStatusLine, resp.begin) def test_partial_reads(self): # if we have a lenght, the system knows when to close itself # same behaviour than when we read the whole thing with read() body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText" sock = FakeSocket(body) resp = httplib.HTTPResponse(sock) resp.begin() self.assertEqual(resp.read(2), 'Te') self.assertFalse(resp.isclosed()) self.assertEqual(resp.read(2), 'xt') self.assertTrue(resp.isclosed()) def test_host_port(self): # Check invalid host_port for hp in ("www.python.org:abc", "www.python.org:"): self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp) for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b", 8000), ("www.python.org:80", "www.python.org", 80), ("www.python.org", "www.python.org", 80), ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)): http = httplib.HTTP(hp) c = http._conn if h != c.host: self.fail("Host incorrectly parsed: %s != %s" % (h, c.host)) if p != c.port: self.fail("Port incorrectly parsed: %s != %s" % (p, c.host)) def test_response_headers(self): # test response with multiple message headers with the same field name. 
text = ('HTTP/1.1 200 OK\r\n' 'Set-Cookie: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"\r\n' 'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";' ' Path="/acme"\r\n' '\r\n' 'No body\r\n') hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"' ', ' 'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"') s = FakeSocket(text) r = httplib.HTTPResponse(s) r.begin() cookies = r.getheader("Set-Cookie") if cookies != hdr: self.fail("multiple headers not combined properly") def test_read_head(self): # Test that the library doesn't attempt to read any data # from a HEAD request. (Tickles SF bug #622042.) sock = FakeSocket( 'HTTP/1.1 200 OK\r\n' 'Content-Length: 14432\r\n' '\r\n', NoEOFStringIO) resp = httplib.HTTPResponse(sock, method="HEAD") resp.begin() if resp.read() != "": self.fail("Did not expect response from HEAD request") def test_send_file(self): expected = 'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \ 'Accept-Encoding: identity\r\nContent-Length:' body = open(__file__, 'rb') conn = httplib.HTTPConnection('example.com') sock = FakeSocket(body) conn.sock = sock conn.request('GET', '/foo', body) self.assertTrue(sock.data.startswith(expected)) def test_chunked(self): chunked_start = ( 'HTTP/1.1 200 OK\r\n' 'Transfer-Encoding: chunked\r\n\r\n' 'a\r\n' 'hello worl\r\n' '1\r\n' 'd\r\n' ) sock = FakeSocket(chunked_start + '0\r\n') resp = httplib.HTTPResponse(sock, method="GET") resp.begin() self.assertEquals(resp.read(), 'hello world') resp.close() for x in ('', 'foo\r\n'): sock = FakeSocket(chunked_start + x) resp = httplib.HTTPResponse(sock, method="GET") resp.begin() try: resp.read() except httplib.IncompleteRead, i: self.assertEquals(i.partial, 'hello world') else: self.fail('IncompleteRead expected') finally: resp.close() def test_negative_content_length(self): sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n') resp = httplib.HTTPResponse(sock, method="GET") resp.begin() self.assertEquals(resp.read(), 'Hello\r\n') resp.close() class OfflineTest(TestCase): def test_responses(self): self.assertEquals(httplib.responses[httplib.NOT_FOUND], "Not Found") class TimeoutTest(TestCase): PORT = None def setUp(self): self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) TimeoutTest.PORT = test_support.bind_port(self.serv) self.serv.listen(5) def tearDown(self): self.serv.close() self.serv = None def testTimeoutAttribute(self): '''This will prove that the timeout gets through HTTPConnection and into the socket. ''' # default -- use global socket timeout self.assert_(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(30) try: httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT) httpConn.connect() finally: socket.setdefaulttimeout(None) self.assertEqual(httpConn.sock.gettimeout(), 30) httpConn.close() # no timeout -- do not use global socket default self.assert_(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(30) try: httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT, timeout=None) httpConn.connect() finally: socket.setdefaulttimeout(None) self.assertEqual(httpConn.sock.gettimeout(), None) httpConn.close() # a value httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30) httpConn.connect() self.assertEqual(httpConn.sock.gettimeout(), 30) httpConn.close() class HTTPSTimeoutTest(TestCase): # XXX Here should be tests for HTTPS, there isn't any right now! 
    def test_attributes(self):
        # simple test to check that the timeout value is stored
        if hasattr(httplib, 'HTTPSConnection'):
            h = httplib.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
            self.assertEqual(h.timeout, 30)


def test_main(verbose=None):
    test_support.run_unittest(HeaderTests, OfflineTest, BasicTest,
                              TimeoutTest, HTTPSTimeoutTest)


if __name__ == '__main__':
    test_main()
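# ---------------------------------------------------------------------------
# A short, self-contained check of the chunked-encoding fixture used in
# BasicTest.test_chunked above (a sketch, not part of the test suite).  Each
# chunk is a hexadecimal length line followed by that many bytes, and a "0"
# length line terminates the body, so 'a\r\nhello worl\r\n1\r\nd\r\n0\r\n'
# decodes to 'hello world'.
if __name__ == '__main__':
    assert int('a', 16) == len('hello worl')   # first chunk: 10 bytes
    assert int('1', 16) == len('d')            # second chunk: 1 byte
    assert 'hello worl' + 'd' == 'hello world'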
# data structure: # D = { typename : { bid level : revocation num}} import numpy as np revocation_num = { 'c3.2xlarge': { 0.12: 4208.3, 0.13: 4208.3, 0.14: 4208.3, 0.15: 4208.3, 0.16: 4208.3, 0.17: 4198.54, 0.18: 4095.47, 0.19: 3763.73, 0.2: 3308.16, 0.21: 2763.84, 0.22: 2250.29, 0.23: 1820.61, 0.24: 1458.2, 0.25: 1171.07, 0.26: 933.74, 0.27: 730.83, 0.28: 572.54, 0.29: 446.94, 0.3: 349.77, 0.31: 267.04, 0.32: 216.26, 0.33: 172.47, 0.34: 139.04, 0.35: 110.14, 0.36: 86.17, 0.37: 70.61, 0.38: 58.17, 0.39: 47.67, 0.4: 40.04, 0.41: 34.18, 0.42: 28.5, 0.43: 23.66, 0.44: 19.26, 0.45: 16.03, 0.46: 13.43, 0.47: 11.18, 0.48: 9.12, 0.49: 7.92, 0.5: 6.48, 0.51: 5.87, 0.52: 5.17, 0.53: 4.64, 0.54: 4.19, 0.55: 3.84, 0.56: 3.46, 0.57: 3.32, 0.58: 3.12, 0.59: 3.02, 0.6: 2.53, 0.61: 2.5, 0.62: 2.43, 0.63: 2.39, 0.64: 2.36, 0.65: 2.34, 0.66: 2.33, 0.67: 2.33, 0.68: 2.31, 0.69: 2.3, 0.7: 2.29, 0.71: 2.29, 0.72: 2.2, 0.73: 2.2, 0.74: 2.2, 0.75: 2.2, 0.76: 2.2, 0.77: 2.2, 0.78: 2.19, 0.79: 2.19, 0.8: 2.16, 0.81: 2.16, 0.82: 2.16, 0.83: 2.16, 0.84: 2.16, 0.85: 2.14, 0.86: 2.14, 0.87: 2.14, 0.88: 2.14, 0.89: 2.14, 0.9: 2.14, 0.91: 2.13, 0.92: 2.13, 0.93: 2.13, 0.94: 2.13, 0.95: 2.13, 0.96: 2.1, 0.97: 2.1, 0.98: 2.09 }, 'c3.4xlarge': { 0.12: 3941.77, 0.13: 3941.77, 0.14: 3941.77, 0.15: 3941.77, 0.16: 3934.8, 0.17: 3842.84, 0.18: 3594.43, 0.19: 3189.54, 0.2: 2719.54, 0.21: 2260.2, 0.22: 1844.71, 0.23: 1508.37, 0.24: 1234.52, 0.25: 1022.9, 0.26: 861.98, 0.27: 731.29, 0.28: 623.68, 0.29: 538.9, 0.3: 447.66, 0.31: 399.98, 0.32: 358.72, 0.33: 326.46, 0.34: 296.04, 0.35: 270.64, 0.36: 246.52, 0.37: 227.31, 0.38: 209.86, 0.39: 194.92, 0.4: 180.93, 0.41: 168.48, 0.42: 157.32, 0.43: 147.14, 0.44: 137.53, 0.45: 128.66, 0.46: 120.52, 0.47: 113.03, 0.48: 106.08, 0.49: 100.73, 0.5: 93.67, 0.51: 88.72, 0.52: 84.44, 0.53: 80.53, 0.54: 76.68, 0.55: 73.27, 0.56: 70.14, 0.57: 67.18, 0.58: 64.37, 0.59: 62.07, 0.6: 55.86, 0.61: 54.0, 0.62: 52.5, 0.63: 51.06, 0.64: 49.64, 0.65: 48.4, 0.66: 47.11, 0.67: 45.64, 0.68: 44.47, 0.69: 43.72, 0.7: 42.63, 0.71: 41.67, 0.72: 40.34, 0.73: 39.52, 0.74: 38.81, 0.75: 38.02, 0.76: 37.4, 0.77: 36.76, 0.78: 36.22, 0.79: 35.56, 0.8: 35.21, 0.81: 34.74, 0.82: 34.33, 0.83: 34.01, 0.84: 33.51, 0.85: 33.18, 0.86: 32.88, 0.87: 32.54, 0.88: 32.01, 0.89: 31.47, 0.9: 31.13, 0.91: 31.0, 0.92: 31.0, 0.93: 31.0, 0.94: 29.64, 0.95: 29.61, 0.96: 29.06, 0.97: 29.02, 0.98: 29.0 }, 'c3.8xlarge': { 0.12: 2889.94, 0.13: 2889.94, 0.14: 2889.94, 0.15: 2889.94, 0.16: 2835.78, 0.17: 2289.62, 0.18: 1703.28, 0.19: 1270.56, 0.2: 966.24, 0.21: 732.17, 0.22: 560.76, 0.23: 436.27, 0.24: 337.92, 0.25: 263.46, 0.26: 199.2, 0.27: 149.81, 0.28: 120.31, 0.29: 99.14, 0.3: 82.86, 0.31: 72.23, 0.32: 64.88, 0.33: 59.32, 0.34: 54.71, 0.35: 49.94, 0.36: 45.73, 0.37: 42.7, 0.38: 40.02, 0.39: 37.72, 0.4: 35.59, 0.41: 33.59, 0.42: 31.77, 0.43: 30.17, 0.44: 28.64, 0.45: 27.19, 0.46: 25.81, 0.47: 24.44, 0.48: 21.02, 0.49: 20.27, 0.5: 18.86, 0.51: 18.27, 0.52: 17.86, 0.53: 17.32, 0.54: 16.9, 0.55: 16.53, 0.56: 16.18, 0.57: 15.87, 0.58: 15.53, 0.59: 15.13, 0.6: 12.24, 0.61: 12.12, 0.62: 12.0, 0.63: 11.72, 0.64: 11.57, 0.65: 11.37, 0.66: 11.29, 0.67: 11.13, 0.68: 11.01, 0.69: 10.98, 0.7: 10.92, 0.71: 10.82, 0.72: 10.79, 0.73: 10.71, 0.74: 10.64, 0.75: 10.6, 0.76: 10.57, 0.77: 10.44, 0.78: 10.37, 0.79: 10.32, 0.8: 10.21, 0.81: 10.13, 0.82: 10.12, 0.83: 10.11, 0.84: 10.08, 0.85: 9.94, 0.86: 9.86, 0.87: 9.7, 0.88: 9.69, 0.89: 9.66, 0.9: 9.62, 0.91: 9.54, 0.92: 9.43, 0.93: 9.43, 0.94: 9.43, 0.95: 9.43, 0.96: 8.98, 0.97: 8.98, 0.98: 8.97 }, 
'c3.large': { 0.12: 836.22, 0.13: 836.22, 0.14: 836.22, 0.15: 836.22, 0.16: 795.31, 0.17: 417.47, 0.18: 199.52, 0.19: 109.4, 0.2: 74.84, 0.21: 54.79, 0.22: 42.29, 0.23: 31.68, 0.24: 26.61, 0.25: 22.72, 0.26: 20.03, 0.27: 17.57, 0.28: 16.18, 0.29: 14.64, 0.3: 13.76, 0.31: 12.74, 0.32: 12.14, 0.33: 11.69, 0.34: 11.32, 0.35: 11.04, 0.36: 10.68, 0.37: 10.38, 0.38: 10.07, 0.39: 9.72, 0.4: 9.56, 0.41: 9.34, 0.42: 9.19, 0.43: 8.96, 0.44: 8.78, 0.45: 8.63, 0.46: 8.49, 0.47: 8.36, 0.48: 7.94, 0.49: 7.83, 0.5: 7.78, 0.51: 7.66, 0.52: 7.59, 0.53: 7.5, 0.54: 7.41, 0.55: 7.32, 0.56: 7.24, 0.57: 7.19, 0.58: 7.09, 0.59: 7.02, 0.6: 6.98, 0.61: 6.9, 0.62: 6.86, 0.63: 6.86, 0.64: 6.82, 0.65: 6.79, 0.66: 6.79, 0.67: 6.76, 0.68: 6.74, 0.69: 6.73, 0.7: 6.72, 0.71: 6.72, 0.72: 6.7, 0.73: 6.68, 0.74: 6.67, 0.75: 6.66, 0.76: 6.62, 0.77: 6.6, 0.78: 6.57, 0.79: 6.56, 0.8: 6.54, 0.81: 6.5, 0.82: 6.48, 0.83: 6.47, 0.84: 6.46, 0.85: 6.46, 0.86: 6.29, 0.87: 6.28, 0.88: 6.28, 0.89: 6.28, 0.9: 6.28, 0.91: 6.28, 0.92: 6.28, 0.93: 6.28, 0.94: 6.28, 0.95: 6.28, 0.96: 6.0, 0.97: 6.0, 0.98: 6.0 }, 'c3.xlarge': { 0.12: 3116.36, 0.13: 3116.36, 0.14: 3116.36, 0.15: 3116.36, 0.16: 3108.72, 0.17: 2859.2, 0.18: 2341.02, 0.19: 1814.31, 0.2: 1372.86, 0.21: 1050.86, 0.22: 815.73, 0.23: 647.33, 0.24: 517.17, 0.25: 425.46, 0.26: 358.77, 0.27: 300.87, 0.28: 253.64, 0.29: 217.19, 0.3: 187.81, 0.31: 163.92, 0.32: 142.6, 0.33: 126.06, 0.34: 108.92, 0.35: 97.5, 0.36: 87.1, 0.37: 78.5, 0.38: 70.6, 0.39: 61.97, 0.4: 55.89, 0.41: 50.47, 0.42: 45.23, 0.43: 39.89, 0.44: 34.56, 0.45: 30.43, 0.46: 27.06, 0.47: 24.29, 0.48: 20.99, 0.49: 19.44, 0.5: 17.9, 0.51: 16.39, 0.52: 14.97, 0.53: 13.87, 0.54: 12.93, 0.55: 12.17, 0.56: 11.43, 0.57: 10.86, 0.58: 9.86, 0.59: 9.32, 0.6: 8.98, 0.61: 8.54, 0.62: 8.12, 0.63: 7.86, 0.64: 7.63, 0.65: 7.38, 0.66: 7.23, 0.67: 6.24, 0.68: 6.13, 0.69: 6.03, 0.7: 5.99, 0.71: 5.87, 0.72: 5.8, 0.73: 5.77, 0.74: 5.61, 0.75: 5.53, 0.76: 5.5, 0.77: 5.48, 0.78: 5.47, 0.79: 5.47, 0.8: 5.43, 0.81: 5.27, 0.82: 5.26, 0.83: 5.2, 0.84: 5.11, 0.85: 5.03, 0.86: 5.01, 0.87: 4.98, 0.88: 4.94, 0.89: 4.92, 0.9: 4.87, 0.91: 4.63, 0.92: 4.63, 0.93: 4.63, 0.94: 4.63, 0.95: 4.63, 0.96: 4.44, 0.97: 4.44, 0.98: 4.44 }, 'd2.2xlarge': { 0.12: 291.73, 0.13: 163.37, 0.14: 91.53, 0.15: 51.86, 0.16: 40.96, 0.17: 31.8, 0.18: 25.69, 0.19: 23.58, 0.2: 19.41, 0.21: 16.78, 0.22: 14.32, 0.23: 12.27, 0.24: 10.98, 0.25: 9.6, 0.26: 8.66, 0.27: 8.21, 0.28: 7.68, 0.29: 6.58, 0.3: 6.29, 0.31: 5.99, 0.32: 5.73, 0.33: 5.44, 0.34: 5.09, 0.35: 4.89, 0.36: 4.69, 0.37: 4.37, 0.38: 4.29, 0.39: 4.22, 0.4: 4.16, 0.41: 4.02, 0.42: 4.01, 0.43: 3.97, 0.44: 3.18, 0.45: 3.14, 0.46: 3.07, 0.47: 3.07, 0.48: 3.03, 0.49: 3.01, 0.5: 2.96, 0.51: 2.92, 0.52: 2.76, 0.53: 2.76, 0.54: 2.71, 0.55: 2.68, 0.56: 2.68, 0.57: 2.63, 0.58: 2.62, 0.59: 2.6, 0.6: 2.57, 0.61: 2.57, 0.62: 2.57, 0.63: 2.56, 0.64: 2.56, 0.65: 2.54, 0.66: 2.49, 0.67: 2.48, 0.68: 2.48, 0.69: 2.47, 0.7: 2.47, 0.71: 2.44, 0.72: 2.44, 0.73: 2.42, 0.74: 2.41, 0.75: 2.41, 0.76: 2.41, 0.77: 2.41, 0.78: 2.41, 0.79: 2.41, 0.8: 2.4, 0.81: 2.4, 0.82: 2.39, 0.83: 2.39, 0.84: 2.38, 0.85: 2.37, 0.86: 2.37, 0.87: 2.37, 0.88: 2.37, 0.89: 2.37, 0.9: 2.37, 0.91: 2.37, 0.92: 2.37, 0.93: 2.37, 0.94: 2.37, 0.95: 2.37, 0.96: 2.37, 0.97: 2.37, 0.98: 2.37 }, 'd2.4xlarge': { 0.12: 170.89, 0.13: 125.82, 0.14: 99.93, 0.15: 70.76, 0.16: 60.23, 0.17: 52.52, 0.18: 44.23, 0.19: 39.07, 0.2: 33.63, 0.21: 28.84, 0.22: 26.57, 0.23: 21.34, 0.24: 19.08, 0.25: 18.4, 0.26: 16.62, 0.27: 15.89, 0.28: 15.36, 0.29: 13.13, 0.3: 12.72, 0.31: 11.17, 0.32: 10.93, 
0.33: 10.66, 0.34: 10.5, 0.35: 10.27, 0.36: 9.97, 0.37: 9.61, 0.38: 9.36, 0.39: 8.58, 0.4: 8.43, 0.41: 8.33, 0.42: 8.27, 0.43: 8.2, 0.44: 6.91, 0.45: 6.66, 0.46: 6.64, 0.47: 6.51, 0.48: 6.36, 0.49: 6.24, 0.5: 6.24, 0.51: 6.22, 0.52: 5.97, 0.53: 5.88, 0.54: 5.88, 0.55: 5.87, 0.56: 5.36, 0.57: 5.3, 0.58: 5.3, 0.59: 5.29, 0.6: 5.21, 0.61: 5.21, 0.62: 5.06, 0.63: 5.06, 0.64: 5.06, 0.65: 5.01, 0.66: 4.99, 0.67: 4.99, 0.68: 4.99, 0.69: 4.94, 0.7: 4.89, 0.71: 4.89, 0.72: 4.86, 0.73: 4.39, 0.74: 4.39, 0.75: 4.39, 0.76: 4.39, 0.77: 4.39, 0.78: 4.39, 0.79: 4.39, 0.8: 4.39, 0.81: 4.39, 0.82: 4.39, 0.83: 4.39, 0.84: 4.39, 0.85: 4.39, 0.86: 4.39, 0.87: 4.39, 0.88: 4.39, 0.89: 4.39, 0.9: 4.39, 0.91: 4.39, 0.92: 4.39, 0.93: 4.39, 0.94: 4.39, 0.95: 4.39, 0.96: 4.39, 0.97: 4.39, 0.98: 4.39 }, 'd2.8xlarge': { 0.12: 117.63, 0.13: 86.46, 0.14: 58.24, 0.15: 33.93, 0.16: 27.38, 0.17: 19.51, 0.18: 11.27, 0.19: 10.1, 0.2: 7.47, 0.21: 5.44, 0.22: 4.37, 0.23: 3.69, 0.24: 3.36, 0.25: 3.11, 0.26: 3.08, 0.27: 3.04, 0.28: 3.03, 0.29: 2.47, 0.3: 2.37, 0.31: 2.33, 0.32: 2.29, 0.33: 2.29, 0.34: 2.27, 0.35: 2.26, 0.36: 2.24, 0.37: 2.24, 0.38: 2.24, 0.39: 2.24, 0.4: 2.24, 0.41: 2.24, 0.42: 2.24, 0.43: 2.24, 0.44: 1.77, 0.45: 1.77, 0.46: 1.77, 0.47: 1.76, 0.48: 1.76, 0.49: 1.76, 0.5: 1.76, 0.51: 1.76, 0.52: 1.76, 0.53: 1.76, 0.54: 1.76, 0.55: 1.76, 0.56: 1.76, 0.57: 1.76, 0.58: 1.76, 0.59: 1.74, 0.6: 1.74, 0.61: 1.74, 0.62: 1.74, 0.63: 1.74, 0.64: 1.74, 0.65: 1.74, 0.66: 1.74, 0.67: 1.74, 0.68: 1.74, 0.69: 1.74, 0.7: 1.74, 0.71: 1.74, 0.72: 1.74, 0.73: 1.74, 0.74: 1.74, 0.75: 1.74, 0.76: 1.74, 0.77: 1.74, 0.78: 1.74, 0.79: 1.74, 0.8: 1.74, 0.81: 1.74, 0.82: 1.73, 0.83: 1.73, 0.84: 1.73, 0.85: 1.73, 0.86: 1.73, 0.87: 1.73, 0.88: 1.73, 0.89: 1.73, 0.9: 1.73, 0.91: 1.73, 0.92: 1.73, 0.93: 1.73, 0.94: 1.73, 0.95: 1.73, 0.96: 1.73, 0.97: 1.73, 0.98: 1.73 }, 'd2.xlarge': { 0.12: 245.48, 0.13: 231.32, 0.14: 218.29, 0.15: 203.58, 0.16: 181.41, 0.17: 149.42, 0.18: 127.9, 0.19: 111.63, 0.2: 95.73, 0.21: 80.03, 0.22: 71.14, 0.23: 61.41, 0.24: 50.34, 0.25: 43.22, 0.26: 36.44, 0.27: 31.92, 0.28: 28.29, 0.29: 24.94, 0.3: 21.38, 0.31: 17.98, 0.32: 15.73, 0.33: 14.13, 0.34: 12.71, 0.35: 11.32, 0.36: 10.47, 0.37: 9.94, 0.38: 9.02, 0.39: 8.09, 0.4: 7.39, 0.41: 6.93, 0.42: 6.62, 0.43: 6.06, 0.44: 4.67, 0.45: 4.26, 0.46: 3.92, 0.47: 3.71, 0.48: 3.48, 0.49: 3.28, 0.5: 3.14, 0.51: 3.07, 0.52: 2.92, 0.53: 2.84, 0.54: 2.73, 0.55: 2.73, 0.56: 2.4, 0.57: 2.37, 0.58: 2.03, 0.59: 2.02, 0.6: 1.98, 0.61: 1.97, 0.62: 1.93, 0.63: 1.88, 0.64: 1.88, 0.65: 1.88, 0.66: 1.87, 0.67: 1.82, 0.68: 1.82, 0.69: 1.82, 0.7: 1.82, 0.71: 1.81, 0.72: 1.8, 0.73: 1.66, 0.74: 1.66, 0.75: 1.66, 0.76: 1.66, 0.77: 1.66, 0.78: 1.66, 0.79: 1.66, 0.8: 1.66, 0.81: 1.66, 0.82: 1.66, 0.83: 1.66, 0.84: 1.66, 0.85: 1.66, 0.86: 1.66, 0.87: 1.66, 0.88: 1.66, 0.89: 1.66, 0.9: 1.66, 0.91: 1.66, 0.92: 1.66, 0.93: 1.66, 0.94: 1.66, 0.95: 1.66, 0.96: 1.66, 0.97: 1.66, 0.98: 1.66 }, 'g2.2xlarge': { 0.12: 2952.92, 0.13: 2815.1, 0.14: 2516.51, 0.15: 2183.04, 0.16: 1900.16, 0.17: 1629.76, 0.18: 1394.2, 0.19: 1159.24, 0.2: 990.44, 0.21: 840.39, 0.22: 721.26, 0.23: 618.1, 0.24: 531.99, 0.25: 462.84, 0.26: 400.56, 0.27: 341.27, 0.28: 296.03, 0.29: 256.2, 0.3: 226.19, 0.31: 191.2, 0.32: 180.84, 0.33: 165.57, 0.34: 156.62, 0.35: 150.16, 0.36: 142.07, 0.37: 134.2, 0.38: 130.44, 0.39: 121.57, 0.4: 117.71, 0.41: 112.93, 0.42: 109.24, 0.43: 106.73, 0.44: 99.12, 0.45: 94.07, 0.46: 92.01, 0.47: 88.77, 0.48: 86.58, 0.49: 84.9, 0.5: 82.21, 0.51: 80.96, 0.52: 77.19, 0.53: 75.26, 0.54: 73.98, 0.55: 68.19, 0.56: 
67.04, 0.57: 65.48, 0.58: 64.59, 0.59: 63.08, 0.6: 62.39, 0.61: 59.37, 0.62: 56.98, 0.63: 56.31, 0.64: 55.67, 0.65: 55.02, 0.66: 53.97, 0.67: 53.46, 0.68: 52.92, 0.69: 52.3, 0.7: 51.72, 0.71: 51.19, 0.72: 50.44, 0.73: 49.41, 0.74: 47.42, 0.75: 46.84, 0.76: 46.58, 0.77: 45.92, 0.78: 45.59, 0.79: 44.94, 0.8: 44.59, 0.81: 44.32, 0.82: 44.24, 0.83: 43.87, 0.84: 43.71, 0.85: 43.23, 0.86: 42.98, 0.87: 42.36, 0.88: 42.36, 0.89: 42.36, 0.9: 42.3, 0.91: 42.3, 0.92: 42.3, 0.93: 41.5, 0.94: 41.5, 0.95: 41.5, 0.96: 41.5, 0.97: 41.5, 0.98: 41.5 }, 'g2.8xlarge': { 0.12: 706.53, 0.13: 704.86, 0.14: 701.34, 0.15: 691.92, 0.16: 676.66, 0.17: 655.06, 0.18: 622.41, 0.19: 589.04, 0.2: 542.97, 0.21: 511.72, 0.22: 484.78, 0.23: 457.91, 0.24: 435.38, 0.25: 412.98, 0.26: 393.48, 0.27: 364.93, 0.28: 352.46, 0.29: 329.16, 0.3: 314.61, 0.31: 295.22, 0.32: 281.8, 0.33: 270.28, 0.34: 256.86, 0.35: 245.81, 0.36: 233.74, 0.37: 220.81, 0.38: 213.07, 0.39: 174.12, 0.4: 169.46, 0.41: 166.51, 0.42: 163.77, 0.43: 161.59, 0.44: 140.29, 0.45: 135.28, 0.46: 133.07, 0.47: 131.54, 0.48: 129.51, 0.49: 128.57, 0.5: 122.59, 0.51: 121.34, 0.52: 120.23, 0.53: 117.14, 0.54: 116.04, 0.55: 107.29, 0.56: 106.52, 0.57: 105.62, 0.58: 103.51, 0.59: 94.57, 0.6: 94.11, 0.61: 93.49, 0.62: 90.12, 0.63: 89.64, 0.64: 89.07, 0.65: 88.82, 0.66: 87.18, 0.67: 86.7, 0.68: 84.61, 0.69: 84.3, 0.7: 83.39, 0.71: 83.12, 0.72: 82.5, 0.73: 82.17, 0.74: 81.69, 0.75: 81.17, 0.76: 80.89, 0.77: 78.99, 0.78: 78.7, 0.79: 78.54, 0.8: 78.39, 0.81: 78.34, 0.82: 78.23, 0.83: 78.08, 0.84: 77.73, 0.85: 76.97, 0.86: 76.53, 0.87: 76.51, 0.88: 76.51, 0.89: 76.51, 0.9: 76.51, 0.91: 76.51, 0.92: 76.51, 0.93: 76.51, 0.94: 76.51, 0.95: 76.51, 0.96: 76.51, 0.97: 76.51, 0.98: 76.51 }, 'i2.2xlarge': { 0.12: 501.09, 0.13: 322.32, 0.14: 190.69, 0.15: 98.48, 0.16: 52.12, 0.17: 28.71, 0.18: 16.6, 0.19: 12.5, 0.2: 11.33, 0.21: 10.2, 0.22: 9.64, 0.23: 8.7, 0.24: 7.6, 0.25: 7.01, 0.26: 5.99, 0.27: 5.72, 0.28: 5.49, 0.29: 5.38, 0.3: 5.17, 0.31: 4.52, 0.32: 4.41, 0.33: 4.23, 0.34: 4.19, 0.35: 4.01, 0.36: 3.91, 0.37: 3.88, 0.38: 3.71, 0.39: 2.77, 0.4: 2.74, 0.41: 2.74, 0.42: 2.74, 0.43: 2.74, 0.44: 2.73, 0.45: 2.7, 0.46: 2.69, 0.47: 2.69, 0.48: 2.69, 0.49: 2.69, 0.5: 2.56, 0.51: 2.56, 0.52: 2.56, 0.53: 2.56, 0.54: 2.56, 0.55: 2.03, 0.56: 2.03, 0.57: 2.03, 0.58: 2.03, 0.59: 1.83, 0.6: 1.83, 0.61: 1.83, 0.62: 1.79, 0.63: 1.79, 0.64: 1.79, 0.65: 1.79, 0.66: 1.79, 0.67: 1.79, 0.68: 1.79, 0.69: 1.79, 0.7: 1.79, 0.71: 1.79, 0.72: 1.79, 0.73: 1.79, 0.74: 1.79, 0.75: 1.79, 0.76: 1.79, 0.77: 1.79, 0.78: 1.79, 0.79: 1.79, 0.8: 1.79, 0.81: 1.79, 0.82: 1.79, 0.83: 1.79, 0.84: 1.79, 0.85: 1.79, 0.86: 1.79, 0.87: 1.79, 0.88: 1.79, 0.89: 1.79, 0.9: 1.79, 0.91: 1.79, 0.92: 1.79, 0.93: 1.79, 0.94: 1.79, 0.95: 1.79, 0.96: 1.79, 0.97: 1.79, 0.98: 1.79 }, 'i2.4xlarge': { 0.12: 366.49, 0.13: 306.62, 0.14: 240.62, 0.15: 182.38, 0.16: 135.67, 0.17: 104.33, 0.18: 87.87, 0.19: 69.29, 0.2: 48.2, 0.21: 34.11, 0.22: 23.24, 0.23: 19.3, 0.24: 17.83, 0.25: 16.36, 0.26: 12.68, 0.27: 11.37, 0.28: 10.2, 0.29: 9.14, 0.3: 7.77, 0.31: 7.06, 0.32: 6.67, 0.33: 6.33, 0.34: 6.1, 0.35: 5.8, 0.36: 5.58, 0.37: 5.42, 0.38: 5.07, 0.39: 4.31, 0.4: 4.13, 0.41: 3.89, 0.42: 3.79, 0.43: 3.71, 0.44: 3.56, 0.45: 3.46, 0.46: 3.39, 0.47: 3.33, 0.48: 3.31, 0.49: 3.27, 0.5: 3.22, 0.51: 3.18, 0.52: 3.16, 0.53: 2.94, 0.54: 2.94, 0.55: 2.1, 0.56: 2.09, 0.57: 2.09, 0.58: 2.06, 0.59: 2.04, 0.6: 2.04, 0.61: 2.04, 0.62: 2.04, 0.63: 2.04, 0.64: 2.04, 0.65: 2.04, 0.66: 2.02, 0.67: 2.02, 0.68: 1.92, 0.69: 1.92, 0.7: 1.92, 0.71: 1.92, 0.72: 1.92, 0.73: 
1.92, 0.74: 1.92, 0.75: 1.92, 0.76: 1.92, 0.77: 1.92, 0.78: 1.92, 0.79: 1.92, 0.8: 1.92, 0.81: 1.92, 0.82: 1.92, 0.83: 1.92, 0.84: 1.92, 0.85: 1.92, 0.86: 1.92, 0.87: 1.92, 0.88: 1.92, 0.89: 1.92, 0.9: 1.92, 0.91: 1.92, 0.92: 1.92, 0.93: 1.92, 0.94: 1.92, 0.95: 1.92, 0.96: 1.92, 0.97: 1.92, 0.98: 1.92 }, 'i2.8xlarge': { 0.12: 117.09, 0.13: 88.19, 0.14: 69.54, 0.15: 48.66, 0.16: 20.81, 0.17: 10.83, 0.18: 9.73, 0.19: 9.02, 0.2: 8.33, 0.21: 7.59, 0.22: 7.09, 0.23: 6.38, 0.24: 5.96, 0.25: 5.52, 0.26: 4.86, 0.27: 4.51, 0.28: 4.09, 0.29: 3.73, 0.3: 3.52, 0.31: 2.92, 0.32: 2.77, 0.33: 2.66, 0.34: 2.48, 0.35: 2.42, 0.36: 2.36, 0.37: 2.29, 0.38: 2.22, 0.39: 2.06, 0.4: 1.97, 0.41: 1.83, 0.42: 1.79, 0.43: 1.72, 0.44: 1.59, 0.45: 1.58, 0.46: 1.57, 0.47: 1.54, 0.48: 1.53, 0.49: 1.52, 0.5: 1.52, 0.51: 1.5, 0.52: 1.47, 0.53: 1.44, 0.54: 1.44, 0.55: 1.4, 0.56: 1.4, 0.57: 1.39, 0.58: 1.39, 0.59: 1.39, 0.6: 1.39, 0.61: 1.38, 0.62: 1.38, 0.63: 1.37, 0.64: 1.37, 0.65: 1.37, 0.66: 1.37, 0.67: 1.37, 0.68: 1.37, 0.69: 1.37, 0.7: 1.37, 0.71: 1.37, 0.72: 1.37, 0.73: 1.37, 0.74: 1.37, 0.75: 1.37, 0.76: 1.37, 0.77: 1.37, 0.78: 1.37, 0.79: 1.37, 0.8: 1.37, 0.81: 1.37, 0.82: 1.37, 0.83: 1.37, 0.84: 1.37, 0.85: 1.37, 0.86: 1.37, 0.87: 1.37, 0.88: 1.37, 0.89: 1.37, 0.9: 1.37, 0.91: 1.37, 0.92: 1.37, 0.93: 1.37, 0.94: 1.37, 0.95: 1.37, 0.96: 1.37, 0.97: 1.37, 0.98: 1.37 }, 'i2.xlarge': { 0.12: 282.46, 0.13: 232.22, 0.14: 159.23, 0.15: 75.39, 0.16: 31.29, 0.17: 14.22, 0.18: 2.94, 0.19: 2.93, 0.2: 2.23, 0.21: 2.11, 0.22: 2.06, 0.23: 1.3, 0.24: 1.3, 0.25: 1.29, 0.26: 1.28, 0.27: 1.28, 0.28: 1.28, 0.29: 1.01, 0.3: 1.01, 0.31: 0.98, 0.32: 0.98, 0.33: 0.98, 0.34: 0.98, 0.35: 0.98, 0.36: 0.98, 0.37: 0.98, 0.38: 0.98, 0.39: 0.96, 0.4: 0.96, 0.41: 0.96, 0.42: 0.96, 0.43: 0.96, 0.44: 0.51, 0.45: 0.51, 0.46: 0.51, 0.47: 0.51, 0.48: 0.51, 0.49: 0.51, 0.5: 0.51, 0.51: 0.51, 0.52: 0.51, 0.53: 0.51, 0.54: 0.51, 0.55: 0.5, 0.56: 0.5, 0.57: 0.5, 0.58: 0.5, 0.59: 0.49, 0.6: 0.49, 0.61: 0.49, 0.62: 0.49, 0.63: 0.49, 0.64: 0.49, 0.65: 0.49, 0.66: 0.49, 0.67: 0.49, 0.68: 0.49, 0.69: 0.49, 0.7: 0.49, 0.71: 0.49, 0.72: 0.49, 0.73: 0.49, 0.74: 0.49, 0.75: 0.49, 0.76: 0.49, 0.77: 0.49, 0.78: 0.49, 0.79: 0.49, 0.8: 0.49, 0.81: 0.49, 0.82: 0.49, 0.83: 0.49, 0.84: 0.49, 0.85: 0.49, 0.86: 0.49, 0.87: 0.49, 0.88: 0.49, 0.89: 0.49, 0.9: 0.49, 0.91: 0.49, 0.92: 0.49, 0.93: 0.49, 0.94: 0.49, 0.95: 0.49, 0.96: 0.49, 0.97: 0.49, 0.98: 0.49 }, 'm3.2xlarge': { 0.12: 3251.41, 0.13: 3249.8, 0.14: 3134.78, 0.15: 2688.93, 0.16: 2085.14, 0.17: 1487.5, 0.18: 1026.47, 0.19: 718.37, 0.2: 508.89, 0.21: 379.22, 0.22: 284.66, 0.23: 216.24, 0.24: 163.46, 0.25: 123.39, 0.26: 94.77, 0.27: 71.92, 0.28: 49.36, 0.29: 41.19, 0.3: 31.72, 0.31: 26.26, 0.32: 21.6, 0.33: 18.32, 0.34: 14.67, 0.35: 11.93, 0.36: 10.09, 0.37: 8.72, 0.38: 7.54, 0.39: 6.11, 0.4: 5.79, 0.41: 5.51, 0.42: 5.28, 0.43: 4.99, 0.44: 4.77, 0.45: 4.48, 0.46: 4.36, 0.47: 4.26, 0.48: 4.09, 0.49: 4.01, 0.5: 3.91, 0.51: 3.86, 0.52: 3.79, 0.53: 3.58, 0.54: 3.49, 0.55: 3.41, 0.56: 3.31, 0.57: 3.21, 0.58: 3.12, 0.59: 3.07, 0.6: 3.03, 0.61: 2.94, 0.62: 2.88, 0.63: 2.86, 0.64: 2.81, 0.65: 2.79, 0.66: 2.76, 0.67: 2.71, 0.68: 2.67, 0.69: 2.66, 0.7: 2.63, 0.71: 2.62, 0.72: 2.62, 0.73: 2.62, 0.74: 2.61, 0.75: 2.59, 0.76: 2.51, 0.77: 2.51, 0.78: 2.51, 0.79: 2.48, 0.8: 2.48, 0.81: 2.47, 0.82: 2.47, 0.83: 2.44, 0.84: 2.42, 0.85: 2.41, 0.86: 2.38, 0.87: 2.38, 0.88: 2.38, 0.89: 2.38, 0.9: 2.38, 0.91: 2.38, 0.92: 2.38, 0.93: 2.38, 0.94: 2.29, 0.95: 2.29, 0.96: 2.29, 0.97: 2.29, 0.98: 2.29 }, 'm3.large': { 0.12: 1245.54, 0.13: 
1231.91, 0.14: 834.16, 0.15: 431.16, 0.16: 256.97, 0.17: 170.09, 0.18: 123.63, 0.19: 91.68, 0.2: 69.63, 0.21: 54.64, 0.22: 40.44, 0.23: 31.51, 0.24: 25.44, 0.25: 21.03, 0.26: 17.31, 0.27: 13.82, 0.28: 11.2, 0.29: 9.66, 0.3: 8.42, 0.31: 7.1, 0.32: 6.1, 0.33: 5.13, 0.34: 4.23, 0.35: 3.7, 0.36: 3.2, 0.37: 2.83, 0.38: 2.51, 0.39: 2.37, 0.4: 2.33, 0.41: 2.3, 0.42: 2.3, 0.43: 2.28, 0.44: 2.18, 0.45: 2.16, 0.46: 2.12, 0.47: 2.12, 0.48: 2.11, 0.49: 2.1, 0.5: 2.08, 0.51: 2.02, 0.52: 2.01, 0.53: 1.98, 0.54: 1.98, 0.55: 1.98, 0.56: 1.98, 0.57: 1.98, 0.58: 1.98, 0.59: 1.98, 0.6: 1.98, 0.61: 1.98, 0.62: 1.98, 0.63: 1.98, 0.64: 1.98, 0.65: 1.98, 0.66: 1.98, 0.67: 1.98, 0.68: 1.98, 0.69: 1.98, 0.7: 1.98, 0.71: 1.98, 0.72: 1.98, 0.73: 1.98, 0.74: 1.98, 0.75: 1.98, 0.76: 1.98, 0.77: 1.98, 0.78: 1.98, 0.79: 1.98, 0.8: 1.98, 0.81: 1.98, 0.82: 1.98, 0.83: 1.98, 0.84: 1.98, 0.85: 1.98, 0.86: 1.98, 0.87: 1.98, 0.88: 1.98, 0.89: 1.98, 0.9: 1.98, 0.91: 1.98, 0.92: 1.98, 0.93: 1.98, 0.94: 1.98, 0.95: 1.98, 0.96: 1.98, 0.97: 1.98, 0.98: 1.98 }, 'm3.medium': { 0.12: 498.22, 0.13: 486.34, 0.14: 341.01, 0.15: 246.89, 0.16: 184.7, 0.17: 120.43, 0.18: 72.62, 0.19: 44.86, 0.2: 29.71, 0.21: 20.67, 0.22: 15.88, 0.23: 12.54, 0.24: 9.56, 0.25: 7.92, 0.26: 6.21, 0.27: 5.67, 0.28: 5.14, 0.29: 3.24, 0.3: 2.06, 0.31: 2.06, 0.32: 2.06, 0.33: 2.06, 0.34: 2.06, 0.35: 2.04, 0.36: 2.04, 0.37: 2.04, 0.38: 2.04, 0.39: 2.04, 0.4: 2.04, 0.41: 2.04, 0.42: 2.04, 0.43: 2.03, 0.44: 2.02, 0.45: 2.02, 0.46: 2.02, 0.47: 2.02, 0.48: 2.02, 0.49: 2.02, 0.5: 2.02, 0.51: 2.02, 0.52: 2.02, 0.53: 2.02, 0.54: 2.02, 0.55: 2.02, 0.56: 2.02, 0.57: 2.02, 0.58: 2.02, 0.59: 2.02, 0.6: 2.01, 0.61: 2.01, 0.62: 2.01, 0.63: 2.01, 0.64: 2.01, 0.65: 2.01, 0.66: 2.01, 0.67: 2.01, 0.68: 2.01, 0.69: 2.01, 0.7: 2.01, 0.71: 2.01, 0.72: 2.01, 0.73: 2.01, 0.74: 2.01, 0.75: 2.01, 0.76: 2.01, 0.77: 2.01, 0.78: 2.01, 0.79: 2.01, 0.8: 2.01, 0.81: 2.01, 0.82: 2.01, 0.83: 2.01, 0.84: 2.01, 0.85: 2.01, 0.86: 2.01, 0.87: 2.01, 0.88: 2.01, 0.89: 2.01, 0.9: 2.01, 0.91: 2.01, 0.92: 2.01, 0.93: 2.01, 0.94: 2.01, 0.95: 2.01, 0.96: 2.01, 0.97: 2.01, 0.98: 2.01 }, 'm3.xlarge': { 0.12: 3659.6, 0.13: 3655.48, 0.14: 3478.44, 0.15: 2809.06, 0.16: 1958.68, 0.17: 1151.16, 0.18: 658.77, 0.19: 406.11, 0.2: 261.24, 0.21: 181.14, 0.22: 124.84, 0.23: 89.01, 0.24: 64.78, 0.25: 50.66, 0.26: 42.31, 0.27: 37.4, 0.28: 35.18, 0.29: 32.37, 0.3: 29.66, 0.31: 25.58, 0.32: 21.2, 0.33: 16.84, 0.34: 14.48, 0.35: 13.14, 0.36: 12.13, 0.37: 11.36, 0.38: 10.69, 0.39: 10.09, 0.4: 9.6, 0.41: 9.12, 0.42: 8.36, 0.43: 7.69, 0.44: 7.2, 0.45: 6.7, 0.46: 6.32, 0.47: 5.99, 0.48: 5.66, 0.49: 5.33, 0.5: 4.99, 0.51: 4.71, 0.52: 4.42, 0.53: 4.13, 0.54: 3.96, 0.55: 3.7, 0.56: 3.64, 0.57: 3.57, 0.58: 3.49, 0.59: 3.42, 0.6: 3.37, 0.61: 3.28, 0.62: 3.2, 0.63: 3.1, 0.64: 2.99, 0.65: 2.87, 0.66: 2.8, 0.67: 2.7, 0.68: 2.67, 0.69: 2.61, 0.7: 2.56, 0.71: 2.52, 0.72: 2.41, 0.73: 2.39, 0.74: 2.39, 0.75: 2.33, 0.76: 2.3, 0.77: 2.3, 0.78: 2.24, 0.79: 2.23, 0.8: 2.21, 0.81: 2.19, 0.82: 2.19, 0.83: 2.17, 0.84: 2.16, 0.85: 2.16, 0.86: 2.16, 0.87: 2.16, 0.88: 2.16, 0.89: 2.16, 0.9: 2.16, 0.91: 2.16, 0.92: 2.16, 0.93: 2.16, 0.94: 2.13, 0.95: 2.13, 0.96: 2.13, 0.97: 2.13, 0.98: 2.13 }, 'r3.2xlarge': { 0.12: 3844.26, 0.13: 3414.5, 0.14: 2845.17, 0.15: 2289.04, 0.16: 1805.79, 0.17: 1383.41, 0.18: 1061.92, 0.19: 835.73, 0.2: 659.86, 0.21: 530.21, 0.22: 426.58, 0.23: 344.61, 0.24: 288.72, 0.25: 242.19, 0.26: 203.48, 0.27: 174.86, 0.28: 150.38, 0.29: 130.38, 0.3: 113.01, 0.31: 98.81, 0.32: 85.82, 0.33: 75.5, 0.34: 66.61, 0.35: 58.59, 0.36: 50.62, 
0.37: 42.92, 0.38: 36.0, 0.39: 29.53, 0.4: 22.12, 0.41: 19.93, 0.42: 18.59, 0.43: 17.53, 0.44: 16.69, 0.45: 15.89, 0.46: 14.38, 0.47: 13.66, 0.48: 12.71, 0.49: 12.23, 0.5: 11.56, 0.51: 10.96, 0.52: 10.36, 0.53: 9.74, 0.54: 9.24, 0.55: 8.8, 0.56: 8.57, 0.57: 8.22, 0.58: 7.92, 0.59: 7.62, 0.6: 7.46, 0.61: 7.11, 0.62: 6.98, 0.63: 6.81, 0.64: 6.64, 0.65: 6.46, 0.66: 6.27, 0.67: 6.24, 0.68: 6.12, 0.69: 5.94, 0.7: 5.86, 0.71: 5.74, 0.72: 5.64, 0.73: 5.59, 0.74: 5.53, 0.75: 5.46, 0.76: 5.29, 0.77: 5.29, 0.78: 5.23, 0.79: 5.18, 0.8: 4.8, 0.81: 4.72, 0.82: 4.68, 0.83: 4.67, 0.84: 4.64, 0.85: 4.6, 0.86: 4.53, 0.87: 4.48, 0.88: 4.47, 0.89: 4.46, 0.9: 4.41, 0.91: 4.38, 0.92: 4.38, 0.93: 4.32, 0.94: 4.28, 0.95: 4.28, 0.96: 4.28, 0.97: 4.28, 0.98: 4.23 }, 'r3.4xlarge': { 0.12: 1545.18, 0.13: 1188.36, 0.14: 944.61, 0.15: 759.98, 0.16: 637.66, 0.17: 546.06, 0.18: 475.58, 0.19: 420.97, 0.2: 381.2, 0.21: 344.24, 0.22: 314.6, 0.23: 284.27, 0.24: 258.8, 0.25: 237.83, 0.26: 218.54, 0.27: 201.68, 0.28: 186.51, 0.29: 174.11, 0.3: 162.09, 0.31: 148.93, 0.32: 139.12, 0.33: 130.38, 0.34: 120.61, 0.35: 112.62, 0.36: 105.11, 0.37: 97.41, 0.38: 86.54, 0.39: 80.57, 0.4: 74.46, 0.41: 66.23, 0.42: 63.17, 0.43: 60.72, 0.44: 58.1, 0.45: 55.42, 0.46: 52.47, 0.47: 50.63, 0.48: 49.04, 0.49: 47.73, 0.5: 44.8, 0.51: 43.53, 0.52: 42.14, 0.53: 40.67, 0.54: 39.64, 0.55: 38.0, 0.56: 37.03, 0.57: 36.14, 0.58: 34.76, 0.59: 33.96, 0.6: 33.2, 0.61: 31.68, 0.62: 30.93, 0.63: 30.22, 0.64: 28.32, 0.65: 27.6, 0.66: 27.02, 0.67: 26.62, 0.68: 26.0, 0.69: 25.38, 0.7: 25.16, 0.71: 24.73, 0.72: 24.24, 0.73: 23.84, 0.74: 23.5, 0.75: 23.38, 0.76: 19.4, 0.77: 19.22, 0.78: 19.03, 0.79: 18.68, 0.8: 18.58, 0.81: 18.32, 0.82: 18.22, 0.83: 18.03, 0.84: 17.89, 0.85: 17.82, 0.86: 17.77, 0.87: 17.61, 0.88: 17.6, 0.89: 17.57, 0.9: 17.52, 0.91: 17.3, 0.92: 17.22, 0.93: 17.12, 0.94: 17.04, 0.95: 17.04, 0.96: 17.04, 0.97: 17.04, 0.98: 17.04 }, 'r3.8xlarge': { 0.12: 2622.84, 0.13: 2273.86, 0.14: 1924.46, 0.15: 1586.87, 0.16: 1298.44, 0.17: 1058.79, 0.18: 844.49, 0.19: 633.72, 0.2: 524.49, 0.21: 438.66, 0.22: 370.76, 0.23: 314.3, 0.24: 269.92, 0.25: 235.92, 0.26: 204.71, 0.27: 180.16, 0.28: 156.81, 0.29: 136.53, 0.3: 118.88, 0.31: 103.41, 0.32: 92.68, 0.33: 83.1, 0.34: 74.47, 0.35: 67.9, 0.36: 62.23, 0.37: 57.48, 0.38: 52.03, 0.39: 48.29, 0.4: 44.79, 0.41: 41.8, 0.42: 39.14, 0.43: 37.04, 0.44: 35.3, 0.45: 32.9, 0.46: 31.3, 0.47: 29.26, 0.48: 28.12, 0.49: 26.78, 0.5: 25.59, 0.51: 24.56, 0.52: 23.18, 0.53: 22.38, 0.54: 21.56, 0.55: 20.74, 0.56: 20.13, 0.57: 19.34, 0.58: 18.02, 0.59: 17.38, 0.6: 16.79, 0.61: 16.41, 0.62: 15.86, 0.63: 15.34, 0.64: 14.92, 0.65: 14.32, 0.66: 14.08, 0.67: 13.72, 0.68: 13.39, 0.69: 12.77, 0.7: 12.63, 0.71: 12.31, 0.72: 11.88, 0.73: 11.69, 0.74: 11.34, 0.75: 11.2, 0.76: 7.59, 0.77: 7.36, 0.78: 7.29, 0.79: 7.2, 0.8: 7.03, 0.81: 6.9, 0.82: 6.77, 0.83: 6.62, 0.84: 6.46, 0.85: 6.37, 0.86: 6.19, 0.87: 6.14, 0.88: 6.1, 0.89: 6.06, 0.9: 6.02, 0.91: 5.99, 0.92: 5.98, 0.93: 5.98, 0.94: 5.92, 0.95: 5.91, 0.96: 5.91, 0.97: 5.91, 0.98: 5.91 }, 'r3.large': { 0.12: 294.83, 0.13: 180.21, 0.14: 107.87, 0.15: 74.54, 0.16: 54.27, 0.17: 42.77, 0.18: 34.79, 0.19: 28.81, 0.2: 25.03, 0.21: 21.72, 0.22: 19.51, 0.23: 17.29, 0.24: 15.66, 0.25: 14.51, 0.26: 13.28, 0.27: 11.86, 0.28: 11.04, 0.29: 10.54, 0.3: 10.09, 0.31: 9.68, 0.32: 8.53, 0.33: 8.3, 0.34: 8.0, 0.35: 7.84, 0.36: 7.63, 0.37: 7.41, 0.38: 7.21, 0.39: 7.06, 0.4: 6.89, 0.41: 6.14, 0.42: 6.08, 0.43: 6.0, 0.44: 5.94, 0.45: 5.84, 0.46: 5.67, 0.47: 5.43, 0.48: 5.38, 0.49: 5.18, 0.5: 5.12, 0.51: 5.03, 0.52: 
4.96, 0.53: 4.89, 0.54: 4.8, 0.55: 4.77, 0.56: 4.73, 0.57: 4.68, 0.58: 3.97, 0.59: 3.96, 0.6: 3.93, 0.61: 3.84, 0.62: 3.8, 0.63: 3.79, 0.64: 3.76, 0.65: 3.6, 0.66: 3.56, 0.67: 3.54, 0.68: 3.54, 0.69: 3.44, 0.7: 3.43, 0.71: 3.43, 0.72: 3.39, 0.73: 3.38, 0.74: 3.3, 0.75: 3.29, 0.76: 3.26, 0.77: 3.21, 0.78: 3.2, 0.79: 3.19, 0.8: 3.19, 0.81: 3.17, 0.82: 3.13, 0.83: 3.12, 0.84: 3.11, 0.85: 3.08, 0.86: 3.08, 0.87: 3.08, 0.88: 3.08, 0.89: 3.08, 0.9: 3.08, 0.91: 3.08, 0.92: 3.08, 0.93: 3.08, 0.94: 3.08, 0.95: 3.08, 0.96: 3.08, 0.97: 3.08, 0.98: 3.08 }, 'r3.xlarge': { 0.12: 2447.93, 0.13: 2069.16, 0.14: 1738.32, 0.15: 1462.18, 0.16: 1249.51, 0.17: 1093.04, 0.18: 957.97, 0.19: 854.5, 0.2: 762.62, 0.21: 687.5, 0.22: 619.24, 0.23: 563.0, 0.24: 516.74, 0.25: 472.94, 0.26: 434.99, 0.27: 403.32, 0.28: 372.71, 0.29: 346.48, 0.3: 322.22, 0.31: 301.22, 0.32: 279.34, 0.33: 261.26, 0.34: 243.47, 0.35: 228.26, 0.36: 214.63, 0.37: 202.59, 0.38: 191.2, 0.39: 181.66, 0.4: 170.71, 0.41: 162.06, 0.42: 154.81, 0.43: 148.67, 0.44: 142.02, 0.45: 134.2, 0.46: 128.56, 0.47: 123.16, 0.48: 118.28, 0.49: 113.66, 0.5: 108.23, 0.51: 103.69, 0.52: 99.69, 0.53: 96.08, 0.54: 93.09, 0.55: 89.16, 0.56: 86.86, 0.57: 83.56, 0.58: 79.92, 0.59: 77.46, 0.6: 75.51, 0.61: 72.97, 0.62: 70.56, 0.63: 68.69, 0.64: 66.23, 0.65: 64.36, 0.66: 62.44, 0.67: 60.86, 0.68: 59.07, 0.69: 57.67, 0.7: 56.42, 0.71: 54.96, 0.72: 53.68, 0.73: 52.44, 0.74: 51.18, 0.75: 50.2, 0.76: 48.96, 0.77: 47.97, 0.78: 46.92, 0.79: 46.01, 0.8: 45.04, 0.81: 44.44, 0.82: 43.48, 0.83: 42.5, 0.84: 41.62, 0.85: 41.09, 0.86: 40.3, 0.87: 39.83, 0.88: 39.47, 0.89: 39.08, 0.9: 38.24, 0.91: 36.6, 0.92: 35.32, 0.93: 34.8, 0.94: 34.58, 0.95: 34.49, 0.96: 34.49, 0.97: 34.48, 0.98: 34.48 } } # data structure: # { type name : [mean, std dev, # revocation when price over mean during 90 days] revocation_when_price_over_mean = { 'c3.2xlarge': [0.23963546113896608, 0.21657664634426471, 134688], 'c3.4xlarge': [0.25721605371533912, 0.36589812579652364, 81711], 'c3.8xlarge': [0.21484622926238489, 0.34612212988339058, 59111], 'c3.large': [0.21502370449109751, 0.56929277453457394, 4327], 'c3.xlarge': [0.21979137667931201, 0.26630760454267149, 76071], 'd2.2xlarge': [0.15130340252355379, 0.46905223295236587, 4586], 'd2.4xlarge': [0.16958719001873204, 0.65551193212376779, 4763], 'd2.8xlarge': [0.15961323188885215, 0.60695012484486544, 2526], 'd2.xlarge': [0.19056172355566275, 0.43303135714798463, 10047], 'g2.2xlarge': [0.24755254783589334, 0.6525269280051148, 43481], 'g2.8xlarge': [0.67891554498215734, 1.7657031539964303, 7754], 'i2.2xlarge': [0.12554248082496994, 0.053291554500137464, 36271], 'i2.4xlarge': [0.14839947751341329, 0.37738988695257136, 17339], 'i2.8xlarge': [0.12467341668503763, 0.064605277034059841, 9729], 'i2.xlarge': [0.12278897760215073, 0.033756278369311728, 24041], 'm3.2xlarge': [0.18275780171412165, 0.26094485362138625, 86068], 'm3.large': [0.16929584563644634, 0.41159709713796189, 15702], 'm3.medium': [0.19894409009812669, 0.65298444491396257, 3001], 'm3.xlarge': [0.17277890182624145, 0.24388040108465411, 93584], 'r3.2xlarge': [0.16900780456668693, 0.13031081275006029, 127668], 'r3.4xlarge': [0.16241257936417203, 0.29757245101140428, 55504], 'r3.8xlarge': [0.17841913618688862, 0.24660367175227449, 79961], 'r3.large': [0.12985615767740835, 0.12426919186714877, 17238], 'r3.xlarge': [0.20370734784317931, 0.37173715874871954, 66509] } import pandas as pd # columns = [] # prices = [str(i) for i in np.arange(0.12, 0.99, 0.01)] # empty = [[]] * len(prices) # revoc_num = 
dict(zip(prices, empty))
#
# for t, entry in revocation_num.items():
#     columns.append(t)
#     for price, num in entry.items():
#         revoc_num[str(price)].append(num)
#
#
# df = pd.DataFrame(revocation_num, columns=columns)
# print(df)
#
#
# indices = []
# for k, v in revocation_when_price_over_mean.items():
#     indices.append(k)

import pickle

df = pd.DataFrame(revocation_num)
with open('revoc_at_all_bids.bin', 'wb') as f:
    pickle.dump(df, f)

# import pandas as pd
#
# indices = ['mean_price', 'std_err_nonsense', 'revoc_num_90days']
#
# df = pd.DataFrame(revocation_when_price_over_mean, index=indices).transpose()
# print(df)
#
# import pickle
#
# with open('mean_price_90day_revoc.bin', 'wb') as f:
#     pickle.dump(df, f)
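# Quick sanity check (an added, illustrative sketch -- not part of the original
# analysis): reload the pickled frame and look up one cell. Assuming the large
# dictionary above is the `revocation_num` mapping of instance type -> {bid
# price: revocation count}, the frame's columns are instance types and its
# index holds the bid prices.
with open('revoc_at_all_bids.bin', 'rb') as f:
    revoc_df = pickle.load(f)

print(revoc_df.shape)
print(revoc_df.loc[0.5, 'm3.xlarge'])  # revocations observed at a $0.50 bid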
""" ================================================== Explicit feature map approximation for RBF kernels ================================================== An example illustrating the approximation of the feature map of an RBF kernel. .. currentmodule:: sklearn.kernel_approximation It shows how to use :class:`RBFSampler` and :class:`Nystroem` to approximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem`) for the approximate mapping are shown. Please not that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. The second plot visualized the decision surfaces of the RBF kernel SVM and the linear SVM with approximate kernel maps. The plot shows decision surfaces of the classifiers projected onto the first two principal components of the data. This visualization should be taken with a grain of salt since it is just an interesting slice through the decision surface in 64 dimensions. In particular note that a datapoint (represented as a dot) does not necessarily be classified into the region it is lying in, since it will not lie on the plane that the first two principal components span. The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail in :ref:`kernel_approximation`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause # Standard scientific Python imports import pylab as pl import numpy as np from time import time # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, pipeline from sklearn.kernel_approximation import (RBFSampler, Nystroem) from sklearn.decomposition import PCA # The digits dataset digits = datasets.load_digits(n_class=9) # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16. 
data -= data.mean(axis=0)

# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]

# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)

# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()

# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
                                        ("svm", svm.LinearSVC())])

nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
                                         ("svm", svm.LinearSVC())])

# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time

linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time

sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []

for D in sample_sizes:
    fourier_approx_svm.set_params(feature_map__n_components=D)
    nystroem_approx_svm.set_params(feature_map__n_components=D)
    start = time()
    nystroem_approx_svm.fit(data_train, targets_train)
    nystroem_times.append(time() - start)

    start = time()
    fourier_approx_svm.fit(data_train, targets_train)
    fourier_times.append(time() - start)

    fourier_score = fourier_approx_svm.score(data_test, targets_test)
    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
    nystroem_scores.append(nystroem_score)
    fourier_scores.append(fourier_score)

# plot the results:
pl.figure(figsize=(8, 8))
accuracy = pl.subplot(211)
# second y axis for timings
timescale = pl.subplot(212)

accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
               label='Nystroem approx. kernel')

accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
               label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [linear_svm_time, linear_svm_time], '--', label='linear svm')

accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [kernel_svm_time, kernel_svm_time], '--', label='rbf svm')

# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")

# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')

# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)

X = pca.transform(data_train)

# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])

# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100']

pl.tight_layout()
pl.figure(figsize=(12, 5))

# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
                         fourier_approx_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    pl.subplot(1, 3, i + 1)
    Z = clf.predict(flat_grid)

    # Put the result into a color plot
    Z = Z.reshape(grid.shape[:-1])
    pl.contourf(multiples, multiples, Z, cmap=pl.cm.Paired)
    pl.axis('off')

    # Plot also the training points
    pl.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=pl.cm.Paired)

    pl.title(titles[i])

pl.tight_layout()
pl.show()
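# An added sketch (not part of the original example): measure how close the
# explicit feature maps come to the exact RBF kernel matrix on the training
# data. The relative Frobenius error should shrink as n_components grows,
# mirroring the accuracy curves plotted above. The choice of 100 and 300
# components here is illustrative only.
from sklearn.metrics.pairwise import rbf_kernel

K_exact = rbf_kernel(data_train, gamma=.2)
for n_components in (100, 300):
    Z_fourier = RBFSampler(gamma=.2, n_components=n_components,
                           random_state=1).fit_transform(data_train)
    Z_nystroem = Nystroem(gamma=.2, n_components=n_components,
                          random_state=1).fit_transform(data_train)
    err_fourier = (np.linalg.norm(K_exact - Z_fourier.dot(Z_fourier.T))
                   / np.linalg.norm(K_exact))
    err_nystroem = (np.linalg.norm(K_exact - Z_nystroem.dot(Z_nystroem.T))
                    / np.linalg.norm(K_exact))
    print("n_components=%d  Fourier err=%.3f  Nystroem err=%.3f"
          % (n_components, err_fourier, err_nystroem))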
from __future__ import print_function, division, absolute_import from datetime import datetime, timedelta import json import logging from multiprocessing import Process, Queue, queues import os import shutil import subprocess import sys import tempfile from time import time, sleep from tornado.ioloop import IOLoop from tornado.iostream import StreamClosedError from tornado import gen from .compatibility import JSONDecodeError from .core import Server, rpc, write from .utils import get_ip, ignoring, log_errors, tmpfile from .worker import _ncores, Worker, run, TOTAL_MEMORY nanny_environment = os.path.dirname(sys.executable) logger = logging.getLogger(__name__) class Nanny(Server): """ A process to manage worker processes The nanny spins up Worker processes, watches then, and kills or restarts them as necessary. """ def __init__(self, scheduler_ip, scheduler_port, ip=None, worker_port=0, ncores=None, loop=None, local_dir=None, services=None, name=None, memory_limit=TOTAL_MEMORY, environment=nanny_environment, quiet=False, **kwargs): self.ip = ip or get_ip() self.worker_port = None self._given_worker_port = worker_port self.ncores = ncores or _ncores if not local_dir: local_dir = tempfile.mkdtemp(prefix='nanny-') self._should_cleanup_local_dir = True def _cleanup_local_dir(): if os.path.exists(local_dir): shutil.rmtree(local_dir) atexit.register(_cleanup_local_dir) else: self._should_cleanup_local_dir = False self.local_dir = local_dir self.worker_dir = '' self.status = None self.process = None self.loop = loop or IOLoop.current() self.scheduler = rpc(ip=scheduler_ip, port=scheduler_port) self.services = services self.name = name self.memory_limit = memory_limit self.environment = environment self.quiet = quiet handlers = {'instantiate': self.instantiate, 'kill': self._kill, 'terminate': self._close, 'monitor_resources': self.monitor_resources, 'run': self.run} super(Nanny, self).__init__(handlers, io_loop=self.loop, **kwargs) @gen.coroutine def _start(self, port=0): """ Start nanny, start local process, start watching """ self.listen(port) logger.info(' Start Nanny at: %20s:%d', self.ip, self.port) yield self.instantiate() self.loop.add_callback(self._watch) assert self.worker_port self.status = 'running' def start(self, port=0): self.loop.add_callback(self._start, port) @gen.coroutine def _kill(self, stream=None, timeout=5): """ Kill the local worker process Blocks until both the process is down and the scheduler is properly informed """ while not self.worker_port: yield gen.sleep(0.1) if self.process is not None: try: # Ask worker to close worker = rpc(ip='127.0.0.1', port=self.worker_port) result = yield gen.with_timeout( timedelta(seconds=min(1, timeout)), worker.terminate(report=False), io_loop=self.loop) except gen.TimeoutError: logger.info("Worker non-responsive. Terminating.") except StreamClosedError: pass except Exception as e: logger.exception(e) try: # Tell scheduler that worker is gone result = yield gen.with_timeout(timedelta(seconds=timeout), self.scheduler.unregister(address=self.worker_address), io_loop=self.loop) if result not in ('OK', 'already-removed'): logger.critical("Unable to unregister with scheduler %s. 
" "Nanny: %s, Worker: %s", result, self.address_tuple, self.worker_address) else: logger.info("Unregister worker %s:%d from scheduler", self.ip, self.worker_port) except gen.TimeoutError: logger.info("Nanny %s:%d failed to unregister worker %s:%d", self.ip, self.port, self.ip, self.worker_port, exc_info=True) except StreamClosedError: pass except Exception as e: logger.exception(e) if self.process: with ignoring(OSError): self.process.terminate() if self.process in processes_to_close: processes_to_close.remove(self.process) start = time() while isalive(self.process) and time() < start + timeout: sleep(0.01) self.process = None self.cleanup() logger.info("Nanny %s:%d kills worker process %s:%d", self.ip, self.port, self.ip, self.worker_port) raise gen.Return('OK') @gen.coroutine def instantiate(self, stream=None, environment=None): """ Start a local worker process Blocks until the process is up and the scheduler is properly informed """ if environment: if not os.path.isabs(environment): environment = os.path.join(self.local_dir, environment) self.environment = environment with log_errors(): if self.process and isalive(self.process): raise ValueError("Existing process still alive. Please kill first") if self.environment != nanny_environment: with tmpfile() as fn: self.process = run_worker_subprocess(self.environment, self.ip, self.scheduler.ip, self.scheduler.port, self.ncores, self.port, self._given_worker_port, self.name, self.memory_limit, self.loop, fn, self.quiet) while not os.path.exists(fn): yield gen.sleep(0.01) while True: try: with open(fn) as f: msg = json.load(f) self.worker_port = msg['port'] self.worker_dir = msg['local_directory'] break except JSONDecodeError: yield gen.sleep(0.01) else: q = Queue() self.process = Process(target=run_worker_fork, args=(q, self.ip, self.scheduler.ip, self.scheduler.port, self.ncores, self.port, self._given_worker_port, self.local_dir, self.services, self.name, self.memory_limit)) self.process.daemon = True self.process.start() while True: try: msg = q.get_nowait() if isinstance(msg, Exception): raise msg self.worker_port = msg['port'] self.worker_dir = msg['dir'] assert self.worker_port break except queues.Empty: yield gen.sleep(0.1) logger.info("Nanny %s:%d starts worker process %s:%d", self.ip, self.port, self.ip, self.worker_port) raise gen.Return('OK') def run(self, *args, **kwargs): return run(self, *args, **kwargs) def cleanup(self): if self.worker_dir and os.path.exists(self.worker_dir): shutil.rmtree(self.worker_dir) self.worker_dir = None if self.process: with ignoring(OSError): self.process.terminate() def __del__(self): if self._should_cleanup_local_dir and os.path.exists(self.local_dir): shutil.rmtree(self.local_dir) self.cleanup() @gen.coroutine def _watch(self, wait_seconds=0.10): """ Watch the local process, if it dies then spin up a new one """ while True: if closing[0] or self.status == 'closed': yield self._close() break elif self.process and not isalive(self.process): logger.warn("Discovered failed worker. Restarting. 
Status: %s", self.status) self.cleanup() yield self.scheduler.unregister(address=self.worker_address) yield self.instantiate() else: yield gen.sleep(wait_seconds) @gen.coroutine def _close(self, stream=None, timeout=5, report=None): """ Close the nanny process, stop listening """ logger.info("Closing Nanny at %s:%d", self.ip, self.port) self.status = 'closed' yield self._kill(timeout=timeout) self.scheduler.close_streams() self.stop() raise gen.Return('OK') @property def address(self): return '%s:%d' % (self.ip, self.port) @property def address_tuple(self): return (self.ip, self.port) @property def worker_address_tuple(self): return (self.ip, self.worker_port) @property def worker_address(self): return '%s:%d' % (self.ip, self.worker_port) def resource_collect(self): try: import psutil except ImportError: return {} p = psutil.Process(self.process.pid) return {'timestamp': datetime.now().isoformat(), 'cpu_percent': psutil.cpu_percent(), 'status': p.status(), 'memory_percent': p.memory_percent(), 'memory_info_ex': p.memory_info_ex()._asdict(), 'disk_io_counters': psutil.disk_io_counters()._asdict(), 'net_io_counters': psutil.net_io_counters()._asdict()} @gen.coroutine def monitor_resources(self, stream, interval=1): while not stream.closed(): if self.process: yield write(stream, self.resource_collect()) yield gen.sleep(interval) def run_worker_subprocess(environment, ip, scheduler_ip, scheduler_port, ncores, nanny_port, worker_port, name, memory_limit, io_loop, fn, quiet): if environment.endswith('python'): environment = os.path.dirname(environment) if os.path.exists(os.path.join(environment, 'bin')): environment = os.path.join(environment, 'bin') executable = os.path.join(environment, 'python') args = ['-m', 'distributed.cli.dask_worker'] args.extend(['%s:%d' %(scheduler_ip, scheduler_port), '--no-nanny', '--host', ip, '--worker-port', worker_port, '--nanny-port', nanny_port, '--nthreads', ncores, '--nprocs', 1, '--temp-filename', fn]) if name: args.extend(['--name', name]) if memory_limit: args.extend(['--memory-limit', memory_limit]) proc = subprocess.Popen([executable] + list(map(str, args)), stderr=subprocess.PIPE if quiet else None) processes_to_close.append(proc) return proc def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port, worker_port, local_dir, services, name, memory_limit): """ Function run by the Nanny when creating the worker """ from distributed import Worker # pragma: no cover from tornado.ioloop import IOLoop # pragma: no cover IOLoop.clear_instance() # pragma: no cover loop = IOLoop() # pragma: no cover loop.make_current() # pragma: no cover worker = Worker(scheduler_ip, scheduler_port, ncores=ncores, ip=ip, service_ports={'nanny': nanny_port}, local_dir=local_dir, services=services, name=name, memory_limit=memory_limit, loop=loop) # pragma: no cover @gen.coroutine # pragma: no cover def start(): try: # pragma: no cover yield worker._start(worker_port) # pragma: no cover except Exception as e: # pragma: no cover logger.exception(e) # pragma: no cover q.put(e) # pragma: no cover else: assert worker.port # pragma: no cover q.put({'port': worker.port, 'dir': worker.local_dir}) # pragma: no cover loop.add_callback(start) # pragma: no cover try: loop.start() # pragma: no cover finally: loop.stop() loop.close(all_fds=True) def isalive(proc): if isinstance(proc, subprocess.Popen): return proc.poll() is None else: return proc.is_alive() import atexit closing = [False] processes_to_close = [] def _closing(): for proc in processes_to_close: try: 
proc.terminate() except OSError: pass closing[0] = True atexit.register(_closing)
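# Illustrative usage sketch (added; not part of the original module): start a
# Nanny against an already-running scheduler and block on the IO loop. The
# scheduler address 127.0.0.1:8786 and ncores=2 are assumed example values;
# in a real deployment the nanny is normally launched via the dask-worker CLI.
if __name__ == '__main__':
    nanny = Nanny('127.0.0.1', 8786, ncores=2)
    nanny.start(0)            # listen on an OS-assigned port and spawn the worker
    IOLoop.current().start()  # run until interrupted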
"""Helper classes for Google Assistant integration.""" from asyncio import gather from collections.abc import Mapping from typing import List from homeassistant.core import Context, callback from homeassistant.const import ( CONF_NAME, STATE_UNAVAILABLE, ATTR_SUPPORTED_FEATURES, ATTR_DEVICE_CLASS, CLOUD_NEVER_EXPOSED_ENTITIES, ) from . import trait from .const import ( DOMAIN_TO_GOOGLE_TYPES, CONF_ALIASES, ERR_FUNCTION_NOT_SUPPORTED, DEVICE_CLASS_TO_GOOGLE_TYPES, CONF_ROOM_HINT, ) from .error import SmartHomeError class AbstractConfig: """Hold the configuration for Google Assistant.""" @property def agent_user_id(self): """Return Agent User Id to use for query responses.""" return None @property def entity_config(self): """Return entity config.""" return {} @property def secure_devices_pin(self): """Return entity config.""" return None def should_expose(self, state) -> bool: """Return if entity should be exposed.""" raise NotImplementedError def should_2fa(self, state): """If an entity should have 2FA checked.""" # pylint: disable=no-self-use return True class RequestData: """Hold data associated with a particular request.""" def __init__(self, config, user_id, request_id): """Initialize the request data.""" self.config = config self.request_id = request_id self.context = Context(user_id=user_id) def get_google_type(domain, device_class): """Google type based on domain and device class.""" typ = DEVICE_CLASS_TO_GOOGLE_TYPES.get((domain, device_class)) return typ if typ is not None else DOMAIN_TO_GOOGLE_TYPES[domain] class GoogleEntity: """Adaptation of Entity expressed in Google's terms.""" def __init__(self, hass, config, state): """Initialize a Google entity.""" self.hass = hass self.config = config self.state = state self._traits = None @property def entity_id(self): """Return entity ID.""" return self.state.entity_id @callback def traits(self): """Return traits for entity.""" if self._traits is not None: return self._traits state = self.state domain = state.domain features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) device_class = state.attributes.get(ATTR_DEVICE_CLASS) self._traits = [ Trait(self.hass, state, self.config) for Trait in trait.TRAITS if Trait.supported(domain, features, device_class) ] return self._traits @callback def is_supported(self) -> bool: """Return if the entity is supported by Google.""" return self.state.state != STATE_UNAVAILABLE and bool(self.traits()) @callback def might_2fa(self) -> bool: """Return if the entity might encounter 2FA.""" state = self.state domain = state.domain features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) device_class = state.attributes.get(ATTR_DEVICE_CLASS) return any( trait.might_2fa(domain, features, device_class) for trait in self.traits() ) async def sync_serialize(self): """Serialize entity for a SYNC response. 
https://developers.google.com/actions/smarthome/create-app#actiondevicessync """ state = self.state entity_config = self.config.entity_config.get(state.entity_id, {}) name = (entity_config.get(CONF_NAME) or state.name).strip() domain = state.domain device_class = state.attributes.get(ATTR_DEVICE_CLASS) traits = self.traits() device_type = get_google_type(domain, device_class) device = { "id": state.entity_id, "name": {"name": name}, "attributes": {}, "traits": [trait.name for trait in traits], "willReportState": False, "type": device_type, } # use aliases aliases = entity_config.get(CONF_ALIASES) if aliases: device["name"]["nicknames"] = aliases for trt in traits: device["attributes"].update(trt.sync_attributes()) room = entity_config.get(CONF_ROOM_HINT) if room: device["roomHint"] = room return device dev_reg, ent_reg, area_reg = await gather( self.hass.helpers.device_registry.async_get_registry(), self.hass.helpers.entity_registry.async_get_registry(), self.hass.helpers.area_registry.async_get_registry(), ) entity_entry = ent_reg.async_get(state.entity_id) if not (entity_entry and entity_entry.device_id): return device device_entry = dev_reg.devices.get(entity_entry.device_id) if not (device_entry and device_entry.area_id): return device area_entry = area_reg.areas.get(device_entry.area_id) if area_entry and area_entry.name: device["roomHint"] = area_entry.name return device @callback def query_serialize(self): """Serialize entity for a QUERY response. https://developers.google.com/actions/smarthome/create-app#actiondevicesquery """ state = self.state if state.state == STATE_UNAVAILABLE: return {"online": False} attrs = {"online": True} for trt in self.traits(): deep_update(attrs, trt.query_attributes()) return attrs async def execute(self, data, command_payload): """Execute a command. https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute """ command = command_payload["command"] params = command_payload.get("params", {}) challenge = command_payload.get("challenge", {}) executed = False for trt in self.traits(): if trt.can_execute(command, params): await trt.execute(command, data, params, challenge) executed = True break if not executed: raise SmartHomeError( ERR_FUNCTION_NOT_SUPPORTED, f"Unable to execute {command} for {self.state.entity_id}", ) @callback def async_update(self): """Update the entity with latest info from Home Assistant.""" self.state = self.hass.states.get(self.entity_id) if self._traits is None: return for trt in self._traits: trt.state = self.state def deep_update(target, source): """Update a nested dictionary with another nested dictionary.""" for key, value in source.items(): if isinstance(value, Mapping): target[key] = deep_update(target.get(key, {}), value) else: target[key] = value return target @callback def async_get_entities(hass, config) -> List[GoogleEntity]: """Return all entities that are supported by Google.""" entities = [] for state in hass.states.async_all(): if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES: continue entity = GoogleEntity(hass, config, state) if entity.is_supported(): entities.append(entity) return entities
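# Small illustrative sketch (added; not part of the Home Assistant source):
# deep_update merges nested mappings in place, which is how the per-trait
# query attributes are combined in GoogleEntity.query_serialize above.
# The attribute names used here are made-up example data.
#
# attrs = {"online": True, "color": {"spectrumRgb": 255}}
# deep_update(attrs, {"color": {"temperatureK": 2700}, "on": True})
# # attrs is now:
# # {"online": True, "color": {"spectrumRgb": 255, "temperatureK": 2700}, "on": True}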
#!/usr/bin/env python from unittest import TestCase import subprocess, os, shutil, tempfile # Load git-shadow as a module for functional unit tests. Le Hack. Sacrebleu!!1 import imp git_shadow = imp.load_source("git_shadow", os.path.join(os.getcwd(), "git-shadow")) def rm_r(path): if os.path.isdir(path): shutil.rmtree(path) elif os.path.exists(path): os.remove(path) def num_commits(repo_dir): print "num_commits", subprocess.check_output(["git", "log"], cwd=repo_dir) out = subprocess.check_output(["git", "log", "--format=%H"], cwd=repo_dir) return len(out.strip().splitlines()) class UnitTests(TestCase): def setUp(self): # create dummy repo for testing self.repo_dir = os.path.realpath(tempfile.mkdtemp()) subprocess.check_call(["git", "init", self.repo_dir]) # add cwd to path to support execution of "git-shadow" in tests os.environ["PATH"] = ":".join(os.environ["PATH"].split(":") + [os.getcwd()]) self.env = os.environ def tearDown(self): rm_r(self.repo_dir) def test_get_shadow_path(self): ''' Verify the shadow repo path is constructed properly and that an Exception is raised if the func is called outside a git repo. ''' p = git_shadow.get_shadow_path(self.repo_dir) self.assertEqual(p, os.path.join(self.repo_dir, ".shadow")) tdir = tempfile.mkdtemp() try: with self.assertRaises(subprocess.CalledProcessError): git_shadow.get_shadow_path(tdir) finally: rm_r(tdir) def test_get_current_path(self): ''' Verify the urrent repo path is constructed properly and that an Exception is raised if the func is called outside a git repo. ''' p = git_shadow.get_current_path(self.repo_dir) self.assertEqual(p, os.path.join(self.repo_dir, ".shadow", "current")) tdir = tempfile.mkdtemp() try: with self.assertRaises(subprocess.CalledProcessError): git_shadow.get_current_path(tdir) finally: rm_r(tdir) def test_create_current(self): ''' Verify the function creates/overwrites repos properly ''' git_shadow.create_current(cwd=self.repo_dir) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", "current", ".git"))) with self.assertRaises(OSError): git_shadow.create_current(cwd=self.repo_dir) def test_create_current_shadow(self): ''' Verify the function shadows controlled files ''' fp_a = os.path.join(self.repo_dir, "a") open(fp_a, "wt").write("test_a") dir_b = os.path.join(self.repo_dir, "db") os.mkdir(dir_b) fp_b = os.path.join(dir_b, "b") open(fp_b, "wt").write("test_b") subprocess.call(["git", "add", fp_a, fp_b], cwd=self.repo_dir) subprocess.call(["git", "commit", "-m", "'message'"], cwd=self.repo_dir) git_shadow.create_current(cwd=self.repo_dir) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", "current", fp_a))) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", "current", fp_b))) def test_add_hooks(self): # verify hooks are installed in parent repository git_shadow.add_hooks(self.repo_dir) subprocess.call(["git", "shadow", "add-hooks", self.repo_dir], env=self.env) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".git", "hooks", "post-commit"))) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".git", "hooks", "post-checkout"))) def test_add_hooks_merge(self): hook_path = os.path.join(self.repo_dir, ".git", "hooks", "post-commit") open(hook_path, "wt").write("echo test") git_shadow.add_hooks(self.repo_dir) self.assertEqual(open(hook_path, "rt").read(), "echo test\ngit shadow post-commit\n") def test_remove_hooks(self): # verify hooks are removed from parent repository git_shadow.add_hooks(self.repo_dir) 
git_shadow.remove_hooks(self.repo_dir) self.assertFalse(os.path.exists(os.path.join(self.repo_dir, ".git", "hooks", "post-commit"))) self.assertFalse(os.path.exists(os.path.join(self.repo_dir, ".git", "hooks", "post-checkout"))) def test_remove_hooks_merge(self): # verify git-shadow hooks are removed without clobbering existing hook file filetext = "foobaz" filepath = os.path.join(self.repo_dir, ".git", "hooks", "post-commit") open(filepath, "wt").write(filetext) git_shadow.add_hooks(self.repo_dir) git_shadow.remove_hooks(self.repo_dir) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".git", "hooks", "post-commit"))) self.assertEqual(filetext, open(filepath, "rt").read()) self.assertFalse(os.path.exists(os.path.join(self.repo_dir, ".git", "hooks", "post-checkout"))) def test_activate(self): git_shadow.activate(self.repo_dir) path = os.path.join(self.repo_dir, ".shadow", "current") self.assertTrue(os.path.exists(path)) self.assertTrue(git_shadow.is_active(self.repo_dir)) git_shadow.deactivate(self.repo_dir) path = os.path.join(self.repo_dir, ".shadow", "current") self.assertFalse(os.path.exists(path)) self.assertFalse(git_shadow.is_active(self.repo_dir)) def test_shadow_controlled_files_moves(self): ''' Verify the function shadows controlled files when git mv, rm are used ''' ''' TODO fp_a = os.path.join(self.repo_dir, "a") open(fp_a, "wt").write("test_a") dir_b = os.path.join(self.repo_dir, "db") os.mkdir(dir_b) fp_b = os.path.join(dir_b, "b") open(fp_b, "wt").write("test_b") subprocess.call(["git", "add", fp_a, fp_b], cwd=self.repo_dir) subprocess.call(["git", "commit", "-m", "'message'"], cwd=self.repo_dir) git_shadow.create_current(cwd=self.repo_dir) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", "current", fp_a))) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", "current", fp_b))) ''' pass def test_shadow_file(self): # add some files to a test repo test_filepath = os.path.join(self.repo_dir, "foobar") open(test_filepath, "wt").write("some file contents") subprocess.check_call(["git", "add", test_filepath], cwd=self.repo_dir, env=self.env) os.mkdir(os.path.join(self.repo_dir, "foobaz")) test_filepath = os.path.join(self.repo_dir, "foobaz", "foomanchu") open(test_filepath, "wt").write("some other file contents") subprocess.check_call(["git", "add", test_filepath], cwd=self.repo_dir, env=self.env) subprocess.check_call(["git", "commit", "-m", "'message'"], cwd=self.repo_dir, env=self.env) git_shadow.activate(self.repo_dir) # verify adding an unchanged file results in creation of .shadow/current, # but doesn't make an additional commit shadow_repo_path = git_shadow.get_current_path(self.repo_dir) print "self.repo_dir", self.repo_dir git_shadow.shadow_file(test_filepath, test_filepath) self.assertEqual(1, num_commits(shadow_repo_path)) # verify adding a changed file *does* result in a commit to the shadow repo with tempfile.NamedTemporaryFile() as tf: tf.write("new contents..\nare here!") tf.flush() git_shadow.shadow_file(test_filepath, tf.name) self.assertEqual(2, num_commits(shadow_repo_path)) """ class IntegrationTests(TestCase): def setUp(self): # create dummy repo for testing self.repo_dir = os.path.realpath(tempfile.mkdtemp()) subprocess.check_call(["git", "init", self.repo_dir]) # add cwd to path to support execution of "git-shadow" in tests self.env = os.environ self.env["PATH"] = ":".join(self.env["PATH"].split(":") + [os.getcwd()]) def tearDown(self): rm_r(self.repo_dir) def test_shadow_file(self): # add some files to a test 
repo test_filepath = os.path.join(self.repo_dir, "foobar") open(test_filepath, "wt").write("some file contents") subprocess.check_call(["git", "add", test_filepath], cwd=self.repo_dir, env=self.env) os.mkdir(os.path.join(self.repo_dir, "foobaz")) test_filepath = os.path.join(self.repo_dir, "foobaz", "foomanchu") open(test_filepath, "wt").write("some other file contents") subprocess.check_call(["git", "add", test_filepath], cwd=self.repo_dir, env=self.env) subprocess.check_call(["git", "commit", "-m", "'message'"], cwd=self.repo_dir, env=self.env) # create shadow repo git_shadow.create_shadow_repo(self.repo_dir) git_shadow.shadow_controlled_files(self.repo_dir) git_shadow.add_hooks(self.repo_dir) # simulate two modifications to a file with tempfile.NamedTemporaryFile() as tf: tf.write("new contents..\nare here!") tf.flush() git_shadow.shadow_file(test_filepath, tf.name) tf.write("new contents..\nare here now!") tf.flush() git_shadow.shadow_file(test_filepath, tf.name) # simulate a save shutil.copyfile(tf.name, test_filepath) # simulate an add/commit to the enclosing repo subprocess.check_call(["git", "add", test_filepath], cwd=self.repo_dir, env=self.env) subprocess.check_call(["git", "commit", "-m", "'message'"], cwd=self.repo_dir, env=self.env) # make sure: new shadow repo was initialized in HEAD, and it is empty: # - only a single commit -- all tracked files being added self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", ".git"))) self.assertEqual(num_commits(os.path.join(self.repo_dir, ".shadow")), 1) # make sure: old shadow repo was committed with last commit subprocess.check_call(["git", "checkout", "HEAD^"], cwd=self.repo_dir, env=self.env) self.assertTrue(os.path.exists(os.path.join(self.repo_dir, ".shadow", "git"))) self.assertEqual(num_commits(os.path.join(self.repo_dir, ".shadow")), 2) """
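# Added for convenience (an assumption -- the original module may rely on an
# external test runner such as nose): allow these tests to be run directly,
# e.g. `python test_git_shadow.py` if the file is saved under that name.
if __name__ == "__main__":
    import unittest
    unittest.main()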
#STANDARD LIB from urlparse import urlparse # LIBRARIES from django.contrib.auth import get_user_model, get_user, BACKEND_SESSION_KEY from django.contrib.sessions.middleware import SessionMiddleware from django.http import HttpRequest from django.test import TestCase from django.test.utils import override_settings from django.contrib.auth.models import AnonymousUser from django.contrib.auth.hashers import make_password from google.appengine.api import users # DJANGAE from djangae.contrib.gauth.datastore.models import GaeDatastoreUser, Group, get_permission_choices from djangae.contrib.gauth.backends import AppEngineUserAPI from djangae.contrib.gauth.middleware import AuthenticationMiddleware from djangae.contrib.gauth.settings import AUTHENTICATION_BACKENDS from djangae.contrib.gauth.utils import get_switch_accounts_url from djangae.contrib import sleuth class BackendTests(TestCase): """ Tests for the AppEngineUserAPI auth backend. """ def test_invalid_credentials_cause_typeerror(self): """ If the `authenticate` method is passed credentials which it doesn't understand then Django expects it to raise a TypeError. """ backend = AppEngineUserAPI() credentials = {'username': 'ted', 'password': 'secret'} self.assertRaises(TypeError, backend.authenticate, **credentials) def test_authenticate_creates_user_object(self): """ If `authenticate` is called with valid credentials then a User object should be created """ User = get_user_model() self.assertEqual(User.objects.count(), 0) google_user = users.User('1@example.com', _user_id='111111111100000000001') backend = AppEngineUserAPI() user = backend.authenticate(google_user=google_user,) self.assertEqual(user.email, '1@example.com') self.assertEqual(User.objects.count(), 1) # Calling authenticate again with the same credentials should not create another user user2 = backend.authenticate(google_user=google_user) self.assertEqual(user.pk, user2.pk) @override_settings(ALLOW_USER_PRE_CREATION=True) def test_user_pre_creation(self): """ User objects for Google-Accounts-based users should be able to be pre-created in DB and then matched by email address when they log in. """ User = get_user_model() backend = AppEngineUserAPI() email = '1@example.com' # Pre-create our user User.objects.pre_create_google_user(email) # Now authenticate this user via the Google Accounts API google_user = users.User(email=email, _user_id='111111111100000000001') user = backend.authenticate(google_user=google_user) # Check things self.assertEqual(user.email, email) self.assertIsNotNone(user.last_login) self.assertFalse(user.has_usable_password()) @override_settings(ALLOW_USER_PRE_CREATION=True) def test_user_id_switch(self): """ Users sometimes login with the same email, but a different google user id. We handle those cases by blanking out the email on the old user object and creating a new one with the new user id. 
""" email = 'user@customexample.com' old_user = users.User(email=email, _user_id='111111111100000000001') new_user = users.User(email=email, _user_id='111111111100000000002') User = get_user_model() backend = AppEngineUserAPI() # Authenticate 1st time, creating the user user1 = backend.authenticate(google_user=old_user) self.assertEqual(user1.email, email) self.assertTrue(user1.username.endswith('1')) self.assertEqual(1, User.objects.count()) # Now another user logs in using the same email user2 = backend.authenticate(google_user=new_user) self.assertEqual(user2.email, email) self.assertTrue(user2.username.endswith('2')) self.assertEqual(2, User.objects.count()) # The old account is kept around, but the email is blanked user1 = User.objects.get(pk=user1.pk) self.assertEqual(user1.email, None) @override_settings(DJANGAE_FORCE_USER_PRE_CREATION=True) def test_force_user_pre_creation(self): User = get_user_model() self.assertEqual(User.objects.count(), 0) google_user = users.User('1@example.com', _user_id='111111111100000000001') backend = AppEngineUserAPI() self.assertIsNone(backend.authenticate(google_user=google_user,)) self.assertEqual(User.objects.count(), 0) # superusers don't need pre-creation of User object. self.assertEqual(User.objects.count(), 0) with sleuth.switch('google.appengine.api.users.is_current_user_admin', lambda: True): user = backend.authenticate(google_user=google_user,) self.assertEqual(User.objects.count(), 1) self.assertEquals(User.objects.get(), user) @override_settings(AUTHENTICATION_BACKENDS=AUTHENTICATION_BACKENDS) class MiddlewareTests(TestCase): """ Tests for the AuthenticationMiddleware. """ def test_login(self): def _get_current_user(): return users.User('1@example.com', _user_id='111111111100000000001') request = HttpRequest() SessionMiddleware().process_request(request) # Make the damn sessions work request.session[BACKEND_SESSION_KEY] = 'djangae.contrib.gauth.backends.AppEngineUserAPI' middleware = AuthenticationMiddleware() # Check that we're not logged in already user = get_user(request) self.assertFalse(user.is_authenticated()) # Check that running the middleware when the Google users API doesn't know the current # user still leaves us as an anonymous users. with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: None): middleware.process_request(request) # Check that the middleware successfully logged us in user = get_user(request) self.assertFalse(user.is_authenticated()) # Now check that when the Google users API *does* know who we are, that we are logged in. 
with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', _get_current_user): middleware.process_request(request) # Check that the middleware successfully logged us in user = get_user(request) self.assertTrue(user.is_authenticated()) self.assertEqual(user.email, '1@example.com') self.assertEqual(user.username, '111111111100000000001') def test_account_switch(self): user1 = users.User('1@example.com', _user_id='111111111100000000001') user2 = users.User('2@example.com', _user_id='222222222200000000002') request = HttpRequest() SessionMiddleware().process_request(request) # Make the damn sessions work request.session[BACKEND_SESSION_KEY] = 'djangae.contrib.gauth.backends.AppEngineUserAPI' middleware = AuthenticationMiddleware() with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user1): middleware.process_request(request) self.assertEqual(user1.user_id(), request.user.username) with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user2): middleware.process_request(request) self.assertEqual(user2.user_id(), request.user.username) def test_user_id_switch(self): """ Users sometimes login with the same email, but a different google user id. We handle those cases by blanking out the email on the old user object and creating a new one with the new user id. """ email = 'User@example.com' user1 = users.User(email, _user_id='111111111100000000001') user2 = users.User(email, _user_id='222222222200000000002') User = get_user_model() request = HttpRequest() SessionMiddleware().process_request(request) # Make the damn sessions work request.session[BACKEND_SESSION_KEY] = 'djangae.contrib.gauth.backends.AppEngineUserAPI' middleware = AuthenticationMiddleware() with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user1): middleware.process_request(request) self.assertEqual(1, User.objects.count()) django_user1 = request.user self.assertEqual(user1.user_id(), django_user1.username) self.assertEqual(user1.email(), django_user1.email) with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user2): middleware.process_request(request) self.assertEqual(2, User.objects.count()) django_user2 = request.user self.assertEqual(user2.user_id(), django_user2.username) self.assertEqual(user2.email(), django_user2.email) django_user1 = User.objects.get(pk=django_user1.pk) self.assertEqual(django_user1.email, None) @override_settings(DJANGAE_FORCE_USER_PRE_CREATION=True) def test_force_user_pre_creation(self): email = 'User@example.com' user1 = users.User(email, _user_id='111111111100000000001') with sleuth.switch('djangae.contrib.gauth.middleware.users.get_current_user', lambda: user1): request = HttpRequest() SessionMiddleware().process_request(request) # Make the damn sessions work middleware = AuthenticationMiddleware() middleware.process_request(request) # We expect request.user to be AnonymousUser(), because there was no User object in the DB # and so with pre-creation required, authentication should have failed self.assertTrue(isinstance(request.user, AnonymousUser)) @override_settings( AUTH_USER_MODEL='djangae.GaeDatastoreUser', AUTHENTICATION_BACKENDS=('djangae.contrib.gauth.backends.AppEngineUserAPI',) ) class CustomPermissionsUserModelBackendTest(TestCase): """ Tests for the ModelBackend using the CustomPermissionsUser model. 
As with the ExtensionUser test, this isn't a perfect test, because both the User and CustomPermissionsUser are synchronized to the database, which wouldn't ordinary happen in production. """ UserModel = GaeDatastoreUser def setUp(self): # Fix Django so that we can use our custom user model. # TODO: Submit a fix to Django to allow override_settings(AUTH_USER_MODEL='something') to # work, even if the project has already set AUTH_USER_MODEL to a custom user GaeDatastoreUser.objects = GaeDatastoreUser._default_manager GaeDatastoreUser._base_manager = GaeDatastoreUser._default_manager self.user = GaeDatastoreUser.objects.create( username='test1', email='test@example.com', password=make_password(None), is_active=True, ) self.superuser = GaeDatastoreUser.objects.create( username='test2', email='test2@example.com', is_superuser=True, password=make_password(None), is_active=True, ) def tearDown(self): GaeDatastoreUser.objects.all().delete() super(CustomPermissionsUserModelBackendTest, self).tearDown() def test_has_perm(self): user = self.UserModel.objects.get(pk=self.user.pk) self.assertEqual(user.has_perm('auth.test'), False) user.is_staff = True user.save() self.assertEqual(user.has_perm('auth.test'), False) user.is_superuser = True user.save() self.assertEqual(user.has_perm('auth.test'), True) user.is_staff = False user.is_superuser = False user.save() self.assertEqual(user.has_perm('auth.test'), False) user.is_staff = True user.is_superuser = True user.is_active = False user.save() self.assertEqual(user.has_perm('auth.test'), False) def test_custom_perms(self): user = self.UserModel.objects.get(pk=self.user.pk) user.user_permissions = ['auth.test'] user.save() # reloading user to purge the _perm_cache user = self.UserModel.objects.get(pk=self.user.pk) self.assertEqual(user.get_all_permissions() == set(['auth.test']), True) self.assertEqual(user.get_group_permissions(), set([])) self.assertEqual(user.has_module_perms('Group'), False) self.assertEqual(user.has_module_perms('auth'), True) user.user_permissions.extend(['auth.test2', 'auth.test3']) user.save() user = self.UserModel.objects.get(pk=self.user.pk) self.assertEqual(user.get_all_permissions(), set(['auth.test2', 'auth.test', 'auth.test3'])) self.assertEqual(user.has_perm('test'), False) self.assertEqual(user.has_perm('auth.test'), True) self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True) group = Group.objects.create(name='test_group') group.permissions = ['auth.test_group'] group.save() user.groups = [group] user.save() user = self.UserModel.objects.get(pk=self.user.pk) exp = set(['auth.test2', 'auth.test', 'auth.test3', 'auth.test_group']) self.assertEqual(user.get_all_permissions(), exp) self.assertEqual(user.get_group_permissions(), set(['auth.test_group'])) self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True) user = AnonymousUser() self.assertEqual(user.has_perm('test'), False) self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False) def test_has_no_object_perm(self): """Regressiontest for #12462""" user = self.UserModel.objects.get(pk=self.user.pk) user.user_permissions = ['auth.test'] user.save() self.assertEqual(user.has_perm('auth.test', 'object'), False) self.assertEqual(user.get_all_permissions('object'), set([])) self.assertEqual(user.has_perm('auth.test'), True) self.assertEqual(user.get_all_permissions(), set(['auth.test'])) def test_get_all_superuser_permissions(self): """A superuser has all permissions. 
Refs #14795.""" user = self.UserModel.objects.get(pk=self.superuser.pk) self.assertEqual(len(user.get_all_permissions()), len(get_permission_choices())) @override_settings( AUTH_USER_MODEL='djangae.GaeDatastoreUser', AUTHENTICATION_BACKENDS=('djangae.contrib.gauth.backends.AppEngineUserAPI',) ) class SwitchAccountsTests(TestCase): """ Tests for the switch accounts functionality. """ def test_switch_accounts(self): gcu = 'djangae.contrib.gauth.middleware.users.get_current_user' final_destination = '/death/' # there's no escaping it switch_accounts_url = get_switch_accounts_url(next=final_destination) any_url = '/_ah/warmup' jekyll = users.User(email='jekyll@gmail.com', _user_id='1') hyde = users.User(email='hyde@gmail.com', _user_id='2') # we start our scenario with the user logged in with sleuth.switch(gcu, lambda: jekyll): response = self.client.get(any_url) # Check that the user is logged in expected_user_query = GaeDatastoreUser.objects.filter(username=jekyll.user_id()) self.assertEqual(len(expected_user_query), 1) self.assertEqual(int(self.client._session()['_auth_user_id']), expected_user_query[0].pk) # Now call the switch_accounts view, which should give us a redirect to the login page response = self.client.get(switch_accounts_url, follow=False) self.assertEqual(response.status_code, 302) self.assertEqual(response['location'], users.create_login_url(switch_accounts_url)) # In tests, we don't have dev_appserver fired up, so we can't actually call the login # URL, but let's suppose that the user wasn't logged into multiple accounts at once # and so the login page redirected us straight back to the switch_accounts view. # It should detect this, and should now redirect us to the log*out* URL with a # destination of the log*in* URL response = self.client.get(switch_accounts_url) self.assertEqual(response.status_code, 302) self.assertEqual( response['location'], users.create_logout_url(users.create_login_url(switch_accounts_url)) ) # And now we have to emulate the scenario that we have now logged in with a different # account, so re-mock that with sleuth.switch(gcu, lambda: hyde): # Now that we're logged in as a different user, we expect request.user to get set to # the equivalent Django user and to be redirected to our final destination response = self.client.get(switch_accounts_url) redirect_path = urlparse(response['location']).path # it has the host name as well self.assertEqual(redirect_path, final_destination) expected_user_query = GaeDatastoreUser.objects.filter(username=hyde.user_id()) self.assertEqual(len(expected_user_query), 1) self.assertEqual(int(self.client._session()['_auth_user_id']), expected_user_query[0].pk)
# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from __future__ import print_function, division, unicode_literals, absolute_import import os.path as op from ...utils.filemanip import split_filename from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, InputMultiPath, isdefined) class MRConvertInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='voxel-order data filename') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output filename') extract_at_axis = traits.Enum(1, 2, 3, argstr='-coord %s', position=1, desc='"Extract data only at the coordinates specified. This option specifies the Axis. Must be used in conjunction with extract_at_coordinate.') extract_at_coordinate = traits.List(traits.Float, argstr='%s', sep=',', position=2, minlen=1, maxlen=3, desc='"Extract data only at the coordinates specified. This option specifies the coordinates. Must be used in conjunction with extract_at_axis. Three comma-separated numbers giving the size of each voxel in mm.') voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',', position=3, minlen=3, maxlen=3, desc='Three comma-separated numbers giving the size of each voxel in mm.') output_datatype = traits.Enum("nii", "float", "char", "short", "int", "long", "double", argstr='-output %s', position=2, desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"') # , usedefault=True) extension = traits.Enum("mif", "nii", "float", "char", "short", "int", "long", "double", position=2, desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', usedefault=True) layout = traits.Enum("nii", "float", "char", "short", "int", "long", "double", argstr='-output %s', position=2, desc='specify the layout of the data in memory. The actual layout produced will depend on whether the output image format can support it.') resample = traits.Float(argstr='-scale %d', position=3, units='mm', desc='Apply scaling to the intensity values.') offset_bias = traits.Float(argstr='-scale %d', position=3, units='mm', desc='Apply offset to the intensity values.') replace_NaN_with_zero = traits.Bool(argstr='-zero', position=3, desc="Replace all NaN values with zero.") prs = traits.Bool(argstr='-prs', position=3, desc="Assume that the DW gradients are specified in the PRS frame (Siemens DICOM only).") class MRConvertOutputSpec(TraitedSpec): converted = File(exists=True, desc='path/name of 4D volume in voxel order') class MRConvert(CommandLine): """ Perform conversion between different file types and optionally extract a subset of the input image. If used correctly, this program can be a very useful workhorse. In addition to converting images between different formats, it can be used to extract specific studies from a data set, extract a specific region of interest, flip the images, or to scale the intensity of the images. 
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> mrconvert = mrt.MRConvert() >>> mrconvert.inputs.in_file = 'dwi_FA.mif' >>> mrconvert.inputs.out_filename = 'dwi_FA.nii' >>> mrconvert.run() # doctest: +SKIP """ _cmd = 'mrconvert' input_spec = MRConvertInputSpec output_spec = MRConvertOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['converted'] = self.inputs.out_filename if not isdefined(outputs['converted']): outputs['converted'] = op.abspath(self._gen_outfilename()) else: outputs['converted'] = op.abspath(outputs['converted']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) if isdefined(self.inputs.out_filename): outname = self.inputs.out_filename else: outname = name + '_mrconvert.' + self.inputs.extension return outname class DWI2TensorInputSpec(CommandLineInputSpec): in_file = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Diffusion-weighted images') out_filename = File(name_template="%s_tensor.mif", name_source="in_file", output_name="tensor", argstr='%s', desc='Output tensor filename', position=-1) encoding_file = File(argstr='-grad %s', position=2, desc=('Encoding file supplied as a 4xN text file with ' 'each line is in the format [ X Y Z b ], where ' '[ X Y Z ] describe the direction of the applied ' 'gradient, and b gives the b-value in units ' '(1000 s/mm^2). See FSL2MRTrix()')) ignore_slice_by_volume = traits.List(traits.Int, argstr='-ignoreslices %s', sep=' ', position=2, minlen=2, maxlen=2, desc=('Requires two values (i.e. [34 ' '1] for [Slice Volume] Ignores ' 'the image slices specified ' 'when computing the tensor. ' 'Slice here means the z ' 'coordinate of the slice to be ' 'ignored.')) ignore_volumes = traits.List(traits.Int, argstr='-ignorevolumes %s', sep=' ', position=2, minlen=1, desc=('Requires two values (i.e. [2 5 6] for ' '[Volumes] Ignores the image volumes ' 'specified when computing the tensor.')) quiet = traits.Bool(argstr='-quiet', position=1, desc=("Do not display information messages or progress " "status.")) debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class DWI2TensorOutputSpec(TraitedSpec): tensor = File(exists=True, desc='path/name of output diffusion tensor image') class DWI2Tensor(CommandLine): """ Converts diffusion-weighted images to tensor images. 
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> dwi2tensor = mrt.DWI2Tensor() >>> dwi2tensor.inputs.in_file = 'dwi.mif' >>> dwi2tensor.inputs.encoding_file = 'encoding.txt' >>> dwi2tensor.cmdline # doctest: +ALLOW_UNICODE 'dwi2tensor -grad encoding.txt dwi.mif dwi_tensor.mif' >>> dwi2tensor.run() # doctest: +SKIP """ _cmd = 'dwi2tensor' input_spec = DWI2TensorInputSpec output_spec = DWI2TensorOutputSpec class Tensor2VectorInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion tensor image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output vector filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class Tensor2VectorOutputSpec(TraitedSpec): vector = File(exists=True, desc='the output image of the major eigenvectors of the diffusion tensor image.') class Tensor2Vector(CommandLine): """ Generates a map of the major eigenvectors of the tensors in each voxel. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tensor2vector = mrt.Tensor2Vector() >>> tensor2vector.inputs.in_file = 'dwi_tensor.mif' >>> tensor2vector.run() # doctest: +SKIP """ _cmd = 'tensor2vector' input_spec = Tensor2VectorInputSpec output_spec = Tensor2VectorOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['vector'] = self.inputs.out_filename if not isdefined(outputs['vector']): outputs['vector'] = op.abspath(self._gen_outfilename()) else: outputs['vector'] = op.abspath(outputs['vector']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_vector.mif' class Tensor2FractionalAnisotropyInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion tensor image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output Fractional Anisotropy filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class Tensor2FractionalAnisotropyOutputSpec(TraitedSpec): FA = File(exists=True, desc='the output image of the major eigenvectors of the diffusion tensor image.') class Tensor2FractionalAnisotropy(CommandLine): """ Generates a map of the fractional anisotropy in each voxel. 
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tensor2FA = mrt.Tensor2FractionalAnisotropy() >>> tensor2FA.inputs.in_file = 'dwi_tensor.mif' >>> tensor2FA.run() # doctest: +SKIP """ _cmd = 'tensor2FA' input_spec = Tensor2FractionalAnisotropyInputSpec output_spec = Tensor2FractionalAnisotropyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['FA'] = self.inputs.out_filename if not isdefined(outputs['FA']): outputs['FA'] = op.abspath(self._gen_outfilename()) else: outputs['FA'] = op.abspath(outputs['FA']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_FA.mif' class Tensor2ApparentDiffusionInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Diffusion tensor image') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output Fractional Anisotropy filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class Tensor2ApparentDiffusionOutputSpec(TraitedSpec): ADC = File(exists=True, desc='the output image of the major eigenvectors of the diffusion tensor image.') class Tensor2ApparentDiffusion(CommandLine): """ Generates a map of the apparent diffusion coefficient (ADC) in each voxel Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> tensor2ADC = mrt.Tensor2ApparentDiffusion() >>> tensor2ADC.inputs.in_file = 'dwi_tensor.mif' >>> tensor2ADC.run() # doctest: +SKIP """ _cmd = 'tensor2ADC' input_spec = Tensor2ApparentDiffusionInputSpec output_spec = Tensor2ApparentDiffusionOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['ADC'] = self.inputs.out_filename if not isdefined(outputs['ADC']): outputs['ADC'] = op.abspath(self._gen_outfilename()) else: outputs['ADC'] = op.abspath(outputs['ADC']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_ADC.mif' class MRMultiplyInputSpec(CommandLineInputSpec): in_files = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Input images to be multiplied') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MRMultiplyOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image of the multiplication') class MRMultiply(CommandLine): """ Multiplies two images. 
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> MRmult = mrt.MRMultiply() >>> MRmult.inputs.in_files = ['dwi.mif', 'dwi_WMProb.mif'] >>> MRmult.run() # doctest: +SKIP """ _cmd = 'mrmult' input_spec = MRMultiplyInputSpec output_spec = MRMultiplyOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_files[0]) return name + '_MRMult.mif' class MRTrixViewerInputSpec(CommandLineInputSpec): in_files = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Input images to be viewed') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MRTrixViewerOutputSpec(TraitedSpec): pass class MRTrixViewer(CommandLine): """ Loads the input images in the MRTrix Viewer. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> MRview = mrt.MRTrixViewer() >>> MRview.inputs.in_files = 'dwi.mif' >>> MRview.run() # doctest: +SKIP """ _cmd = 'mrview' input_spec = MRTrixViewerInputSpec output_spec = MRTrixViewerOutputSpec def _list_outputs(self): return class MRTrixInfoInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Input images to be read') class MRTrixInfoOutputSpec(TraitedSpec): pass class MRTrixInfo(CommandLine): """ Prints out relevant header information found in the image specified. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> MRinfo = mrt.MRTrixInfo() >>> MRinfo.inputs.in_file = 'dwi.mif' >>> MRinfo.run() # doctest: +SKIP """ _cmd = 'mrinfo' input_spec = MRTrixInfoInputSpec output_spec = MRTrixInfoOutputSpec def _list_outputs(self): return class GenerateWhiteMatterMaskInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, desc='Diffusion-weighted images') binary_mask = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Binary brain mask') out_WMProb_filename = File(genfile=True, argstr='%s', position=-1, desc='Output WM probability image filename') encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=1, desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix') noise_level_margin = traits.Float(argstr='-margin %s', desc='Specify the width of the margin on either side of the image to be used to estimate the noise level (default = 10)') class GenerateWhiteMatterMaskOutputSpec(TraitedSpec): WMprobabilitymap = File(exists=True, desc='WMprobabilitymap') class GenerateWhiteMatterMask(CommandLine): """ Generates a white matter probability mask from the DW images. 
Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> genWM = mrt.GenerateWhiteMatterMask() >>> genWM.inputs.in_file = 'dwi.mif' >>> genWM.inputs.encoding_file = 'encoding.txt' >>> genWM.run() # doctest: +SKIP """ _cmd = 'gen_WM_mask' input_spec = GenerateWhiteMatterMaskInputSpec output_spec = GenerateWhiteMatterMaskOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['WMprobabilitymap'] = op.abspath(self._gen_outfilename()) return outputs def _gen_filename(self, name): if name == 'out_WMProb_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_WMProb.mif' class ErodeInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Input mask image to be eroded') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image filename') number_of_passes = traits.Int(argstr='-npass %s', desc='the number of passes (default: 1)') dilate = traits.Bool(argstr='-dilate', position=1, desc="Perform dilation rather than erosion") quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class ErodeOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image') class Erode(CommandLine): """ Erode (or dilates) a mask (i.e. binary) image Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> erode = mrt.Erode() >>> erode.inputs.in_file = 'mask.mif' >>> erode.run() # doctest: +SKIP """ _cmd = 'erode' input_spec = ErodeInputSpec output_spec = ErodeOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_erode.mif' class ThresholdInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='The input image to be thresholded') out_filename = File(genfile=True, argstr='%s', position=-1, desc='The output binary image mask.') absolute_threshold_value = traits.Float(argstr='-abs %s', desc='Specify threshold value as absolute intensity.') percentage_threshold_value = traits.Float(argstr='-percent %s', desc='Specify threshold value as a percentage of the peak intensity in the input image.') invert = traits.Bool(argstr='-invert', position=1, desc="Invert output binary mask") replace_zeros_with_NaN = traits.Bool(argstr='-nan', position=1, desc="Replace all zero values with NaN") quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class ThresholdOutputSpec(TraitedSpec): out_file = File(exists=True, desc='The output binary image mask.') class Threshold(CommandLine): """ Create bitwise image by thresholding image intensity. By default, the threshold level is determined using a histogram analysis to cut out the background. Otherwise, the threshold intensity can be specified using command line options. 
Note that only the first study is used for thresholding. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> thresh = mrt.Threshold() >>> thresh.inputs.in_file = 'wm_mask.mif' >>> thresh.run() # doctest: +SKIP """ _cmd = 'threshold' input_spec = ThresholdInputSpec output_spec = ThresholdOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_thresh.mif' class MedianFilter3DInputSpec(CommandLineInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='Input images to be smoothed') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image filename') quiet = traits.Bool(argstr='-quiet', position=1, desc="Do not display information messages or progress status.") debug = traits.Bool(argstr='-debug', position=1, desc="Display debugging messages.") class MedianFilter3DOutputSpec(TraitedSpec): out_file = File(exists=True, desc='the output image') class MedianFilter3D(CommandLine): """ Smooth images using a 3x3x3 median filter. Example ------- >>> import nipype.interfaces.mrtrix as mrt >>> median3d = mrt.MedianFilter3D() >>> median3d.inputs.in_file = 'mask.mif' >>> median3d.run() # doctest: +SKIP """ _cmd = 'median3D' input_spec = MedianFilter3DInputSpec output_spec = MedianFilter3DOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self.inputs.out_filename if not isdefined(outputs['out_file']): outputs['out_file'] = op.abspath(self._gen_outfilename()) else: outputs['out_file'] = op.abspath(outputs['out_file']) return outputs def _gen_filename(self, name): if name == 'out_filename': return self._gen_outfilename() else: return None def _gen_outfilename(self): _, name, _ = split_filename(self.inputs.in_file) return name + '_median3D.mif' class MRTransformInputSpec(CommandLineInputSpec): in_files = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=-2, desc='Input images to be transformed') out_filename = File(genfile=True, argstr='%s', position=-1, desc='Output image') invert = traits.Bool(argstr='-inverse', position=1, desc="Invert the specified transform before using it") replace_transform = traits.Bool(argstr='-replace', position=1, desc="replace the current transform by that specified, rather than applying it to the current transform") transformation_file = File(exists=True, argstr='-transform %s', position=1, desc='The transform to apply, in the form of a 4x4 ascii file.') template_image = File(exists=True, argstr='-template %s', position=1, desc='Reslice the input image to match the specified template image.') reference_image = File(exists=True, argstr='-reference %s', position=1, desc='in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.') flip_x = traits.Bool(argstr='-flipx', position=1, desc="assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). 
This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option.")
    quiet = traits.Bool(argstr='-quiet', position=1,
                        desc="Do not display information messages or progress status.")
    debug = traits.Bool(argstr='-debug', position=1,
                        desc="Display debugging messages.")


class MRTransformOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc='the output image of the transformation')


class MRTransform(CommandLine):
    """
    Apply spatial transformations or reslice images

    Example
    -------

    >>> MRxform = MRTransform()
    >>> MRxform.inputs.in_files = 'anat_coreg.mif'
    >>> MRxform.run()                                   # doctest: +SKIP
    """

    _cmd = 'mrtransform'
    input_spec = MRTransformInputSpec
    output_spec = MRTransformOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_file'] = self.inputs.out_filename
        if not isdefined(outputs['out_file']):
            outputs['out_file'] = op.abspath(self._gen_outfilename())
        else:
            outputs['out_file'] = op.abspath(outputs['out_file'])
        return outputs

    def _gen_filename(self, name):
        if name == 'out_filename':
            return self._gen_outfilename()
        else:
            return None

    def _gen_outfilename(self):
        _, name, _ = split_filename(self.inputs.in_files[0])
        return name + '_MRTransform.mif'
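# Usage sketch (not part of the interfaces above): the classes defined in this module are
# typically wired together inside a nipype workflow, with one interface's output field
# feeding the next interface's input. The file names below ('dwi.mif', 'encoding.txt')
# are placeholders, and the MRtrix command-line tools must be on PATH for the run to work.
import nipype.pipeline.engine as pe
import nipype.interfaces.mrtrix as mrt

dwi2tensor = pe.Node(interface=mrt.DWI2Tensor(), name='dwi2tensor')
dwi2tensor.inputs.in_file = 'dwi.mif'
dwi2tensor.inputs.encoding_file = 'encoding.txt'

tensor2fa = pe.Node(interface=mrt.Tensor2FractionalAnisotropy(), name='tensor2fa')

workflow = pe.Workflow(name='mrtrix_tensor_metrics')
# DWI2Tensor exposes its result as 'tensor'; Tensor2FractionalAnisotropy reads 'in_file'.
workflow.connect(dwi2tensor, 'tensor', tensor2fa, 'in_file')

if __name__ == '__main__':
    workflow.run()  # requires MRtrix to be installed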
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Test of Policy Engine For Tacker""" import fixtures import io from unittest import mock from urllib import request as urlrequest from oslo_policy import policy as common_policy from oslo_serialization import jsonutils as json from oslo_utils import importutils import tacker from tacker.api.v1 import attributes from tacker.common import exceptions from tacker import context from tacker import manager from tacker import policy from tacker.tests import base class PolicyFileTestCase(base.BaseTestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.skipTest("Not ready yet") policy.reset() self.addCleanup(policy.reset) self.context = context.Context('fake', 'fake', is_admin=False) self.target = {} self.tempdir = self.useFixture(fixtures.TempDir()) def test_modified_policy_reloads(self): def fake_find_config_file(_1, _2): return self.tempdir.join('policy') with mock.patch.object(tacker.common.utils, 'find_config_file', new=fake_find_config_file): tmpfilename = fake_find_config_file(None, None) action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ""}""") policy.init() policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": "!"}""") # NOTE(vish): reset stored policy cache so we don't have to # sleep(1) policy._POLICY_CACHE = {} policy.init() self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(base.BaseTestCase): def setUp(self): super(PolicyTestCase, self).setUp() self.skipTest("Not ready yet") policy.reset() self.addCleanup(policy.reset) # NOTE(vish): preload rules to circumvent reloading from file policy.init() rules = { "true": '@', "example:allowed": '@', "example:denied": '!', "example:get_http": "http:http://www.example.com", "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s", "example:early_and_fail": "! 
and @", "example:early_or_success": "@ or !", "example:lowercase_admin": "role:admin or role:sysadmin", "example:uppercase_admin": "role:ADMIN or role:sysadmin", } # NOTE(vish): then overload underlying rules common_policy.set_rules(common_policy.Rules( dict((k, common_policy.parse_rule(v)) for k, v in rules.items()))) self.context = context.Context('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_check_bad_action_noraise(self): action = "example:denied" result = policy.check(self.context, action, self.target) self.assertFalse(result) def test_check_non_existent_action(self): action = "example:idonotexist" result_1 = policy.check(self.context, action, self.target) self.assertFalse(result_1) result_2 = policy.check(self.context, action, self.target, might_not_exist=True) self.assertTrue(result_2) def test_enforce_good_action(self): action = "example:allowed" result = policy.enforce(self.context, action, self.target) self.assertEqual(True, result) def test_enforce_http_true(self): def fakeurlopen(url, post_data): return io.StringIO("True") with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen): action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertEqual(True, result) def test_enforce_http_false(self): def fakeurlopen(url, post_data): return io.StringIO("False") with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen): action = "example:get_http" target = {} self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'tenant_id': 'fake'} target_not_mine = {'tenant_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.Context('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(base.BaseTestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() self.skipTest("Not ready yet") policy.reset() policy.init() self.addCleanup(policy.reset) self.rules = { "default": '', "example:exist": '!', } self._set_rules('default') self.context = context.Context('fake', 'fake') def _set_rules(self, default_rule): rules = common_policy.Rules( dict((k, common_policy.parse_rule(v)) for k, v in self.rules.items()), default_rule) common_policy.set_rules(rules) def test_policy_called(self): self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def 
test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) def test_default_not_found(self): self._set_rules("default_noexist") self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, "example:noexist", {}) FAKE_RESOURCE_NAME = 'something' FAKE_RESOURCE = {"%ss" % FAKE_RESOURCE_NAME: {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': {'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }}} class TackerPolicyTestCase(base.BaseTestCase): def setUp(self): super(TackerPolicyTestCase, self).setUp() self.skipTest("Not ready yet") policy.reset() policy.init() self.addCleanup(policy.reset) self.admin_only_legacy = "role:admin" self.admin_or_owner_legacy = "role:admin or tenant_id:%(tenant_id)s" # Add a Fake 'something' resource to RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCE) self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { "context_is_admin": "role:admin", "admin_or_network_owner": "rule:context_is_admin or " "tenant_id:%(network:tenant_id)s", "admin_or_owner": ("rule:context_is_admin or " "tenant_id:%(tenant_id)s"), "admin_only": "rule:context_is_admin", "regular_user": "role:user", "shared": "field:networks:shared=True", "external": "field:networks:router:external=True", "default": '@', "create_network": "rule:admin_or_owner", "create_network:shared": "rule:admin_only", "update_network": '@', "update_network:shared": "rule:admin_only", "get_network": "rule:admin_or_owner or " "rule:shared or " "rule:external", "create_port:mac": "rule:admin_or_network_owner", "create_something": "rule:admin_or_owner", "create_something:attr": "rule:admin_or_owner", "create_something:attr:sub_attr_1": "rule:admin_or_owner", "create_something:attr:sub_attr_2": "rule:admin_only", "get_firewall_policy": "rule:admin_or_owner or " "rule:shared", "get_firewall_rule": "rule:admin_or_owner or " "rule:shared" }.items()) def fakepolicyinit(): common_policy.set_rules(common_policy.Rules(self.rules)) def remove_fake_resource(): del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] self.patcher = mock.patch.object(tacker.policy, 'init', new=fakepolicyinit) self.patcher.start() self.addCleanup(remove_fake_resource) self.context = context.Context('fake', 'fake', roles=['user']) plugin_klass = importutils.import_class( "tacker.db.db_base_plugin_v2.TackerDbPluginV2") self.manager_patcher = mock.patch('tacker.manager.TackerManager') fake_manager = self.manager_patcher.start() fake_manager_instance = fake_manager.return_value fake_manager_instance.plugin = plugin_klass() def _test_action_on_attr(self, context, action, attr, value, exception=None): action = "%s_network" % action target = {'tenant_id': 'the_owner', attr: value} if exception: self.assertRaises(exception, policy.enforce, context, action, target) else: result = policy.enforce(context, action, target) self.assertEqual(True, result) def _test_nonadmin_action_on_attr(self, action, attr, value, exception=None): user_context = context.Context('', "user", roles=['user']) self._test_action_on_attr(user_context, action, attr, value, exception) def test_nonadmin_write_on_private_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', False, exceptions.PolicyNotAuthorized) def test_nonadmin_read_on_private_fails(self): self._test_nonadmin_action_on_attr('get', 'shared', False, exceptions.PolicyNotAuthorized) def 
test_nonadmin_write_on_shared_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', True, exceptions.PolicyNotAuthorized) def test_nonadmin_read_on_shared_succeeds(self): self._test_nonadmin_action_on_attr('get', 'shared', True) def _test_enforce_adminonly_attribute(self, action): admin_context = context.get_admin_context() target = {'shared': True} result = policy.enforce(admin_context, action, target) self.assertEqual(True, result) def test_enforce_adminonly_attribute_create(self): self._test_enforce_adminonly_attribute('create_network') def test_enforce_adminonly_attribute_update(self): self._test_enforce_adminonly_attribute('update_network') def test_enforce_adminonly_attribute_no_context_is_admin_policy(self): del self.rules[policy.ADMIN_CTX_POLICY] self.rules['admin_only'] = common_policy.parse_rule( self.admin_only_legacy) self.rules['admin_or_owner'] = common_policy.parse_rule( self.admin_or_owner_legacy) self._test_enforce_adminonly_attribute('create_network') def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): action = "create_network" target = {'shared': True, 'tenant_id': 'somebody_else'} self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_enforce_adminonly_nonadminctx_no_ctx_is_admin_policy_403(self): del self.rules[policy.ADMIN_CTX_POLICY] self.rules['admin_only'] = common_policy.parse_rule( self.admin_only_legacy) self.rules['admin_or_owner'] = common_policy.parse_rule( self.admin_or_owner_legacy) action = "create_network" target = {'shared': True, 'tenant_id': 'somebody_else'} self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, target) def _test_build_subattribute_match_rule(self, validate_value): bk = FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = ( validate_value) action = "create_something" target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} self.assertFalse(policy._build_subattr_match_rule( 'attr', FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr'], action, target)) FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk def test_build_subattribute_match_rule_empty_dict_validator(self): self._test_build_subattribute_match_rule({}) def test_build_subattribute_match_rule_wrong_validation_info(self): self._test_build_subattribute_match_rule( {'type:dict': 'wrong_stuff'}) def test_enforce_subattribute(self): action = "create_something" target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} result = policy.enforce(self.context, action, target, None) self.assertEqual(True, result) def test_enforce_admin_only_subattribute(self): action = "create_something" target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} result = policy.enforce(context.get_admin_context(), action, target, None) self.assertEqual(True, result) def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self): action = "create_something" target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, self.context, action, target, None) def test_enforce_regularuser_on_read(self): action = "get_network" target = {'shared': True, 'tenant_id': 'somebody_else'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_firewall_policy_shared(self): action = "get_firewall_policy" target = {'shared': True, 'tenant_id': 'somebody_else'} result = 
policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_firewall_rule_shared(self): action = "get_firewall_rule" target = {'shared': True, 'tenant_id': 'somebody_else'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_tenant_id_check(self): # Trigger a policy with rule admin_or_owner action = "create_network" target = {'tenant_id': 'fake'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_tenant_id_check_parent_resource(self): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake'} action = "create_port:mac" with mock.patch.object(manager.TackerManager.get_instance().plugin, 'get_network', new=fakegetnetwork): target = {'network_id': 'whatever'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_plugin_failure(self): def fakegetnetwork(*args, **kwargs): raise NotImplementedError('Blast!') # the policy check and plugin method we use in this test are irrelevant # so long that we verify that, if *f* blows up, the behavior of the # policy engine to propagate the exception is preserved action = "create_port:mac" with mock.patch.object(manager.TackerManager.get_instance().plugin, 'get_network', new=fakegetnetwork): target = {'network_id': 'whatever'} self.assertRaises(NotImplementedError, policy.enforce, self.context, action, target) def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake'} del self.rules['admin_or_network_owner'] self.rules['admin_or_network_owner'] = common_policy.parse_rule( "role:admin or tenant_id:%(network_tenant_id)s") action = "create_port:mac" with mock.patch.object(manager.TackerManager.get_instance().plugin, 'get_network', new=fakegetnetwork): target = {'network_id': 'whatever'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_tenant_id_check_no_target_field_raises(self): # Try and add a bad rule self.assertRaises( exceptions.PolicyInitError, common_policy.parse_rule, 'tenant_id:(wrong_stuff)') def _test_enforce_tenant_id_raises(self, bad_rule): self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule) # Trigger a policy with rule admin_or_owner action = "create_network" target = {'tenant_id': 'fake'} policy.init() self.assertRaises(exceptions.PolicyCheckError, policy.enforce, self.context, action, target) def test_enforce_tenant_id_check_malformed_target_field_raises(self): self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s') def test_enforce_tenant_id_check_invalid_parent_resource_raises(self): self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') def test_get_roles_context_is_admin_rule_missing(self): rules = dict((k, common_policy.parse_rule(v)) for k, v in { "some_other_rule": "role:admin", }.items()) common_policy.set_rules(common_policy.Rules(rules)) # 'admin' role is expected for bw compatibility self.assertEqual(['admin'], policy.get_admin_roles()) def test_get_roles_with_role_check(self): rules = dict((k, common_policy.parse_rule(v)) for k, v in { policy.ADMIN_CTX_POLICY: "role:admin", }.items()) common_policy.set_rules(common_policy.Rules(rules)) self.assertEqual(['admin'], policy.get_admin_roles()) def test_get_roles_with_rule_check(self): rules = dict((k, common_policy.parse_rule(v)) for k, v in { policy.ADMIN_CTX_POLICY: "rule:some_other_rule", "some_other_rule": "role:admin", }.items()) 
        common_policy.set_rules(common_policy.Rules(rules))
        self.assertEqual(['admin'], policy.get_admin_roles())

    def test_get_roles_with_or_check(self):
        self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2",
            "rule1": "role:admin_1",
            "rule2": "role:admin_2"
        }.items())
        self.assertEqual(['admin_1', 'admin_2'], policy.get_admin_roles())

    def test_get_roles_with_other_rules(self):
        self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            policy.ADMIN_CTX_POLICY: "role:xxx or other:value",
        }.items())
        self.assertEqual(['xxx'], policy.get_admin_roles())

    def _test_set_rules_with_deprecated_policy(self, input_rules, expected_rules):
        policy._set_rules(json.dumps(input_rules))
        # verify deprecated policy has been removed
        for pol in input_rules:
            self.assertNotIn(pol, common_policy._rules)
        # verify deprecated policy was correctly translated. Iterate
        # over items for compatibility with unittest2 in python 2.6
        for rule in expected_rules:
            self.assertIn(rule, common_policy._rules)
            self.assertEqual(expected_rules[rule],
                             str(common_policy._rules[rule]))

    def test_set_rules_with_deprecated_view_policy(self):
        self._test_set_rules_with_deprecated_policy(
            {'extension:router:view': 'rule:admin_or_owner'},
            {'get_network:router:external': 'rule:admin_or_owner'})

    def test_set_rules_with_deprecated_set_policy(self):
        expected_policies = ['create_network:provider:network_type',
                             'create_network:provider:physical_network',
                             'create_network:provider:segmentation_id',
                             'update_network:provider:network_type',
                             'update_network:provider:physical_network',
                             'update_network:provider:segmentation_id']
        self._test_set_rules_with_deprecated_policy(
            {'extension:provider_network:set': 'rule:admin_only'},
            dict((policy, 'rule:admin_only') for policy in expected_policies))
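# Illustrative sketch only: the tests above drive Tacker's policy wrapper through
# oslo.policy's module-level helpers (parse_rule/set_rules). The behaviour of an
# "admin_or_owner" rule can also be seen through oslo.policy's public Enforcer API;
# the rule strings below mirror the ones registered in TackerPolicyTestCase, while the
# enforcer setup and credential dicts are assumptions made for demonstration.
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_defaults([
    oslo_policy.RuleDefault('context_is_admin', 'role:admin'),
    oslo_policy.RuleDefault('admin_or_owner',
                            'rule:context_is_admin or tenant_id:%(tenant_id)s'),
    oslo_policy.RuleDefault('create_network', 'rule:admin_or_owner'),
])

target = {'tenant_id': 'fake'}
owner = {'roles': ['user'], 'tenant_id': 'fake'}
stranger = {'roles': ['user'], 'tenant_id': 'somebody_else'}

print(enforcer.enforce('create_network', target, owner))     # True: tenant_id matches
print(enforcer.enforce('create_network', target, stranger))  # False: neither admin nor owner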
from unittest import TestCase from mock import MagicMock, patch import frontend.controllers.scanctrl as module from lib.irma.common.utils import IrmaScanStatus from tempfile import TemporaryFile from lib.irma.common.exceptions import IrmaValueError, IrmaTaskError, \ IrmaDatabaseResultNotFound, IrmaFtpError from lib.irma.common.utils import IrmaReturnCode class TestModuleScanctrl(TestCase): def setUp(self): self.old_File = module.File self.old_Scan = module.Scan self.old_build_sha256_path = module.build_sha256_path self.old_celery_brain = module.celery_brain self.File = MagicMock() self.Scan = MagicMock() self.build_sha256_path = MagicMock() self.celery_brain = MagicMock() module.File = self.File module.Scan = self.Scan module.build_sha256_path = self.build_sha256_path module.celery_brain = self.celery_brain def tearDown(self): module.File = self.old_File module.Scan = self.old_Scan module.build_sha256_path = self.old_build_sha256_path module.celery_brain = self.old_celery_brain del self.File del self.Scan del self.build_sha256_path del self.celery_brain def test001_add_files(self): fobj = TemporaryFile() filename = "n_test" scan, session = MagicMock(), MagicMock() function = "frontend.controllers.scanctrl.IrmaScanStatus.filter_status" with patch(function) as mock: scan.status = IrmaScanStatus.empty module.add_files(scan, {filename: fobj}, session) self.assertTrue(mock.called) self.assertEqual(mock.call_args, ((scan.status, IrmaScanStatus.empty, IrmaScanStatus.ready),)) self.File.load_from_sha256.assert_called_once() self.build_sha256_path.assert_called_once() fobj.close() def test002_check_probe(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.ready probelist = ['probe1', 'probe2'] all_probelist = ['probe1', 'probe2', 'probe3'] scan.set_probelist.return_value = None self.celery_brain.probe_list.return_value = all_probelist module.check_probe(scan, probelist, session) self.assertTrue(scan.set_probelist.called) scan.set_probelist.assert_called_once_with(probelist) def test003_check_probe_None(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.ready probelist = None all_probelist = ['probe1', 'probe2', 'probe3'] scan.set_probelist.return_value = None self.celery_brain.probe_list.return_value = all_probelist module.check_probe(scan, probelist, session) self.assertTrue(scan.set_probelist.called) scan.set_probelist.assert_called_once_with(all_probelist) def test004_check_probe_unknown_probe(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.ready probelist = ['probe1', 'probe2', 'probe6'] all_probelist = ['probe1', 'probe2', 'probe3'] scan.set_probelist.return_value = None self.celery_brain.probe_list.return_value = all_probelist with self.assertRaises(IrmaValueError) as context: module.check_probe(scan, probelist, session) self.assertFalse(scan.set_probelist.called) self.assertEquals(str(context.exception), "probe probe6 unknown") def test005_cancel_status_empty(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.empty res = module.cancel(scan, session) self.assertIsNone(res) scan.set_status.assert_called_once_with(IrmaScanStatus.cancelled) def test006_cancel_status_ready(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.ready res = module.cancel(scan, session) self.assertIsNone(res) scan.set_status.assert_called_once_with(IrmaScanStatus.cancelled) def test007_cancel_status_uploaded(self): scan, session = MagicMock(), MagicMock() scan.status = 
IrmaScanStatus.uploaded label = IrmaScanStatus.label[scan.status] expected = "can not cancel scan in {} status".format(label) with self.assertRaises(IrmaValueError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), expected) def test008_cancel_status_launched_ok(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.launched retcode = IrmaReturnCode.success cancel_res = {'cancel_details': "details"} self.celery_brain.scan_cancel.return_value = (retcode, cancel_res) res = module.cancel(scan, session) self.assertEqual(res, cancel_res['cancel_details']) scan.set_status.assert_called_once_with(IrmaScanStatus.cancelled) def test008_cancel_status_launched_status_processed(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.launched retcode = IrmaReturnCode.success status = IrmaScanStatus.label[IrmaScanStatus.processed] cancel_res = {'status': status} self.celery_brain.scan_cancel.return_value = (retcode, cancel_res) with self.assertRaises(IrmaValueError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), "can not cancel scan in {0} status".format(status)) scan.set_status.assert_called_once_with(IrmaScanStatus.processed) def test008_cancel_status_launched_status_error(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.error_ftp_upload res = module.cancel(scan, session) self.assertIsNone(res) scan.set_status.assert_not_called() def test008_cancel_status_launched_brain_error(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.launched retcode = IrmaReturnCode.error ret_val = "reason" self.celery_brain.scan_cancel.return_value = (retcode, ret_val) with self.assertRaises(IrmaTaskError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), ret_val) scan.set_status.assert_not_called() def test009_cancel_status_processed(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.processed label = IrmaScanStatus.label[scan.status] expected = "can not cancel scan in {} status".format(label) with self.assertRaises(IrmaValueError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), expected) def test010_cancel_status_flushed(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.flushed label = IrmaScanStatus.label[scan.status] expected = "can not cancel scan in {} status".format(label) with self.assertRaises(IrmaValueError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), expected) def test011_cancel_status_cancelling(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.cancelling label = IrmaScanStatus.label[scan.status] expected = "can not cancel scan in {} status".format(label) with self.assertRaises(IrmaValueError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), expected) def test012_cancel_status_cancelled(self): scan, session = MagicMock(), MagicMock() scan.status = IrmaScanStatus.cancelled label = IrmaScanStatus.label[scan.status] expected = "can not cancel scan in {} status".format(label) with self.assertRaises(IrmaValueError) as context: module.cancel(scan, session) self.assertEqual(str(context.exception), expected) def test013_set_launched_status_uploaded(self): scan = MagicMock() scan.status = IrmaScanStatus.uploaded self.Scan.load_from_ext_id.return_value = scan module.set_launched("whatever", "whatever") 
scan.set_status.assert_called_with(IrmaScanStatus.launched) def test014_set_launched_status_not_uploaded(self): scan = MagicMock() scan.status = IrmaScanStatus.finished self.Scan.load_from_ext_id.return_value = scan module.set_launched("whatever", "whatever") self.assertEqual(scan.status, IrmaScanStatus.finished) @patch("frontend.controllers.scanctrl.sha256sum") def test015_new_file_existing(self, m_sha256sum): m_file = MagicMock() m_file.path = "whatever" self.File.load_from_sha256.return_value = m_file fobj, session = MagicMock(), MagicMock() res = module._new_file(fobj, session) self.assertEqual(res, m_file) @patch("frontend.controllers.scanctrl.sha256sum") @patch("frontend.controllers.scanctrl.save_to_file") def test016_new_file_existing_deleted(self, m_save_to_file, m_sha256sum): m_file = MagicMock() self.File.load_from_sha256.return_value = m_file fobj, session = MagicMock(), MagicMock() path = "testpath" self.build_sha256_path.return_value = path m_file.path = None module._new_file(fobj, session) m_save_to_file.assert_called_once_with(fobj, path) @patch("frontend.controllers.scanctrl.md5sum") @patch("frontend.controllers.scanctrl.sha1sum") @patch("frontend.controllers.scanctrl.sha256sum") @patch("frontend.controllers.scanctrl.Magic") @patch("frontend.controllers.scanctrl.save_to_file") def test017_new_file_not_existing(self, m_save_to_file, m_magic, m_sha256sum, m_sha1sum, m_md5sum): self.File.load_from_sha256.side_effect = IrmaDatabaseResultNotFound fobj, session = MagicMock(), MagicMock() path = "testpath" self.build_sha256_path.return_value = path module._new_file(fobj, session) m_md5sum.assert_called_once_with(fobj) m_sha1sum.assert_called_once_with(fobj) m_sha256sum.assert_called_once_with(fobj) m_magic.assert_called() self.File.assert_called() def test018_update_ref_no_prev(self): m_fw, m_file, m_pr = MagicMock(), MagicMock(), MagicMock() m_probe = MagicMock() probe = MagicMock() probename = "probe1" probe.name = probename m_probe.name = probe m_file.ref_results = [] m_pr.name = "probe2" module._update_ref_results(m_fw, m_file, m_pr) self.assertItemsEqual(m_file.ref_results, [m_pr]) def test019_update_ref_prev(self): m_fw, m_file = MagicMock(), MagicMock() m_pr_new, m_pr_old = MagicMock(), MagicMock() m_probe = MagicMock() probe = MagicMock() probename = "probe1" probe.name = probename m_probe.name = probe m_pr_old.name = "probe1" m_pr_new.name = "probe1" m_file.ref_results = [m_pr_old] module._update_ref_results(m_fw, m_file, m_pr_new) self.assertItemsEqual(m_file.ref_results, [m_pr_new]) @patch("frontend.controllers.scanctrl.log") def test020_update_ref_error(self, m_log): m_fw, m_file, m_pr = MagicMock(), MagicMock(), MagicMock() m_probe = MagicMock() probe = MagicMock() probename = "probe1" probe.name = probename m_probe.name = probe m_pr.name = "probe1" m_file.ref_results = [m_pr, m_pr] module._update_ref_results(m_fw, m_file, m_pr) self.assertItemsEqual(m_file.ref_results, [m_pr, m_pr]) m_log.error.called_once() def test021_fetch_probe_results(self): m_fw, m_pr = MagicMock(), MagicMock() probename = "probe1" m_pr.name = probename m_fw.probe_results = [m_pr] res = module._fetch_probe_result(m_fw, probename) self.assertEqual(res, m_pr) def test021b_fetch_probe_results_none(self): m_fw, m_pr = MagicMock(), MagicMock() probename = "probe1" m_pr.name = probename m_fw.probe_results = [] res = module._fetch_probe_result(m_fw, probename) self.assertIsNone(res) @patch("frontend.controllers.scanctrl.log") def test022_resubmit_new_files_error(self, m_log): m_scan, m_parent_file = 
MagicMock(), MagicMock() m_resubmit_fws, m_session = MagicMock(), MagicMock() hash_uploaded = "whatever" m_parent_file.files_web = [] module._resubmit_files(m_scan, m_parent_file, m_resubmit_fws, hash_uploaded, m_session) m_log.error.assert_called_once() self.celery_brain.scan_launch.assert_not_called() @patch("frontend.controllers.scanctrl.log") def test023_resubmit_new_files_no_new_file(self, m_log): m_scan, m_parent_file = MagicMock(), MagicMock() m_session = MagicMock() m_fw = MagicMock() hash_uploaded = ["whatever"] m_fw.file.sha256 = hash_uploaded[0] m_resubmit_fws = [m_fw] m_parent_file.files_web = [m_fw] module._resubmit_files(m_scan, m_parent_file, m_resubmit_fws, hash_uploaded, m_session) self.celery_brain.scan_launch.assert_not_called() @patch("frontend.controllers.scanctrl.log") def test024_resubmit_new_files_new_file(self, m_log): m_scan, m_parent_file = MagicMock(), MagicMock() m_session = MagicMock() m_fw = MagicMock() hash_uploaded = ["whatever"] m_fw.file.sha256 = "anotherthing" m_resubmit_fws = [m_fw] m_parent_file.files_web = [m_fw] module._resubmit_files(m_scan, m_parent_file, m_resubmit_fws, hash_uploaded, m_session) self.celery_brain.scan_launch.assert_called() @patch("frontend.controllers.scanctrl._new_fileweb") @patch("frontend.controllers.scanctrl.ftp_ctrl") def test025_append_new_files(self, m_ftpctrl, m_new_fw): m_scan, m_session = MagicMock(), MagicMock() filename = "filename" filehash = "filehash" uploaded_files = {filename: filehash} m_fobj = MagicMock() m_ftpctrl.download_file_data.return_value = m_fobj module._append_new_files_to_scan(m_scan, uploaded_files, m_session) m_download = m_ftpctrl.download_file_data m_download.assert_called_once_with(m_scan.external_id, filehash) m_new_fw.assert_called_once_with(m_scan, filename, m_fobj, m_session) def test026_sanitize_res(self): pattern = u"\u0000test" + "\x00" pattern_expected = "test" dic_key = "te.st$key" dic_expected = "te_stkey" dic = {'unicode': unicode(pattern), 'list': [pattern], 'dict': {dic_key: pattern}, 'else': "else"} expected = {'unicode': pattern_expected, 'list': [pattern_expected], 'dict': {dic_expected: pattern_expected}, 'else': "else"} res = module._sanitize_res(dic) self.assertItemsEqual(res.values(), expected.values()) def test027_add_empty_result_refresult(self): fw, scan, session = MagicMock(), MagicMock(), MagicMock() pr1, pr2 = MagicMock(), MagicMock() probe1, probe2 = "Probe1", "Probe2" probelist = [probe1, probe2] pr1.name = probe1 pr2.name = probe2 scan.force = False fw.file.ref_results = [pr1, pr2] fw.probe_results = [] module._add_empty_result(fw, probelist, scan, session) self.assertItemsEqual(fw.probe_results, [pr1, pr2]) @patch("frontend.controllers.scanctrl._fetch_known_results") def test027_add_empty_result_knownresult(self, m_fetch_known_results): fw, scan, session = MagicMock(), MagicMock(), MagicMock() pr1, pr2 = MagicMock(), MagicMock() probe1, probe2 = "Probe1", "Probe2" probelist = [probe1, probe2] pr1.name = probe1 pr2.name = probe2 scan.force = True m_fetch_known_results.return_value = [pr1, pr2] fw.probe_results = [] module._add_empty_result(fw, probelist, scan, session) self.assertItemsEqual(fw.probe_results, [pr1, pr2]) @patch("frontend.controllers.scanctrl.ProbeResult") def test028_add_empty_result_noresult(self, m_ProbeResult): fw, scan, session = MagicMock(), MagicMock(), MagicMock() probe1, probe2 = "Probe1", "Probe2" probelist = [probe1, probe2] scan.force = True fw.probe_results = [] res = module._add_empty_result(fw, probelist, scan, session) 
self.assertItemsEqual(res, probelist) @patch("frontend.controllers.scanctrl.FileWeb") def test029_fetch_known_results(self, m_FileWeb): m_scan, m_file, m_session = MagicMock(), MagicMock(), MagicMock() m_scan.id = "scanid" m_file.id = "fileid" fw1 = MagicMock() m_FileWeb.load_by_scanid_fileid.return_value = [fw1, fw1] res = module._fetch_known_results(m_file, m_scan, m_session) self.assertItemsEqual(res, fw1.probe_results) @patch("frontend.controllers.scanctrl.braintasks") @patch("frontend.controllers.scanctrl._add_empty_result") def test030_add_empty_results(self, m_add_empty_result, m_braintasks): m_scan, m_session = MagicMock(), MagicMock() fw1, fw2 = MagicMock(), MagicMock() fw1.file.sha256 = "sha256file1" fw1.file.mimetype = "mimetypefile1" fw2.file.sha256 = "sha256file2" fw2.file.mimetype = "mimetypefile2" fw_list = [fw1, fw2] probe1, probe2 = "Probe1", "Probe2" probelist = [probe1, probe2] m_add_empty_result.return_value = probelist m_braintasks.mimetype_filter_scan_request = lambda x: x scan_request = module._create_scan_request(fw_list, probelist, True) res = module._add_empty_results(fw_list, scan_request, m_scan, m_session) self.assertItemsEqual(res.to_dict().values(), scan_request.to_dict().values()) @patch("frontend.controllers.scanctrl.log") def test031_fetch_probe_results_error(self, m_log): fw, pr = MagicMock(), MagicMock() pr.name = "Probe1" fw.probe_results = [pr, pr] module._fetch_probe_result(fw, pr.name) m_log.error.assert_called_once() @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test032_launch_asynchronous_nothing_to_do(self, m_session_transaction, m_Scan): m_session, m_scan = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session m_scan.status = IrmaScanStatus.ready m_Scan.load_from_ext_id.return_value = m_scan module.launch_asynchronous("whatever") m_scan.set_status.assert_called_once_with(IrmaScanStatus.finished) @patch("frontend.controllers.scanctrl._add_empty_result") @patch("frontend.controllers.scanctrl.ftp_ctrl") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test033_launch_asynchronous(self, m_session_transaction, m_Scan, m_ftp_ctrl, m_add_empty_result): m_scan, m_session = MagicMock(), MagicMock() fw1, fw2 = MagicMock(), MagicMock() file1, file2 = MagicMock(), MagicMock() pathf1, pathf2 = 'path-file1', 'path-file2' file1.path = pathf1 file2.path = pathf2 fw1.file.sha256 = "sha256file1" fw1.file.mimetype = "mimetypefile1" fw2.file.sha256 = "sha256file2" fw2.file.mimetype = "mimetypefile2" m_scan.files_web = [fw1, fw2] m_scan.files = [file1, file2] probe1, probe2 = "Probe1", "Probe2" probelist = [probe1, probe2] m_scan.get_probe_list.return_value = probelist m_add_empty_result.return_value = probelist m_session_transaction().__enter__.return_value = m_session m_scan.status = IrmaScanStatus.ready m_scan.mimetype_filtering = False m_Scan.load_from_ext_id.return_value = m_scan scanid = "scanid" module.launch_asynchronous(scanid) m_ftp_ctrl.upload_scan.assert_called_with(scanid, [pathf1, pathf2]) m_scan.set_status.assert_called_once_with(IrmaScanStatus.uploaded) @patch("frontend.controllers.scanctrl._add_empty_result") @patch("frontend.controllers.scanctrl.ftp_ctrl") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test034_launch_asynchronous_ftp_error(self, m_session_transaction, m_Scan, m_ftp_ctrl, m_add_empty_result): m_scan, m_session = 
MagicMock(), MagicMock() fw1, fw2 = MagicMock(), MagicMock() file1, file2 = MagicMock(), MagicMock() pathf1, pathf2 = 'path-file1', 'path-file2' file1.path = pathf1 file2.path = pathf2 fw1.file.sha256 = "sha256file1" fw1.file.mimetype = "mimetypefile1" fw2.file.sha256 = "sha256file2" fw2.file.mimetype = "mimetypefile2" m_scan.files_web = [fw1, fw2] m_scan.files = [file1, file2] probe1, probe2 = "Probe1", "Probe2" probelist = [probe1, probe2] m_scan.get_probe_list.return_value = probelist m_add_empty_result.return_value = probelist m_session_transaction().__enter__.return_value = m_session m_scan.status = IrmaScanStatus.ready m_scan.mimetype_filtering = False m_Scan.load_from_ext_id.return_value = m_scan scanid = "scanid" m_ftp_ctrl.upload_scan.side_effect = IrmaFtpError() module.launch_asynchronous(scanid) expected = IrmaScanStatus.error_ftp_upload m_scan.set_status.assert_called_once_with(expected) @patch("frontend.controllers.scanctrl.log") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test035_set_result_fw_not_found(self, m_session_transaction, m_Scan, m_log): m_scan, m_session = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session m_scan.get_filewebs_by_sha256.return_value = [] m_Scan.load_from_ext_id.return_value = m_scan module.set_result("scanid", "filehash", "probe", "result") m_log.error.assert_called_once() @patch("frontend.controllers.scanctrl._update_ref_results") @patch("frontend.controllers.scanctrl._fetch_probe_result") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test036_set_result(self, m_session_transaction, m_Scan, m_fetch_pr, m_update_ref_res): scanid = "scanid" filehash = "filehash" probe = "probe" m_scan, m_session = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session fw1, pr1 = MagicMock(), MagicMock() pr1.doc = "ProbeResult" file1 = MagicMock() fw1.file = file1 fw1.probe_results = [pr1] m_scan.get_filewebs_by_sha256.return_value = [fw1] m_Scan.load_from_ext_id.return_value = m_scan result = {'status': 1, 'type': "something"} m_fetch_pr.return_value = pr1 module.set_result(scanid, filehash, probe, result) m_fetch_pr.assert_called_once_with(fw1, probe) m_update_ref_res.assert_called_once_with(fw1, file1, pr1) m_Scan.load_from_ext_id.assert_called_with(scanid, session=m_session) @patch("frontend.controllers.scanctrl.File") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test037_handle_output_files_no_resubmit(self, m_session_transaction, m_Scan, m_File): m_scan, m_session = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session m_scan.resubmit_files = True m_Scan.load_from_ext_id.return_value = m_scan result = {} module.handle_output_files("scanid", "parent_file_hash", "probe", result) m_Scan.load_from_ext_id.assert_called_once_with("scanid", session=m_session) @patch("frontend.controllers.scanctrl.File") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test038_handle_output_files_resubmit_False(self, m_session_transaction, m_Scan, m_File): m_scan, m_session = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session m_scan.resubmit_files = False m_Scan.load_from_ext_id.return_value = m_scan result = {'uploaded_files': []} module.handle_output_files("scanid", "parent_file_hash", "probe", result) 
m_Scan.load_from_ext_id.assert_called_once_with("scanid", session=m_session) @patch("frontend.controllers.scanctrl._filter_children") @patch("frontend.controllers.scanctrl._append_new_files_to_scan") @patch("frontend.controllers.scanctrl.File") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test039_handle_output_files_resubmit(self, m_session_transaction, m_Scan, m_File, m_append_new_files_to_scan, m_filter_children): m_scan, m_session = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session m_scan.resubmit_files = True m_Scan.load_from_ext_id.return_value = m_scan uploaded_files = {'filename': 'filehash'} result = {'uploaded_files': uploaded_files} fw1 = MagicMock() m_append_new_files_to_scan.return_value = [fw1] m_parentfile = MagicMock() m_parentfile.children = [] m_filter_children.return_value = uploaded_files m_File.load_from_sha256.return_value = m_parentfile module.handle_output_files("scanid", "parent_file_hash", "probe", result) m_Scan.load_from_ext_id.assert_called_once_with("scanid", session=m_session) m_append_new_files_to_scan.assert_called_once_with(m_scan, uploaded_files, m_session) self.assertItemsEqual(m_parentfile.children, [fw1]) @patch("frontend.controllers.scanctrl._filter_children") @patch("frontend.controllers.scanctrl._append_new_files_to_scan") @patch("frontend.controllers.scanctrl.File") @patch("frontend.controllers.scanctrl.Scan") @patch("frontend.controllers.scanctrl.session_transaction") def test040_handle_output_files_resubmit_none(self, m_session_transaction, m_Scan, m_File, m_append_new_files_to_scan, m_filter_children): m_scan, m_session = MagicMock(), MagicMock() m_session_transaction().__enter__.return_value = m_session m_scan.resubmit_files = True m_Scan.load_from_ext_id.return_value = m_scan uploaded_files = {} result = {'uploaded_files': uploaded_files} fw1 = MagicMock() m_append_new_files_to_scan.return_value = [fw1] m_parentfile = MagicMock() m_parentfile.children = [] m_filter_children.return_value = {} m_File.load_from_sha256.return_value = m_parentfile module.handle_output_files("scanid", "parent_file_hash", "probe", result) m_Scan.load_from_ext_id.assert_called_once_with("scanid", session=m_session) m_append_new_files_to_scan.assert_not_called() self.assertItemsEqual(m_parentfile.children, [])
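# Illustrative sketch, not part of the original test module: test026 above
# expects module._sanitize_res to strip NUL characters from string values and
# to rewrite dictionary keys by mapping '.' to '_' and dropping '$'. A minimal,
# hypothetical pure-Python sanitizer consistent with those expectations (an
# assumption about the real implementation, not a copy of it) could look like:
#
#   def example_sanitize(obj):
#       if isinstance(obj, dict):
#           return {k.replace('.', '_').replace('$', ''): example_sanitize(v)
#                   for k, v in obj.items()}
#       if isinstance(obj, list):
#           return [example_sanitize(v) for v in obj]
#       if isinstance(obj, basestring):
#           return obj.replace(u'\u0000', '')
#       return obj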
from snovault import ( AuditFailure, audit_checker, ) from .item import STATUS_LEVEL def audit_file_processed_derived_from(value, system): if value['output_category'] in ['raw data', 'reference']: return if 'derived_from' not in value or \ 'derived_from' in value and len(value['derived_from']) == 0: detail = 'derived_from is a list of files that were used to create a given file; ' + \ 'for example, fastq file(s) will appear in the derived_from list of an ' + \ 'alignments file. ' + \ 'Processed file {} '.format(value['@id']) + \ 'is missing the requisite file specification in its derived_from list.' yield AuditFailure('missing derived_from', detail, level='INTERNAL_ACTION') return if value['file_format'] != 'bam': return # Ignore replaced BAMs because missing derived_from logic should be applied to their # replacements instead (ENCD-3595). if value['status'] == 'replaced': return fastq_bam_counter = 0 for f in value.get('derived_from'): if (f['file_format'] == 'bam' or f['file_format'] == 'fastq' or (f['file_format'] in ['fasta', 'csfasta', 'csqual'] and f['output_type'] == 'reads' and f['output_category'] == 'raw data')): # Audit shouldn't trigger if status isn't registered in STATUS_LEVEL dict. if f['status'] not in STATUS_LEVEL or value['status'] not in STATUS_LEVEL: return if STATUS_LEVEL[f['status']] >= STATUS_LEVEL[value['status']]: fastq_bam_counter += 1 if f['dataset'] != value['dataset'].get('@id'): detail = 'derived_from is a list of files that were used ' + \ 'to create a given file; ' + \ 'for example, fastq file(s) will appear in the ' + \ 'derived_from list of an ' + \ 'alignments file. ' + \ 'Alignments file {} '.format(value['@id']) + \ 'from experiment {} '.format(value['dataset']['@id']) + \ 'specifies a file {} '.format(f['@id']) + \ 'from a different experiment {} '.format(f['dataset']) + \ 'in its derived_from list.' yield AuditFailure('inconsistent derived_from', detail, level='INTERNAL_ACTION') if fastq_bam_counter == 0: detail = 'derived_from is a list of files that were used to create a given file; ' + \ 'for example, fastq file(s) will appear in the derived_from list of an ' + \ 'alignments file. ' + \ 'Alignments file {} '.format(value['@id']) + \ 'is missing the requisite file specification in its derived_from list.' yield AuditFailure('missing derived_from', detail, level='INTERNAL_ACTION') def audit_file_assembly(value, system): if 'derived_from' not in value: return for f in value['derived_from']: if f.get('assembly') and value.get('assembly') and \ f.get('assembly') != value.get('assembly'): detail = 'Processed file {} '.format(value['@id']) + \ 'assembly {} '.format(value['assembly']) + \ 'does not match assembly {} of the file {} '.format( f['assembly'], f['@id']) + \ 'it was derived from.' yield AuditFailure('inconsistent assembly', detail, level='WARNING') return def audit_file_replicate_match(value, system): ''' A file's replicate should belong to the same experiment that the file does. These tend to get confused when replacing objects. 
''' if 'replicate' not in value: return rep_exp = value['replicate']['experiment'] file_exp = value['dataset']['@id'] if rep_exp != file_exp: detail = 'File {} from experiment {} '.format(value['@id'], value['dataset']['@id']) + \ 'is associated with replicate [{},{}] '.format( value['replicate']['biological_replicate_number'], value['replicate']['technical_replicate_number']) + \ '{}, but that replicate is associated with a different '.format( value['replicate']['@id']) + \ 'experiment {}.'.format(value['replicate']['experiment']) yield AuditFailure('inconsistent replicate', detail, level='ERROR') return def audit_paired_with(value, system): ''' A file with a paired_end needs a paired_with. Should be handled in the schema. A paired_with should be the same replicate ''' if 'paired_end' not in value: return if value['paired_end'] in ['1,2']: return if 'paired_with' not in value: return if 'replicate' not in value['paired_with']: return if 'replicate' not in value: detail = 'File {} has paired_end = {}. It requires a replicate'.format( value['@id'], value['paired_end']) yield AuditFailure('missing replicate', detail, level='INTERNAL_ACTION') return if value['replicate'].get('@id') != value['paired_with']['replicate']: detail = 'File {} has replicate {}. It is paired_with file {} with replicate {}'.format( value['@id'], value['replicate'].get('@id'), value['paired_with']['@id'], value['paired_with'].get('replicate')) yield AuditFailure('inconsistent paired_with', detail, level='ERROR') return if value['paired_end'] == '1': context = system['context'] paired_with = context.get_rev_links('paired_with') if len(paired_with) > 1: detail = 'Paired end 1 file {} paired_with by multiple paired end 2 files: {!r}'.format( value['@id'], paired_with ) yield AuditFailure('multiple paired_with', detail, level='ERROR') return def audit_file_format_specifications(value, system): for doc in value.get('file_format_specifications', []): if doc['document_type'] != "file format specification": detail = 'File {} has document {} not of type file format specification'.format( value['@id'], doc['@id'] ) yield AuditFailure('inconsistent document_type', detail, level='ERROR') return def audit_file_controlled_by(value, system): ''' A fastq in a ChIP-seq experiment should have a controlled_by ''' if value['dataset'].get('assay_term_name') not in ['ChIP-seq', 'RAMPAGE', 'CAGE', 'shRNA knockdown followed by RNA-seq', 'siRNA knockdown followed by RNA-seq', 'CRISPR genome editing followed by RNA-seq']: return if value['file_format'] not in ['fastq']: return if 'target' in value['dataset'] and \ 'control' in value['dataset']['target'].get('investigated_as', []): return if not value.get('controlled_by'): detail = 'controlled_by is a list of files that are used as ' + \ 'controls for a given experimental file. ' + \ 'Fastq files generated in a {} assay require the '.format( value['dataset']['assay_term_name']) + \ 'specification of control fastq file(s) in the controlled_by list. ' + \ 'Fastq file {} '.format( value['@id']) + \ 'is missing the requisite file specification in controlled_by list.' 
yield AuditFailure('missing controlled_by', detail, level='NOT_COMPLIANT') return possible_controls = value['dataset'].get('possible_controls') biosample = value['dataset'].get('biosample_term_id') biosample_term_name = value['dataset'].get('biosample_term_name') run_type = value.get('run_type', None) read_length = value.get('read_length', None) if value['controlled_by']: for ff in value['controlled_by']: control_bs = ff['dataset'].get('biosample_term_id') control_run = ff.get('run_type', None) control_length = ff.get('read_length', None) if control_bs != biosample: detail = 'controlled_by is a list of files that are used as controls for a given file. ' + \ 'This experiment was performed using {}, but '.format(biosample_term_name) + \ 'file {} contains in controlled_by list a file '.format(value['@id']) + \ '{} that belongs to experiment with different biosample {}.'.format( ff['@id'], ff['dataset'].get('biosample_term_name')) yield AuditFailure('inconsistent control', detail, level='ERROR') return if ff['file_format'] != value['file_format']: detail = 'controlled_by is a list of files that are used as controls for a given file. ' + \ 'File {} with file_format {} contains in controlled_by list '.format( value['@id'], value['file_format'],) + \ 'a file {} with different file_format {}.'.format( ff['@id'], ff['file_format']) yield AuditFailure('inconsistent control', detail, level='ERROR') return if (possible_controls is None) or (ff['dataset']['@id'] not in possible_controls): detail = 'possible_controls is a list of experiment(s) that can serve as ' + \ 'analytical controls for a given experiment. ' + \ 'controlled_by is a list of files that are used as ' + \ 'controls for a given file. ' + \ 'File {} contains in controlled_by list a file {} '.format( value['@id'], ff['@id']) + \ 'that belongs to an experiment {} that '.format(ff['dataset']['@id']) + \ 'is not specified in possible_controls list of this experiment.' yield AuditFailure('inconsistent control', detail, level='ERROR') return if (run_type is None) or (control_run is None): continue if (read_length is None) or (control_length is None): continue if run_type != control_run and \ value['dataset'].get('assay_term_name') not in ['RAMPAGE', 'CAGE']: detail = 'File {} is {} but its control file {} is {}'.format( value['@id'], run_type, ff['@id'], control_run ) yield AuditFailure('inconsistent control run_type', detail, level='WARNING') if read_length != control_length and \ abs(read_length - control_length) > 2 and \ value['dataset'].get('assay_term_name') not in \ ['shRNA knockdown followed by RNA-seq', 'siRNA knockdown followed by RNA-seq', 'CRISPR genome editing followed by RNA-seq']: detail = 'File {} is {} but its control file {} is {}'.format( value['@id'], value['read_length'], ff['@id'], ff['read_length'] ) yield AuditFailure('inconsistent control read length', detail, level='WARNING') return def audit_duplicate_quality_metrics(value, system): quality_metrics = value.get('quality_metrics') if not quality_metrics: return metric_signatures = [] audit_signatures = [] for metric in quality_metrics: metric_type = metric.get('@type', [None])[0] signature = ( metric_type, metric.get('processing_stage') ) if signature not in metric_signatures: metric_signatures.append(signature) elif signature not in audit_signatures: # Add so only yields audit once per signature per file. 
            audit_signatures.append(signature)
            detail = 'File {} has more than one {} quality metric'.format(
                value.get('@id'),
                metric_type
            )
            yield AuditFailure(
                'duplicate quality metric',
                detail,
                level='INTERNAL_ACTION'
            )


function_dispatcher = {
    'audit_derived_from': audit_file_processed_derived_from,
    'audit_assembly': audit_file_assembly,
    'audit_replicate_match': audit_file_replicate_match,
    'audit_paired_with': audit_paired_with,
    'audit_specifications': audit_file_format_specifications,
    'audit_controlled_by': audit_file_controlled_by,
    'audit_duplicate_quality_metrics': audit_duplicate_quality_metrics,
}


@audit_checker('File',
               frame=['derived_from',
                      'replicate',
                      'paired_with',
                      'file_format_specifications',
                      'dataset',
                      'dataset.target',
                      'platform',
                      'controlled_by',
                      'controlled_by.replicate',
                      'controlled_by.dataset',
                      'controlled_by.paired_with',
                      'controlled_by.platform',
                      'quality_metrics',
                      ])
def audit_file(value, system):
    for function_name in function_dispatcher.keys():
        for failure in function_dispatcher[function_name](value, system):
            yield failure

# def audit_file_chip_seq_control_read_depth(value, system):
#     migrated to experiment https://encodedcc.atlassian.net/browse/ENCD-3493

# def audit_file_flowcells(value, system):  # removed in version 56
#     http://redmine.encodedcc.org/issues/5060

# def audit_modERN_ChIP_pipeline_steps(value, system):
#     removed https://encodedcc.atlassian.net/browse/ENCD-3493

# def audit_file_pipeline_status(value, system): removed at release 56
#     http://redmine.encodedcc.org/issues/5017

# def audit_file_md5sum_integrity(value, system):  # removed release 55

# def audit_file_derived_from_revoked(value, system): removed at release 56
#     http://redmine.encodedcc.org/issues/5018

# def audit_file_biological_replicate_number_match
#     https://encodedcc.atlassian.net/browse/ENCD-3493

# def audit_file_platform(value, system): removed from release v56
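# Illustrative sketch, not part of the original audit module: audit_file simply
# fans one (value, system) pair out to every generator registered in
# function_dispatcher and re-yields the AuditFailure objects they produce, so a
# hypothetical caller collecting the failures for a single file document could
# do roughly the following (the document contents are invented for the example):
#
#   fake_file = {
#       '@id': '/files/EXAMPLE/',
#       'output_category': 'alignment',
#       'file_format': 'bam',
#       'status': 'released',
#       'dataset': {'@id': '/experiments/EXAMPLE/'},
#   }
#   failures = list(audit_file(fake_file, {}))
#   # -> a single 'missing derived_from' failure, since derived_from is absent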
#!/bin/env python # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from amaranth import Signal from amaranth.lib.fifo import SyncFIFOBuffered from amaranth_cfu import simple_cfu, DualPortMemory, is_pysim_run from . import config from .macc import Accumulator, ByteToWordShifter, Madd4Pipeline from .post_process import PostProcessor from .store import CircularIncrementer, FilterValueFetcher, InputStore, InputStoreSetter, NextWordGetter, StoreSetter from .registerfile import RegisterFileInstruction, RegisterSetter from .sequencing import Sequencer class Mnv2RegisterInstruction(RegisterFileInstruction): def _make_setter(self, m, reg_num, name): """Constructs and registers a simple RegisterSetter. Return a pair of signals from setter: (value, set) """ setter = RegisterSetter() m.submodules[name] = setter self.register_xetter(reg_num, setter) return setter.value, setter.set def _make_output_channel_param_store( self, m, reg_num, name, restart_signal): """Constructs and registers a param store connected to a memory and a circular incrementer. Returns a pair of signals: (current data, inc.next) """ m.submodules[f'{name}_dp'] = dp = DualPortMemory( width=32, depth=config.OUTPUT_CHANNEL_PARAM_DEPTH, is_sim=is_pysim_run()) m.submodules[f'{name}_inc'] = inc = CircularIncrementer( config.OUTPUT_CHANNEL_PARAM_DEPTH) m.submodules[f'{name}_set'] = psset = StoreSetter( 32, 1, config.OUTPUT_CHANNEL_PARAM_DEPTH) self.register_xetter(reg_num, psset) m.d.comb += [ # Restart param store when reg is set psset.restart.eq(restart_signal), inc.restart.eq(restart_signal), # Incrementer is limited to number of items already set inc.limit.eq(psset.count), # Hook memory up to various components dp.r_addr.eq(inc.r_addr), ] m.d.comb += psset.connect_write_port([dp]) return dp.r_data, inc.next def _make_filter_value_store( self, m, reg_num, name, restart_signal): """Constructs and registers a param store connected to a memory and a circular incrementer. Returns a list of dual port memories used to implement the store, plus signal for count of words. 
""" dps = [] for i in range(4): m.submodules[f'{name}_dp_{i}'] = dp = DualPortMemory( width=32, depth=config.FILTER_DATA_MEM_DEPTH, is_sim=is_pysim_run()) dps.append(dp) m.submodules[f'{name}_set'] = fvset = StoreSetter( 32, 4, config.FILTER_DATA_MEM_DEPTH) self.register_xetter(reg_num, fvset) m.d.comb += [ # Restart param store when reg is set fvset.restart.eq(restart_signal), ] m.d.comb += fvset.connect_write_port(dps) return dps, fvset.count, fvset.updated def _make_input_store(self, m, name, restart_signal, input_depth_words): m.submodules[f'{name}'] = ins = InputStore( config.MAX_PER_PIXEL_INPUT_WORDS) m.submodules[f'{name}_set'] = insset = InputStoreSetter() m.d.comb += insset.connect(ins) self.register_xetter(25, insset) _, mark_finished = self._make_setter(m, 112, 'mark_read') read_finished = Signal() m.d.comb += [ ins.restart.eq(restart_signal), ins.input_depth.eq(input_depth_words), ins.r_finished.eq(mark_finished | read_finished), ] return ins, read_finished def _make_filter_value_getter(self, m, fvf_data): fvg_next = Signal() if is_pysim_run(): m.submodules['fvg'] = fvg = NextWordGetter() m.d.comb += [ fvg.data.eq(fvf_data), fvg.ready.eq(1), fvg_next.eq(fvg.next), ] self.register_xetter(110, fvg) return fvg_next def _make_input_store_getter(self, m, ins_r_data, ins_r_ready): insget_next = Signal() if is_pysim_run(): m.submodules['insget'] = insget = NextWordGetter() m.d.comb += [ insget.data.eq(ins_r_data), insget.ready.eq(ins_r_ready), insget_next.eq(insget.next), ] self.register_xetter(111, insget) return insget_next def _make_output_queue(self, m): m.submodules['FIFO'] = fifo = SyncFIFOBuffered( depth=config.OUTPUT_QUEUE_DEPTH, width=32) m.submodules['oq_get'] = oq_get = NextWordGetter() m.d.comb += [ oq_get.data.eq(fifo.r_data), oq_get.ready.eq(fifo.r_rdy), fifo.r_en.eq(oq_get.next), ] self.register_xetter(34, oq_get) oq_has_space = fifo.w_level < (config.OUTPUT_QUEUE_DEPTH - 8) return fifo.w_data, fifo.w_en, oq_has_space def elab_xetters(self, m): # Simple registers input_depth_words, set_id = self._make_setter( m, 10, 'set_input_depth_words') self._make_setter(m, 11, 'set_output_depth') input_offset, _ = self._make_setter(m, 12, 'set_input_offset') output_offset, _ = self._make_setter(m, 13, 'set_output_offset') activation_min, _ = self._make_setter(m, 14, 'set_activation_min') activation_max, _ = self._make_setter(m, 15, 'set_activation_max') # Stores of input and output data _, restart = self._make_setter(m, 20, 'set_output_batch_size') multiplier, multiplier_next = self._make_output_channel_param_store( m, 21, 'store_output_multiplier', restart) shift, shift_next = self._make_output_channel_param_store( m, 22, 'store_output_shift', restart) bias, bias_next = self._make_output_channel_param_store( m, 23, 'store_output_bias', restart) fv_mems, fv_count, fv_updated = self._make_filter_value_store( m, 24, 'store_filter_values', restart) m.submodules['fvf'] = fvf = FilterValueFetcher( config.FILTER_DATA_MEM_DEPTH) m.d.comb += fvf.connect_read_ports(fv_mems) m.d.comb += [ # fetcher only works for multiples of 4, and only for multiples of # 4 > 8 fvf.limit.eq(fv_count & ~0x3), fvf.updated.eq(fv_updated), fvf.restart.eq(restart), ] ins, ins_r_finished = self._make_input_store( m, 'ins', set_id, input_depth_words) # Make getters for filter and instruction next words # Only required during pysim unit tests fvg_next = self._make_filter_value_getter(m, fvf.data) insget_next = self._make_input_store_getter(m, ins.r_data, ins.r_ready) # The output queue oq_w_data, oq_enable, 
            oq_has_space = self._make_output_queue(m)

        # Calculation apparatus
        m.submodules['madd'] = madd = Madd4Pipeline()
        m.d.comb += [
            madd.offset.eq(input_offset),
            madd.f_data.eq(fvf.data),
            madd.i_data.eq(ins.r_data),
        ]
        m.submodules['acc'] = acc = Accumulator()
        m.d.comb += acc.in_value.eq(madd.result)
        m.submodules['pp'] = pp = PostProcessor()
        m.d.comb += [
            pp.offset.eq(output_offset),
            pp.activation_min.eq(activation_min),
            pp.activation_max.eq(activation_max),
            pp.bias.eq(bias),
            pp.multiplier.eq(multiplier),
            pp.shift.eq(shift),
            pp.accumulator.eq(acc.result),
        ]
        m.submodules['btw'] = btw = ByteToWordShifter()
        m.d.comb += btw.in_value.eq(pp.result)

        # Sequencer
        _, start_run = self._make_setter(m, 33, 'start_run')
        m.submodules['seq'] = seq = Sequencer()
        m.d.comb += [
            seq.start_run.eq(start_run),
            seq.in_store_ready.eq(ins.r_ready),
            seq.fifo_has_space.eq(oq_has_space),
            seq.filter_value_words.eq(fv_count),
            seq.input_depth_words.eq(input_depth_words),
            ins.r_next.eq(insget_next | seq.gate),
            fvf.next.eq(fvg_next | seq.gate),
            ins_r_finished.eq(seq.all_output_finished),
            acc.add_en.eq(seq.madd_done),
            acc.clear.eq(seq.acc_done),
            bias_next.eq(seq.acc_done),
            multiplier_next.eq(seq.acc_done),
            shift_next.eq(seq.acc_done),
            btw.shift_en.eq(seq.pp_done),
            oq_w_data.eq(btw.result),
            oq_enable.eq(seq.out_word_done),
        ]


def make_cfu():
    return simple_cfu({
        0: Mnv2RegisterInstruction(),
    })
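# Reader's note (not part of the original gateware): the CFU register numbers
# wired up by Mnv2RegisterInstruction above are, as an informal summary:
#
#   10: set_input_depth_words      11: set_output_depth
#   12: set_input_offset           13: set_output_offset
#   14: set_activation_min         15: set_activation_max
#   20: set_output_batch_size (restarts the per-channel param stores)
#   21: output multiplier store    22: output shift store
#   23: output bias store          24: filter value store
#   25: input store setter         33: start_run
#   34: output queue getter       112: mark_read (input store read finished)
#  110/111: filter / input word getters, registered only for pysim unit tests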
# GUI Application automation and testing library # Copyright (C) 2006-2018 Mark Mc Mahon and Contributors # https://github.com/pywinauto/pywinauto/graphs/contributors # http://pywinauto.readthedocs.io/en/latest/credits.html # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of pywinauto nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Windows global hooks in pure Python The implementation uses foreign function interface (FFI) provided by standard Python module **ctypes** and inspired by pyHook, pyhooked and other similar modules (the code was re-written from scratch). It tends to be a superset of pyHook but in pure Python only so it doesn't require compilation. Current set of hooks implemented: * WH_MOUSE_LL * WH_KEYBOARD_LL More detailed documentation about Windows hooks can be found in MSDN: https://msdn.microsoft.com/en-us/library/windows/desktop/ms632589.aspx This module can be used as a stand alone or along with pywinauto. The fork of this code (at some moment) was used in standalone library pyhooked 0.8 maintained by Ethan Smith. 
""" import six from ctypes import wintypes from ctypes import windll from ctypes import CFUNCTYPE from ctypes import POINTER from ctypes import c_int from ctypes import c_uint from ctypes import byref from ctypes import pointer import atexit import sys import time import win32con import win32api from .win32defines import VK_PACKET from .actionlogger import ActionLogger from .win32structures import KBDLLHOOKSTRUCT from .win32structures import MSLLHOOKSTRUCT from .win32structures import LRESULT HOOKCB = CFUNCTYPE(LRESULT, c_int, wintypes.WPARAM, wintypes.LPARAM) windll.kernel32.GetModuleHandleA.restype = wintypes.HMODULE windll.kernel32.GetModuleHandleA.argtypes = [wintypes.LPCSTR] windll.user32.SetWindowsHookExA.restype = wintypes.HHOOK windll.user32.SetWindowsHookExA.argtypes = [c_int, HOOKCB, wintypes.HINSTANCE, wintypes.DWORD] windll.user32.SetWindowsHookExW.restype = wintypes.HHOOK windll.user32.SetWindowsHookExW.argtypes = [c_int, HOOKCB, wintypes.HINSTANCE, wintypes.DWORD] windll.user32.GetMessageW.argtypes = [POINTER(wintypes.MSG), wintypes.HWND, c_uint, c_uint] windll.user32.TranslateMessage.argtypes = [POINTER(wintypes.MSG)] windll.user32.DispatchMessageW.argtypes = [POINTER(wintypes.MSG)] # BOOL WINAPI PeekMessage( # _Out_ LPMSG lpMsg, # _In_opt_ HWND hWnd, # _In_ UINT wMsgFilterMin, # _In_ UINT wMsgFilterMax, # _In_ UINT wRemoveMsg #); windll.user32.PeekMessageW.argtypes = [POINTER(wintypes.MSG), wintypes.HWND, c_uint, c_uint, c_uint] windll.user32.PeekMessageW.restypes = wintypes.BOOL # LRESULT WINAPI CallNextHookEx( # _In_opt_ HHOOK hhk, # _In_ int nCode, # _In_ WPARAM wParam, # _In_ LPARAM lParam # ); windll.user32.CallNextHookEx.argtypes = [wintypes.HHOOK, c_int, wintypes.WPARAM, wintypes.LPARAM] windll.user32.CallNextHookEx.restypes = LRESULT class KeyboardEvent(object): """Created when a keyboard event happened""" def __init__(self, current_key=None, event_type=None, pressed_key=None): self.current_key = current_key self.event_type = event_type self.pressed_key = pressed_key class MouseEvent(object): """Created when a mouse event happened""" def __init__(self, current_key=None, event_type=None, mouse_x=0, mouse_y=0): self.current_key = current_key self.event_type = event_type self.mouse_x = mouse_x self.mouse_y = mouse_y class Hook(object): """Hook for low level keyboard and mouse events""" MOUSE_ID_TO_KEY = {win32con.WM_MOUSEMOVE: 'Move', win32con.WM_LBUTTONDOWN: 'LButton', win32con.WM_LBUTTONUP: 'LButton', win32con.WM_RBUTTONDOWN: 'RButton', win32con.WM_RBUTTONUP: 'RButton', win32con.WM_MBUTTONDOWN: 'WheelButton', win32con.WM_MBUTTONUP: 'WheelButton', win32con.WM_MOUSEWHEEL: 'Wheel'} MOUSE_ID_TO_EVENT_TYPE = {win32con.WM_MOUSEMOVE: None, win32con.WM_LBUTTONDOWN: 'key down', win32con.WM_LBUTTONUP: 'key up', win32con.WM_RBUTTONDOWN: 'key down', win32con.WM_RBUTTONUP: 'key up', win32con.WM_MBUTTONDOWN: 'key down', win32con.WM_MBUTTONUP: 'key up', win32con.WM_MOUSEWHEEL: None} # TODO: use constants from win32con: VK_BACK, VK_TAB, VK_RETURN ... 
ID_TO_KEY = {1: 'LButton', # win32con.VK_LBUTTON 2: 'RButton', # win32con.VK_RBUTTON 3: 'Cancel', # win32con.VK_CANCEL 4: 'MButton', # win32con.VK_MBUTTON 5: 'XButton1', # win32con.VK_XBUTTON1 6: 'XButton2', # win32con.VK_XBUTTON2 7: 'Undefined1', 8: 'Back', 9: 'Tab', 10: 'Reserved1', 11: 'Reserved2', 12: 'Clear', # win32con.VK_CLEAR 13: 'Return', # win32con.VK_RETURN 14: 'Undefined2', 15: 'Undefined3', 16: 'SHIFT', # win32con.VK_SHIFT 17: 'CONTROL', # win32con.VK_CONTROL 18: 'Menu', # win32con.VK_MENU 19: 'Pause', # win32con.VK_PAUSE 20: 'Capital', 21: 'Kana', # win32con.VK_KANA and win32con.VK_HANGUL 22: 'Undefined4', 23: 'Junja', # win32con.VK_JUNJA 24: 'Final', # win32con.VK_FINAL 25: 'Kanji', # win32con.VK_KANJI and win32con.VK_HANJA 26: 'Undefined5', 27: 'Escape', 28: 'Convert', # win32con.VK_CONVERT 29: 'NonConvert', # win32con.VK_NONCONVERT 30: 'Accept', # win32con.VK_ACCEPT 31: 'ModeChange', # win32con.VK_MODECHANGE 32: 'Space', 33: 'Prior', 34: 'Next', 35: 'End', 36: 'Home', 37: 'Left', 38: 'Up', 39: 'Right', 40: 'Down', 41: 'Select', # win32con.VK_SELECT 42: 'Print', # win32con.VK_PRINT 43: 'Execute', # win32con.VK_EXECUTE 44: 'Snapshot', 45: 'Insert', # win32con.VK_INSERT 46: 'Delete', 47: 'Help', # win32con.VK_HELP 48: '0', 49: '1', 50: '2', 51: '3', 52: '4', 53: '5', 54: '6', 55: '7', 56: '8', 57: '9', 58: 'Undefined6', 59: 'Undefined7', 60: 'Undefined8', 61: 'Undefined9', 62: 'Undefined10', 63: 'Undefined11', 64: 'Undefined12', 65: 'A', 66: 'B', 67: 'C', 68: 'D', 69: 'E', 70: 'F', 71: 'G', 72: 'H', 73: 'I', 74: 'J', 75: 'K', 76: 'L', 77: 'M', 78: 'N', 79: 'O', 80: 'P', 81: 'Q', 82: 'R', 83: 'S', 84: 'T', 85: 'U', 86: 'V', 87: 'W', 88: 'X', 89: 'Y', 90: 'Z', 91: 'Lwin', 92: 'Rwin', 93: 'App', 94: 'Reserved3', 95: 'Sleep', 96: 'Numpad0', 97: 'Numpad1', 98: 'Numpad2', 99: 'Numpad3', 100: 'Numpad4', 101: 'Numpad5', 102: 'Numpad6', 103: 'Numpad7', 104: 'Numpad8', 105: 'Numpad9', 106: 'Multiply', 107: 'Add', 108: 'Separator', # win32con.VK_SEPARATOR 109: 'Subtract', 110: 'Decimal', 111: 'Divide', 112: 'F1', 113: 'F2', 114: 'F3', 115: 'F4', 116: 'F5', 117: 'F6', 118: 'F7', 119: 'F8', 120: 'F9', 121: 'F10', 122: 'F11', 123: 'F12', 124: 'F13', 125: 'F14', 126: 'F15', 127: 'F16', 128: 'F17', 129: 'F18', 130: 'F19', 131: 'F20', 132: 'F21', 133: 'F22', 134: 'F23', 135: 'F24', 136: 'Unassigned1', 137: 'Unassigned2', 138: 'Unassigned3', 139: 'Unassigned4', 140: 'Unassigned5', 141: 'Unassigned6', 142: 'Unassigned7', 143: 'Unassigned8', 144: 'Numlock', 145: 'Scroll', # win32con.VK_SCROLL 146: 'OemSpecific1', 147: 'OemSpecific2', 148: 'OemSpecific3', 149: 'OemSpecific4', 150: 'OemSpecific5', 151: 'OemSpecific6', 152: 'OemSpecific7', 153: 'OemSpecific8', 154: 'OemSpecific9', 155: 'OemSpecific10', 156: 'OemSpecific11', 157: 'OemSpecific12', 158: 'OemSpecific13', 159: 'OemSpecific14', 160: 'Lshift', 161: 'Rshift', 162: 'Lcontrol', 163: 'Rcontrol', 164: 'Lmenu', 165: 'Rmenu', 166: 'BrowserBack', # win32con.VK_BROWSER_BACK 167: 'BrowserForward', # win32con.VK_BROWSER_FORWARD 168: 'BrowserRefresh', # not defined in win32con 169: 'BrowserStop', # not defined in win32con 170: 'BrowserSearch', # not defined in win32con 171: 'BrowserFavourites', # not defined in win32con 172: 'BrowserHome', # not defined in win32con 173: 'Volume_mute', # win32con.VK_VOLUME_MUTE 174: 'Volume_down', # win32con.VK_VOLUME_DOWN 175: 'Volume_up', # win32con.VK_VOLUME_UP 176: 'NextTrack', # win32con.VK_MEDIA_NEXT_TRACK 177: 'PrevTrack', # win32con.VK_MEDIA_PREV_TRACK 178: 'StopTrack', # not defined in win32con 179: 
'PlayPause', # win32con.VK_MEDIA_PLAY_PAUSE 180: 'LaunchMail', # not defined in win32con 181: 'MediaSelect', # not defined in win32con 182: 'LaunchApp1', # not defined in win32con 183: 'LaunchApp2', # not defined in win32con 184: 'Reserved4', 185: 'Reserved5', 186: 'Oem_1', 187: 'Oem_Plus', 188: 'Oem_Comma', 189: 'Oem_Minus', 190: 'Oem_Period', 191: 'Oem_2', 192: 'Oem_3', 193: 'Reserved6', 194: 'Reserved7', 195: 'Reserved8', 196: 'Reserved9', 197: 'Reserved10', 198: 'Reserved11', 199: 'Reserved12', 200: 'Reserved13', 201: 'Reserved14', 202: 'Reserved15', 203: 'Reserved16', 204: 'Reserved17', 205: 'Reserved18', 206: 'Reserved19', 207: 'Reserved20', 208: 'Reserved21', 209: 'Reserved22', 210: 'Reserved23', 211: 'Reserved24', 212: 'Reserved25', 213: 'Reserved26', 214: 'Reserved27', 215: 'Reserved28', 216: 'Unassigned9', 217: 'Unassigned10', 218: 'Unassigned11', 219: 'Oem_4', 220: 'Oem_5', 221: 'Oem_6', 222: 'Oem_7', 223: 'Oem_8', # not defined in win32cona 224: 'Reserved29', 225: 'OemSpecific15', 226: 'Oem_102', 227: 'OemSpecific16', 228: 'OemSpecific17', 229: 'ProcessKey', # win32con.VK_PROCESSKEY 230: 'OemSpecific18', 231: 'VkPacket', # win32con.VK_PACKET. It has a special processing in kbd_ll ! 232: 'Unassigned12', 233: 'OemSpecific19', 234: 'OemSpecific20', 235: 'OemSpecific21', 236: 'OemSpecific22', 237: 'OemSpecific23', 238: 'OemSpecific24', 239: 'OemSpecific25', 240: 'OemSpecific26', 241: 'OemSpecific27', 242: 'OemSpecific28', 243: 'OemSpecific29', 244: 'OemSpecific30', 245: 'OemSpecific31', 246: 'Attn', # win32con.VK_ATTN 247: 'CrSel', # win32con.VK_CRSEL 248: 'ExSel', # win32con.VK_EXSEL 249: 'ErEOF', # win32con.VK_EREOF 250: 'Play', # win32con.VK_PLAY 251: 'Zoom', # win32con.VK_ZOOM 252: 'Noname', # win32con.VK_NONAME 253: 'PA1', # win32con.VK_PA1 254: 'OemClear', # win32con.VK_OEM_CLEAR 1001: 'mouse left', # mouse hotkeys 1002: 'mouse right', 1003: 'mouse middle', 1000: 'mouse move', # single event hotkeys 1004: 'mouse wheel up', 1005: 'mouse wheel down', 1010: 'Ctrl', # merged hotkeys 1011: 'Alt', 1012: 'Shift', 1013: 'Win'} event_types = {win32con.WM_KEYDOWN: 'key down', # WM_KEYDOWN for normal keys win32con.WM_KEYUP: 'key up', # WM_KEYUP for normal keys win32con.WM_SYSKEYDOWN: 'key down', # WM_SYSKEYDOWN, is used for Alt key. win32con.WM_SYSKEYUP: 'key up', # WM_SYSKEYUP, is used for Alt key. 
} def __init__(self): self.handler = None self.pressed_keys = [] self.keyboard_id = None self.mouse_id = None self.mouse_is_hook = False self.keyboard_is_hook = False def _process_kbd_data(self, kb_data_ptr): """Process KBDLLHOOKSTRUCT data received from low level keyboard hook calls""" kbd = KBDLLHOOKSTRUCT.from_address(kb_data_ptr) current_key = None key_code = kbd.vkCode if key_code == VK_PACKET: scan_code = kbd.scanCode current_key = six.unichr(scan_code) elif key_code in self.ID_TO_KEY: current_key = six.u(self.ID_TO_KEY[key_code]) else: al = ActionLogger() al.log("_process_kbd_data, bad key_code: {0}".format(key_code)) return current_key def _process_kbd_msg_type(self, event_code, current_key): """Process event codes from low level keyboard hook calls""" event_type = None event_code_word = 0xFFFFFFFF & event_code if event_code_word in self.event_types: event_type = self.event_types[event_code_word] else: al = ActionLogger() al.log("_process_kbd_msg_type, bad event_type: {0}".format(event_type)) if event_type == 'key down': self.pressed_keys.append(current_key) elif event_type == 'key up': if current_key in self.pressed_keys: self.pressed_keys.remove(current_key) else: al = ActionLogger() al.log("_process_kbd_msg_type, can't remove a key: {0}".format(current_key)) return event_type def _keyboard_ll_hdl(self, code, event_code, kb_data_ptr): """Execute when a keyboard low level event has been triggered""" try: # The next hook in chain must be always called res = windll.user32.CallNextHookEx(self.keyboard_id, code, event_code, kb_data_ptr) if not self.handler: return res current_key = self._process_kbd_data(kb_data_ptr) event_type = self._process_kbd_msg_type(event_code, current_key) event = KeyboardEvent(current_key, event_type, self.pressed_keys) self.handler(event) except Exception: al = ActionLogger() al.log("_keyboard_ll_hdl, {0}".format(sys.exc_info()[0])) al.log("_keyboard_ll_hdl, code {0}, event_code {1}".format(code, event_code)) raise return res def _mouse_ll_hdl(self, code, event_code, mouse_data_ptr): """Execute when a mouse low level event has been triggerred""" try: # The next hook in chain must be always called res = windll.user32.CallNextHookEx(self.mouse_id, code, event_code, mouse_data_ptr) if not self.handler: return res current_key = None event_code_word = 0xFFFFFFFF & event_code if event_code_word in self.MOUSE_ID_TO_KEY: current_key = self.MOUSE_ID_TO_KEY[event_code_word] event_type = None if current_key != 'Move': if event_code in self.MOUSE_ID_TO_EVENT_TYPE: event_type = self.MOUSE_ID_TO_EVENT_TYPE[event_code] # Get the mouse position: x and y ms = MSLLHOOKSTRUCT.from_address(mouse_data_ptr) event = MouseEvent(current_key, event_type, ms.pt.x, ms.pt.y) self.handler(event) except Exception: al = ActionLogger() al.log("_mouse_ll_hdl, {0}".format(sys.exc_info()[0])) al.log("_mouse_ll_hdl, code {0}, event_code {1}".format(code, event_code)) raise return res def hook(self, keyboard=True, mouse=False): """Hook mouse and/or keyboard events""" if not (mouse or keyboard): return self.mouse_is_hook = mouse self.keyboard_is_hook = keyboard if self.keyboard_is_hook: @HOOKCB def _kbd_ll_cb(ncode, wparam, lparam): """Forward the hook event to ourselves""" return self._keyboard_ll_hdl(ncode, wparam, lparam) self.keyboard_id = windll.user32.SetWindowsHookExW( win32con.WH_KEYBOARD_LL, _kbd_ll_cb, win32api.GetModuleHandle(None), 0) if self.mouse_is_hook: @HOOKCB def _mouse_ll_cb(code, event_code, mouse_data_ptr): """Forward the hook event to ourselves""" return 
self._mouse_ll_hdl(code, event_code, mouse_data_ptr) self.mouse_id = windll.user32.SetWindowsHookExA( win32con.WH_MOUSE_LL, _mouse_ll_cb, win32api.GetModuleHandle(None), 0) self.listen() def unhook_mouse(self): """Unhook mouse events""" if self.mouse_is_hook: self.mouse_is_hook = False windll.user32.UnhookWindowsHookEx(self.mouse_id) def unhook_keyboard(self): """Unhook keyboard events""" if self.keyboard_is_hook: self.keyboard_is_hook = False windll.user32.UnhookWindowsHookEx(self.keyboard_id) def stop(self): """Stop the listening loop""" self.unhook_keyboard() self.unhook_mouse() def is_hooked(self): """Verify if any of hooks are active""" return self.mouse_is_hook or self.keyboard_is_hook def _process_win_msgs(self): """Peek and process queued windows messages""" message = wintypes.MSG() while True: res = windll.user32.PeekMessageW(pointer(message), 0, 0, 0, win32con.PM_REMOVE) if not res: break if message.message == win32con.WM_QUIT: self.stop() sys.exit(0) else: windll.user32.TranslateMessage(byref(message)) windll.user32.DispatchMessageW(byref(message)) def listen(self): """Listen for events""" atexit.register(windll.user32.UnhookWindowsHookEx, self.keyboard_id) atexit.register(windll.user32.UnhookWindowsHookEx, self.mouse_id) while self.is_hooked(): self._process_win_msgs() time.sleep(0.02) if __name__ == "__main__": def on_event(args): """Callback for keyboard and mouse events""" if isinstance(args, KeyboardEvent): if args.current_key == 'A' and args.event_type == 'key down' and 'Lcontrol' in args.pressed_key: print("Ctrl + A was pressed") if args.current_key == 'K' and args.event_type == 'key down': print("K was pressed") if args.current_key == 'M' and args.event_type == 'key down' and 'U' in args.pressed_key: hk.unhook_mouse() print("Unhook mouse") if args.current_key == 'K' and args.event_type == 'key down' and 'U' in args.pressed_key: hk.unhook_keyboard() print("Unhook keyboard") if isinstance(args, MouseEvent): if args.current_key == 'RButton' and args.event_type == 'key down': print("Right button pressed at ({0}, {1})".format(args.mouse_x, args.mouse_y)) if args.current_key == 'WheelButton' and args.event_type == 'key down': print("Wheel button pressed") hk = Hook() hk.handler = on_event hk.hook(keyboard=True, mouse=True)
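# Illustrative sketch, not part of the original module: because Hook.hook()
# blocks in its own message loop, one possible pattern is to run it on a
# background thread and call stop() from the main thread when done (the usual
# Win32 hook thread-affinity caveats apply; the names below are invented for
# the example):
#
#   import threading
#
#   hook = Hook()
#   hook.handler = on_event
#   listener = threading.Thread(target=hook.hook,
#                               kwargs={'keyboard': True, 'mouse': False})
#   listener.start()
#   ...            # interact with the application under test
#   hook.stop()    # unhooks keyboard/mouse, so listen() exits its loop
#   listener.join()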
#!/usr/bin/env python import re from pygments.lexer import ( RegexLexer, bygroups, include, ) from pygments.token import ( Comment, Generic, Keyword, Name, Number, Operator, Punctuation, String, Text, Whitespace, ) __all__ = ['SwiftLexer', 'SwiftConsoleLexer'] class SwiftLexer(RegexLexer): name = 'Swift' aliases = ['swift'] filenames = ['*.swift'] flags = re.MULTILINE | re.DOTALL _isa = r'([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(:)(\s+)([A-Z0-9_][a-zA-Z0-9_]*)' _isa_comma = r'([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(:)(\s+)' + \ '([A-Z0-9_][a-zA-Z0-9_]*)(,\s?)' _name = r'[a-zA-Z_][a-zA-Z0-9_?]*' tokens = { 'root': [ (r'^', Punctuation, 'root2'), ], 'root2': [ (r'\n', Text, '#pop'), (r'//.*?\n', Comment.Single, '#pop'), (r'/\*', Comment.Multiline, 'comment'), (r'\b(var|let)\s', Keyword.Declaration, 'var-decl'), (r'\bfor\s', Keyword.Reserved, 'for-loop'), (r'\b(func|init|deinit)\s', Keyword.Declaration, 'func-decl'), (r'(\bset\b)(\s?)(\()', bygroups( Keyword.Declaration, Whitespace, Punctuation), 'arg-list'), (r'(set|get)(:)', bygroups(Keyword.Reserved, Punctuation)), (r'\b(self|Self)\b', Name.Builtin.Pseudo), (r'\bid\b', Name.Builtin), (r'\bimport\s+', Keyword.Namespace, 'import'), (r'\b(class|struct|protocol|extension)\s', Keyword.Declaration, 'class-decl'), (r'(\b[A-Z][a-zA-Z0-9_]*\s?)(\()', bygroups(Name.Constant, Punctuation), 'type-cast'), (r'(\b[A-Z][a-zA-Z0-9_]*)(\.)([a-z][a-zA-Z0-9_]*)', bygroups(Name.Constant, Punctuation, Name), 'arg-list'), (r'"', String, 'string'), (r'(\bnew\b\s?)', Keyword.Reserved, 'class-name'), (r'\b(true|false)\b', Keyword.Reserved), (r'\b(if|else)\s', Keyword.Reserved), (r'\b(return|break)\b', Keyword.Reserved), (r'[\^\*!%&<>+=/?-]|\.{2}', Operator), (r'\$([0-9]+)', Name.Variable), # Tokens (r'[\[\]\(\)\{\}\|:;,.#]', Punctuation), (r'[0-9]+\.[0-9]+', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'\s', Whitespace), (r'\(', Punctuation, 'tuple'), include('name'), ], 'isa': [ (_isa, bygroups( Name, Whitespace, Punctuation, Whitespace, Name.Constant)), ], 'class-isa': [ (_isa, bygroups(Name.Class, Whitespace, Punctuation, Whitespace, Name.Constant)), ], 'var-isa': [ (_isa, bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, Name.Constant)), ], 'var-isa-pop': [ (_isa, bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, Name.Constant), '#pop'), ], 'var-isa-comma': [ (_isa_comma, bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, Name.Constant, Punctuation)), ], 'var-name': [ (r'[a-zA-Z_][a-zA-Z0-9_?]*', Name.Variable), ], 'tuple': [ (r'\(', Punctuation, 'in-tuple'), ], 'in-tuple': [ (r'\)', Punctuation, '#pop'), include('class-name'), include('name'), include('isa'), include('root2'), ], 'name': [ (_name, Name), ], 'comment': [ (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'import': [ (_name, Name.Namespace), # ('\n', Punctuation, '#pop'), ], 'generic-type': [ (r'\s', Whitespace), (r'>', Punctuation, '#pop'), include('class-name'), include('isa'), include('root2'), ], 'class-name': [ (r'[A-Z][a-zA-Z0-9_?]*', Name.Constant), (r'(\[)([0-9]+)(\])', bygroups(Operator, Number.Integer, Operator)), (r'<', Punctuation, 'generic-type'), (r'\.\(', Punctuation, 'arg-list'), (r'\(', Punctuation, 'type-cast'), (r'\)', Punctuation, '#pop'), ], 'label': [ (r'[a-zA-Z_][a-zA-Z0-9_]*:(?=\s*\n)', Name.Label), ], 'ws-pop': [ (r'\s?[\s\n]', Whitespace, '#pop'), ], 'var-decl': [ (r'(\[)([\w\s,]*)(\])(\s+)', bygroups( Punctuation, Name.Attribute, 
Punctuation, Whitespace)), include('tuple'), include('var-isa-comma'), include('var-isa-pop'), include('var-name'), (r',\s+', Punctuation, 'var-decl'), include('ws-pop'), ], 'for-loop': [ (r'\sin\s', Keyword.Reserved), include('isa'), include('name'), include('ws-pop'), include('root2'), ], 'func-decl': [ (r'(\[)([\w\s,]*)(\])(\s+)', bygroups( Punctuation, Name.Attribute, Punctuation, Whitespace)), (r'\s?\breturn\b', Keyword.Reserved, 'root2'), (r'\s?\w', Name.Function), (r'<', Punctuation, 'generic-type'), (r'\(\s?', Punctuation, 'arg-list'), (r'\s?->\s?', Operator, 'return-type'), (r'\s?\{', Punctuation, '#pop'), ], 'return-type': [ include('tuple'), include('class-name'), (r'\bid\b', Name.Builtin), (r'\s?\{', Punctuation, '#pop'), (r'\s?\)', Punctuation), ], 'class-decl': [ (r'\{', Punctuation, '#pop'), (r'(\[)([\w\s,]*)(\])(\s+)', bygroups( Punctuation, Name.Attribute, Punctuation, Whitespace)), include('class-isa'), (r'[A-Z][a-zA-Z0-9_?]*', Name.Class), (r'\s', Whitespace), (r'<', Punctuation, 'generic-type'), ], 'arg-list': [ (r',\s?', Punctuation), (r'\)', Punctuation, '#pop'), include('isa'), (r'\s?->\s?', Operator, 'return-type'), include('root2'), ], 'type-cast': [ (r'\)', Punctuation, '#pop'), include('root2'), ], 'in-interpolated': [ ('\)', String.Interpol, '#pop'), include('root2'), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'\\\(', String.Interpol, 'in-interpolated'), (r'[^\\"]+', String), (r'\\', String), ], } class SwiftConsoleLexer(RegexLexer): name = 'SwiftConsole' aliases = ['swift-console'] filenames = ['*.swiftc'] flags = re.MULTILINE | re.DOTALL _isa = r'([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(:)(\s+)([A-Z0-9_][a-zA-Z0-9_]*)' _isa_comma = r'([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(:)(\s+)' + \ r'([A-Z0-9_][a-zA-Z0-9_]*)(,\s?)' _name = r'[a-zA-Z_][a-zA-Z0-9_?]*' tokens = SwiftLexer.tokens.copy() tokens['root'] = [ (r'Welcome to swift. Type \':help\' for assistance.', Generic.Prompt), (r'(\(swift\) | )', Generic.Prompt, 'root2'), (r'\(swift\)', Generic.Prompt), (r' ', Generic.Prompt), (r'//.*?\n', Generic.Output), (r'<REPL Buffer>:[0-9]*:[0-9]*:.*?\n', Generic.Heading), (r'~*?\^\s?~*?\n', Generic.Heading), (r'.*?\n', Generic.Output), ]
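# Illustrative usage sketch (not part of the lexers themselves): the classes
# above can be driven through the standard pygments API. The source snippet and
# formatter choice here are arbitrary examples.
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#
#   source = 'let greeting: String = "hello"\n'
#   print(highlight(source, SwiftLexer(), TerminalFormatter()))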
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import config from . import state class interface_ref(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/interface-ref. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Reference to an interface or subinterface """ __slots__ = ("_path_helper", "_extmethods", "__config", "__state") _yang_name = "interface-ref" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "interfaces", "interface", "interface-ref", ] def _get_config(self): """ Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config (container) YANG Description: Configured reference to interface / subinterface """ return self.__config def _set_config(self, v, load=False): """ Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config (container) If this variable is read-only (config: false) in the source YANG file, then _set_config is considered as a private method. 
Backends looking to populate this variable should do so via calling thisObj._set_config() directly. YANG Description: Configured reference to interface / subinterface """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """config must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__config = t if hasattr(self, "_set"): self._set() def _unset_config(self): self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/state (container) YANG Description: Operational state for interface-ref """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. 
YANG Description: Operational state for interface-ref """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) config = __builtin__.property(_get_config, _set_config) state = __builtin__.property(_get_state, _set_state) _pyangbind_elements = OrderedDict([("config", config), ("state", state)]) from . import config from . import state class interface_ref(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/interface-ref. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Reference to an interface or subinterface """ __slots__ = ("_path_helper", "_extmethods", "__config", "__state") _yang_name = "interface-ref" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "interfaces", "interface", "interface-ref", ] def _get_config(self): """ Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config (container) YANG Description: Configured reference to interface / subinterface """ return self.__config def _set_config(self, v, load=False): """ Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config (container) If this variable is read-only (config: false) in the source YANG file, then _set_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_config() directly. 
YANG Description: Configured reference to interface / subinterface """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """config must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__config = t if hasattr(self, "_set"): self._set() def _unset_config(self): self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/state (container) YANG Description: Operational state for interface-ref """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. 
YANG Description: Operational state for interface-ref """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) config = __builtin__.property(_get_config, _set_config) state = __builtin__.property(_get_state, _set_state) _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
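# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the pyangbind-generated
# output above). It assumes the generated package is importable so that the
# relative "from . import config / state" statements resolve; only the
# interface_ref class and the helpers defined above are used.
if __name__ == "__main__":
    iref = interface_ref()
    # _path() reports where this container sits in the
    # openconfig-network-instance tree.
    print(iref._path())
    # config and state are YANGDynClass-wrapped child containers built in
    # __init__ and exposed through the generated properties.
    print(iref.config)
    print(iref.state)
    # _set_config() validates its argument and raises the ValueError shown
    # above for anything not compatible with the config container type.
    try:
        iref._set_config("not-a-container")
    except ValueError:
        print("rejected a value that is not compatible with container")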
''' Dual output E3648A 0-8V / 0-20V 2.5A http://cp.literature.agilent.com/litweb/pdf/E3646-90001.pdf ''' import time import sys import datetime import serial class Timeout(Exception): pass def now(): return datetime.datetime.utcnow().isoformat() def dbg(s): if 0: print 'GPIO %s: %s' % (now(), s) ''' ********************************* Serial ********************************* Just send commands verbatim ''' class PUSerial: def __init__(self, port="/dev/ttyUSB0", baudrate=9600, timeout=0, verbose=False): self.port = port self.verbose = verbose self.ser = serial.Serial(port, baudrate=baudrate, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, rtscts=False, dsrdtr=False, xonxoff=False, timeout=3, writeTimeout=0) self.ser.flushInput() self.ser.flushOutput() def interface(self): return "RS232" def send_str(self, s): if self.verbose: print 'DBG: sending "%s"' % (s) s += "\n" self.ser.write(s) self.ser.flush() def recv_str(self): s = self.ser.readline() s = s.rstrip() if self.verbose: print 'DBG: received "%s"' % (s) return s def sendrecv_str(self, s): if self.verbose: print 'DBG: sending "%s"' % (s) # send without sleep self.ser.write(s + '\n') self.ser.flush() # wait for response line s = self.ser.readline() s = s.rstrip() if self.verbose: print 'DBG: received "%s"' % (s) return s def version(self): return 'N/A' ''' outp: 1 or 2 Device tracks which is currently enabled By default commands act on the last selected output Option argument to per-output commands can switch output if not already selected ''' class E36: def __init__(self, io, verbose=False): self.verbose = verbose self.vendor = None self.model = None # Active rail for commands, unknown at init self.outp = None self.io = io # Make sure simple queries work if not self.version(): raise Exception("Failed init %s" % (io.interface())) ''' *********************************8 MISC *********************************8 ''' def version(self): return self.io.sendrecv_str("SYSTEM:VERSION?") def ident(self): # just vendor, model return self.ident_ex()[0:2] def ident_ex(self): ''' PS ident: ['HEWLETT-PACKARD', 'E3632A', '0', '1.1-5.0-1.0'] ''' ret = self.io.sendrecv_str("*IDN?").split(',') self.vendor = ret[0] self.model = ret[1] sn = ret[2] fw = ret[3] return (self.vendor, self.model, sn, fw) def remote(self): '''Put into remote mode? Required before running any commands''' self.io.send_str("SYSTEM:REMOTE") def local(self): '''Put into local mode? Evidently displays better''' #self.io.send_str("SYSTEM:LOCAL") # to make display updates in real time # for some reason you need to issue the GPIB instead of the device local command self.io.local() def off(self, tsleep=0.2): '''Turn off both outputs''' self.io.send_str("OUTPUT OFF") # Copied from on. Needed? 
        time.sleep(tsleep)

    def on(self, tsleep=0.2):
        '''Turn on both outputs'''
        self.io.send_str("OUTPUT ON")
        # 0.1 causes error, 0.15 fine
        time.sleep(tsleep)  # .15 worked + some margin

    def set_outp(self, outp, tsleep=0.25):
        '''Force selecting given rail'''
        if outp not in (1, 2):
            raise Exception('Bad outp %s' % (outp,))
        # FIXME: hack
        if self.model == 'E3632A':
            return
        self.io.send_str("INSTRUMENT:SELECT OUTP%d" % outp)
        self.outp = outp
        time.sleep(tsleep)

    def disp_vi(self, outp=None):
        '''display actual currents on front panel'''
        # FIXME: hack
        if self.model == 'E3632A':
            return
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("DISP:MODE VI")

    def wait_ready(self):
        '''
        Generally, it is best to use the "Operation Complete" bit (bit 0) in the
        Standard Event register to signal when a command sequence is completed.
        This bit is set in the register after an *OPC command has been executed.
        If you send *OPC after a command which loads a message in the power
        supply's output buffer (query data), you can use the "Operation Complete"
        bit to determine when the message is available. However, if too many
        messages are generated before the *OPC command executes (sequentially),
        the output buffer will overload and the power supply will stop
        processing commands.
        '''
        while True:
            print "sending *OPC?"
            # Poll through the I/O wrapper (send_str() appends the newline and
            # flushes; recv_str() reads and strips the reply line)
            self.io.send_str("*OPC?")
            rx = self.io.recv_str()
            print "got ", rx
            if rx == "1":
                break

    def apply(self, voltage, current, outp=None):
        '''Set both voltage and current at once?'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("APPL %s,%s" % (voltage, current))

    '''
    Errors are retrieved in the first-in-first-out (FIFO) order. The first
    error returned is the first error that was stored. Errors are cleared as
    you read them. When you have read all errors from the queue, the ERROR
    annunciator turns off and the errors are cleared. The power supply beeps
    once each time an error is generated.

    If more than 20 errors have occurred, the last error stored in the queue
    (the most recent error) is replaced with -350, "Queue overflow". No
    additional errors are stored until you remove errors from the queue. If no
    errors have occurred when you read the error queue, the power supply
    responds with +0, "No error" over the remote interface or NO ERRORS from
    the front panel.

    The error queue is cleared by the *CLS (clear status) command or when
    power is cycled. The errors are also cleared when you read the queue. The
    *RST (reset) command does not clear the error queue.
''' def beep(self): '''Call this to annoying your labmates''' self.io.send_str("SYSTEM:BEEPER") def text(self, s): '''Call this to put creepy messages directly on the display''' if len(s) > 11: raise Exception('string too long') self.io.send_str("DISPLAY:TEXT \"%s\"" % (s,)) def text_clr(self): self.io.send_str("DISPlay:TEXT:CLEar") def rst(self, tsleep=1.0): '''Reset the device except for errors''' self.io.send_str("*RST") # Device locks up for a bit time.sleep(tsleep) def clr(self): '''Clear error queue''' self.io.send_str("*CLS") def get_err(self): '''Get next error from queue''' return self.io.sendrecv_str("SYST:ERR?") ''' *********************************8 CURRENT *********************************8 ''' def curr(self, outp=None): '''Get current reading''' return float(self.io.sendrecv_str("MEAS:CURR?")) def curr_max(self, outp=None): '''Get current setpoint as set by set_curr''' return float(self.io.sendrecv_str("CURR?")) def set_curr(self, current, outp=None): '''Set current limit on given output''' if outp is not None and outp != self.outp: self.set_outp(outp) self.io.send_str("CURR %3.3f" % current) ''' *********************************8 VOLTAGE *********************************8 ''' # 0.185 s over serial def volt(self, outp=None): '''Get voltage reading''' if outp is not None and outp != self.outp: self.set_outp(outp) return float(self.io.sendrecv_str("MEAS:VOLT?")) def volt_max(self, outp=None): '''Get voltage setpoint''' if outp is not None and outp != self.outp: self.set_outp(outp) return float(self.io.sendrecv_str("VOLT?")) def set_volt(self, volt, outp=None): '''Set voltage limit on given output''' if outp is not None and outp != self.outp: self.set_outp(outp) self.io.send_str("VOLT %3.3f" % (volt,)) def set_ovp(self, volt, outp=None): '''Set over voltage protection limit on given output''' if outp is not None and outp != self.outp: self.set_outp(outp) self.io.send_str("VOLTAGE:PROT %3.3f" % (volt,)) def ovp_enb(self, outp=None): '''Enable over voltage protection''' if outp is not None and outp != self.outp: self.set_outp(outp) self.io.send_str("VOLTAGE:PROT:STATE ON") def ovp_dis(self, outp=None): '''Disable over voltage protection''' if outp is not None and outp != self.outp: self.set_outp(outp) self.io.send_str("VOLTAGE:PROT:STATE OFF") def ovp_clr(self, outp=None): '''Clear voltage protect fault?''' if outp is not None and outp != self.outp: self.set_outp(outp) self.io.send_str("VOLTAGE:PROT:CLEAR") def print_errors(ps): print 'Errors:' errors = [] while True: s = ps.get_err() if s == '+0,"No error"': break errors.append(s) if errors: for error in errors: print ' %s' % error else: print ' None'
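# ---------------------------------------------------------------------------
# Usage sketch (added for illustration). The serial device path below is an
# assumption; everything else uses only the PUSerial / E36 / print_errors
# definitions above. Voltages and current limits are arbitrary example values.
if __name__ == '__main__':
    io = PUSerial(port="/dev/ttyUSB0", verbose=True)   # assumed device path
    ps = E36(io, verbose=True)
    print(ps.ident())             # (vendor, model)
    ps.remote()                   # required before issuing commands
    ps.set_outp(1)                # select rail 1 explicitly
    ps.set_volt(3.3)              # volts
    ps.set_curr(0.5)              # amps (current limit)
    ps.on()
    print("V=%.3f I=%.3f" % (ps.volt(), ps.curr()))
    print_errors(ps)              # drain the SCPI error queue (FIFO)
    ps.off()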
import wx import os import sys import webbrowser from GENe import * from wx.lib.pubsub import setuparg1 class GENeGUI(wx.Frame): def __init__(self,parent,id): wx.Frame.__init__(self,parent,id,'GENe BLAST Automation and Gene Cataloger', size=(600,300), style=wx.MINIMIZE_BOX|wx.SYSTEM_MENU|wx.CAPTION|wx.CLOSE_BOX|wx.CLIP_CHILDREN) self.Center() self.panel = wx.Panel(self) #Check to see if there are other instances of this same program. If so, close out. self.name = "GENeGUI" self.instance = wx.SingleInstanceChecker(self.name) if self.instance.IsAnotherRunning(): wx.MessageBox("Another instance is running already.", "Only one GENe instance at a time.") time.sleep(5) sys.exit() #IO Info self.openFileName = '' self.saveFileName = '' #Static Texts self.openFileText = wx.StaticText(self.panel, label = '', pos = (120, 16)) self.saveFileText = wx.StaticText(self.panel, label = '', pos = (155, 57)) self.colScroll = wx.StaticText(self.panel, label = 'Column Containing Sequences', pos = (421, 5)) self.progressText = wx.StaticText(self.panel, label = "Waiting to run." , pos = (10, 225)) self.localDBText = wx.StaticText(self.panel, label = "Local Database:" , pos = (35, 180)) self.serverDBText = wx.StaticText(self.panel, label = "Server Database:" , pos = (290, 180)) self.searchText = wx.StaticText(self.panel, label = "BLAST Search Type:" , pos = (290, 143)) self.evalText = wx.StaticText(self.panel, label = "e-value maximum:" , pos = (35, 143)) #Load Bar self.progressBar = wx.Gauge(self.panel, -1, 1, pos= (10,245), size= (400,20)) #Buttons catalogButton = wx.Button(self.panel, label = "Run GENe", pos = (415,217), size = (136, 50)) openFileButton = wx.Button(self.panel, label = "Open Excel File", pos = (10,10), size = (100, 30)) saveFileButton = wx.Button(self.panel, label = "Choose Save Destination", pos = (10,50), size = (140,30)) helpButton = wx.Button(self.panel, label = "Help", pos = (550,217), size = (40, 50)) self.Bind(wx.EVT_BUTTON, self.saveExcelFile, saveFileButton) self.Bind(wx.EVT_BUTTON, self.openExcelFile, openFileButton) self.Bind(wx.EVT_BUTTON, self.runCataloger, catalogButton) self.Bind(wx.EVT_BUTTON, self.openREADME, helpButton) self.Bind(wx.EVT_CLOSE, self.closewindow) #Check Boxes self.localCheck = wx.CheckBox(self.panel, -1, "Local Blast Search (Requires BLAST+)", pos = (35,90)) self.serverCheck = wx.CheckBox(self.panel, -1, "NCBI Server Blast Search (No installation required)", pos = (275,90)) self.serverCheck.SetValue(True) self.queryType = 'queryServer' self.xenoCheck = wx.CheckBox(self.panel, -1, "Use Xenopus Laevis algorithm - When disabled, GENe simply records top three BLAST hits.", pos = (35, 115)) self.xenoCheck.SetValue(True) self.xenopusAlgo = True self.Bind(wx.EVT_CHECKBOX, self.selectXeno, self.xenoCheck) self.Bind(wx.EVT_CHECKBOX, self.selectLocal, self.localCheck) self.Bind(wx.EVT_CHECKBOX, self.selectServer, self.serverCheck) #Choices self.dbChoice = wx.Choice(self.panel, -1, pos = (410, 178), choices = ['nr', 'refseq', 'swissprot', 'pat', 'month', 'pdb', 'refseq_mrna', 'refseq_genomic', 'est', 'est_human', \ 'est_mouse', 'est_others', 'gss', 'htgs', 'pat', 'dbsts', 'chromosome', 'genome', 'HTGS', 'RefSeq RNA', 'RefSeq Protein', 'Build RNA', 'Build Protein', 'ESTs']) self.dbChoice.SetStringSelection('nr') self.serverDatabase = 'nr' self.searchChoice = wx.Choice(self.panel, -1, pos = (410, 140), choices = ['blastn', 'blastp', 'blastx', 'tblastn', 'tblastx']) self.searchChoice.SetStringSelection('blastn') self.searchType = 'blastn' #Text Entry self.dbTextBox = 
wx.TextCtrl (self.panel, -1, 'nt', size = (120, -1), pos = (150, 178)) self.localDatabase = 'nt' self.evalTextBox = wx.TextCtrl(self.panel, -1, '3', size = (120,-1), pos = (150, 140)) self.eValCap = None #Scroll Counter Boxes self.seqColumn = wx.SpinCtrl(self.panel, value='0', pos = (480,24), size = (60, -1)) self.seqColumn.SetRange(0,1000) self.seqCol = 0 #Initialize GENe instance self.newCatalog = GENe(self) def errorPop(self, string, end = False): '''Given a string that presumably contains an error message, this method will create a popup displaying that message''' wx.MessageBox(string, "GENe Error or Update") if end == True: self.closewindow(None) def runCataloger(self,event): "Main method. Passess in most of the GUI's instnace variables to GENe and runs GENe in a thread." #Make sure there is only one instance of GENe running as a result of this program. if self.newCatalog.running == False: #Collect contents from text boxes and GUI choices. self.eValCap = float(self.evalTextBox.GetValue()) self.serverDatabase = self.dbChoice.GetStringSelection() self.searchType = self.searchChoice.GetStringSelection() self.seqCol = int(self.seqColumn.GetValue()) #Format the 'local database' variable for proper use in GENe (differs per operating system) self.localDatabase = self.dbTextBox.GetValue() ### Double quotes for windows if 'win' in sys.platform: self.localDatabase = '"' + self.localDatabase + '"' ### Single quotes for UNIX (MacOS and Linux) else: self.localDatabase = "'" + self.localDatabase + "'" print "Databases you chose: ", self.localDatabase #Set instance variables in GENe self.newCatalog.seqCol = self.seqCol self.newCatalog.eValCap = self.eValCap self.newCatalog.queryType = self.queryType self.newCatalog.searchType = self.searchType self.newCatalog.xenopusAlgo = self.xenopusAlgo self.newCatalog.localDatabase = self.localDatabase self.newCatalog.serverDatabase = self.serverDatabase #Force user to pick a file if they haven't already. if self.openFileName == '': self.openExcelFile(None) return if self.saveFileName == '': self.saveExcelFile(None) return #Run the GENe program self.newCatalog.readBook() self.progressBar.SetRange(self.newCatalog.numberOfQueries) if self.queryType == 'queryServer': self.progressText.SetLabel("Initializing Server BLAST Query...") if self.queryType == 'queryLocal': self.progressText.SetLabel("Running Local BLAST Query. This may take a very long time.") self.newCatalog.start() def progressUpdate(self, progress): '''Updates progress bar''' if progress == -1: self.progressBar.Pulse() elif progress == -2: self.progressText.SetLabel("Complete!") elif progress == -3: self.progressText.SetLabel("Terminated Early.") self.progressBar.SetValue(0) else: self.progressBar.SetValue(progress) progressString = str(progress) + ' of ' + str(self.newCatalog.numberOfQueries) + " catalogged." self.progressText.SetLabel(progressString) def openExcelFile(self, event): dialog = wx.FileDialog(self, message= "Open an Excel File", style = wx.OPEN) if dialog.ShowModal() == wx.ID_OK: self.openFileName = dialog.GetPath() if len(self.openFileName) > 45: shortName = self.openFileName[0:45] + '...' 
else: shortName = self.openFileName self.openFileText.SetLabel(shortName) self.newCatalog.fileToOpen = self.openFileName dialog.Destroy() def saveExcelFile(self,event): dialog = wx.FileDialog(self, message= "Choose save Destination", style = wx.SAVE) dialog.SetFilename('GENe Results.xls') if dialog.ShowModal() == wx.ID_OK: self.saveFileName = dialog.GetPath() #Make sure that the file being saved is in excel format. if '.xls' not in self.saveFileName: self.saveFileName = self.saveFileName + '.xls' #Shorten up the name to display on the Window if len(self.saveFileName) > 50: shortName = self.saveFileName[0:50] + '...' else: shortName = self.saveFileName self.saveFileText.SetLabel(shortName) self.newCatalog.saveAs = self.saveFileName dialog.Destroy() def selectLocal(self, event): '''Checkbox method for local selection''' if not self.localCheck.IsChecked(): self.localCheck.SetValue(True) else: self.serverCheck.SetValue(False) self.queryType = 'queryLocal' def selectServer(self, event): '''Checkbox method for server selection''' if not self.serverCheck.IsChecked(): self.serverCheck.SetValue(True) else: self.localCheck.SetValue(False) self.queryType = 'queryServer' def selectXeno(self, event): '''Checkbox method for Xenopus Laevis algorithm vs. Top three hits selection''' if self.xenoCheck.IsChecked(): self.xenopusAlgo = True else: self.xenopusAlgo = False def openREADME(self, event): webbrowser.open('README.txt') def closewindow(self, event): sys.exit() self.Destroy() if __name__ == '__main__': homeFolder = os.path.expanduser('~') GENefolder = homeFolder + '\GENe\\' if not os.path.exists(GENefolder): os.makedirs(GENefolder) errorlog = GENefolder + 'GENe Error Log.txt' try: os.remove(errorlog) except: pass app = wx.App(True, filename = errorlog) frame = GENeGUI(parent=None, id=-1) frame.Show() app.MainLoop()
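# ---------------------------------------------------------------------------
# Illustration only: the platform-dependent quoting that runCataloger() applies
# to the local BLAST database name, pulled out as a small standalone helper.
# quote_local_db is a hypothetical name; the GUI above keeps this logic inline.
# Note that the substring test 'win' in sys.platform also matches 'darwin'
# (macOS), so sys.platform.startswith('win') is the safer check used here.
import sys

def quote_local_db(name, platform=None):
    """Quote a BLAST+ database name for the shell: double quotes on Windows,
    single quotes on UNIX-like systems (Linux, macOS)."""
    platform = platform or sys.platform
    if platform.startswith('win'):
        return '"' + name + '"'
    return "'" + name + "'"

assert quote_local_db('nt', platform='win32') == '"nt"'
assert quote_local_db('nt', platform='darwin') == "'nt'"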
import requests try: import ujson as json except: import json import unittest import os import uuid import subprocess import logging import threading from JumpScale import j debug = True out = err = None appport = 9099 if debug: appport = 80 baseurl = 'http://localhost:%s/restmachine/space/objects/model.%%s' % appport baserestfull = 'http://localhost:%s/restextmachine/space/objects/%%s' % appport loginurl = "http://localhost:%s" % appport appdir = os.path.join(j.dirs.appDir, str(uuid.uuid4())) procs = list() class LogPipe(threading.Thread): def __init__(self, level): """Setup the object with a logger and a loglevel and start the thread """ threading.Thread.__init__(self) self.daemon = False self.level = level self.fdRead, self.fdWrite = os.pipe() self.pipeReader = os.fdopen(self.fdRead) self.start() def fileno(self): """Return the write file descriptor of the pipe """ return self.fdWrite def run(self): """Run the thread, logging everything. """ for line in iter(self.pipeReader.readline, ''): logging.log(self.level, line.strip('\n')) self.pipeReader.close() def close(self): """Close the write end of the pipe. """ os.close(self.fdWrite) def setUp(): app = os.path.join(os.path.dirname(__file__), '_app') j.system.fs.copyDirTree(app, appdir) if not debug: global out, err out = LogPipe(logging.INFO) err = LogPipe(logging.ERROR) startOsis(out, err) startApp(out, err) def startOsis(out, err): import ipdb ipdb.set_trace() path = j.system.fs.joinPaths(j.dirs.baseDir, 'apps', 'osis') proc = subprocess.Popen(['python', 'osisServerStart.py'], cwd=path, stdout=out, stderr=err) procs.append(proc) def startApp(out, err): proc = subprocess.Popen(['python', 'start_appserver.py'], cwd=appdir, stdout=out, stderr=err) procs.append(proc) j.system.net.waitConnectionTest('127.0.0.1', appport, 5) def tearDown(): for proc in procs: proc.kill() j.system.fs.removeDirTree(appdir) global out, err if out: out.close() if err: err.close() class RestMachine(unittest.TestCase): objecttype = 'machine' objectpk = 'id' idtype = int def setUp(self): self.headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} self.session = requests.Session() self.login('admin', 'admin') self.ids = list() def tearDown(self): for id in self.ids[:]: try: self.delete(id) except: print 'Failed to delete possibly already deleted' def login(self, username, password): data = {'passwd': password, 'user_login_': username} self.session.post(loginurl, data=json.dumps(data), headers=self.headers) def doRequest(self, action, data=None, method='post'): url = '%s.%s' % (baseurl % self.objecttype, action) data = json.dumps(data) req = getattr(self.session, method, self.session.post) r = req(url, data=data, headers=self.headers) self.assertEqual(r.status_code, 200) return r def test_create(self): data = {'name': 'test', 'memory': 6} if self.idtype is basestring: data['id'] = str(uuid.uuid4()) r = self.doRequest('create', data) id = r.json() self.assertTrue(id) self.assertIsInstance(id, self.idtype) self.ids.append(id) return id def list(self): return self.doRequest('list').json() def test_list(self): id = self.test_create() result = self.list() self.assertIn(str(id), result) return result, id def delete(self, id): self.doRequest('delete', {'id': id}) self.ids.remove(id) def test_delete(self): id = self.test_create() self.delete(id) result = self.list() self.assertNotIn(str(id), result) def get(self, id): rawobj = self.doRequest('get', {'id': id}).json() return rawobj def test_get(self): id = self.test_create() obj = self.get(id) 
self.assertIsInstance(obj, dict) for prop in ('guid', 'memory', 'name'): self.assertIn(prop, obj) return obj def test_update(self): obj = self.test_get() update = {self.objectpk: obj[self.objectpk], 'memory': 2048} self.doRequest('set', {'data': update}) newobj = self.get(obj[self.objectpk]) self.assertEqual(newobj['memory'], update['memory']) self.assertEqual(newobj['name'], obj['name']) class RestExtMachine(RestMachine): def doRequest(self, action, data=None, method='post'): req = self.session.get url = baserestfull % self.objecttype jsondata = None if action == 'create': jsondata = json.dumps(data) req = self.session.post elif action == 'set': jsondata = json.dumps(data) req = self.session.put url += '/%s' % data['data'][self.objectpk] elif action == 'delete': req = self.session.delete url += '/%s' % data['id'] elif action == 'get': url += '/%s' % data['id'] r = req(url, data=jsondata, headers=self.headers) self.assertEqual(r.status_code, 200) return r class RestMachineOsis(RestMachine): objecttype = 'machineosis' class RestMachineExtOsis(RestExtMachine): objecttype = 'machineosis' class RestNic(RestMachine): objecttype = 'nic' idtype = basestring class RestExtNic(RestExtMachine): objecttype = 'nic' idtype = basestring class RestNicOsis(RestMachine): objecttype = 'nicosis' idtype = basestring class RestExtNicOsis(RestExtMachine): objecttype = 'nicosis' idtype = basestring
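# ---------------------------------------------------------------------------
# Standalone sketch (illustration only) of the JSON conventions the test
# classes above exercise: log in, then POST to
#   http://localhost:<port>/restmachine/space/objects/model.<type>.<action>
# The port, credentials and payload mirror the fixtures above; adjust them for
# a real deployment.
import json
import requests

def demo(port=9099, username='admin', password='admin'):
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    session = requests.Session()
    # Login mirrors RestMachine.login().
    session.post('http://localhost:%s' % port,
                 data=json.dumps({'user_login_': username, 'passwd': password}),
                 headers=headers)
    base = 'http://localhost:%s/restmachine/space/objects/model.machine' % port
    # Create, fetch and delete one machine object, as test_create/test_get do.
    machine_id = session.post(base + '.create',
                              data=json.dumps({'name': 'demo', 'memory': 6}),
                              headers=headers).json()
    obj = session.post(base + '.get',
                       data=json.dumps({'id': machine_id}),
                       headers=headers).json()
    print('%s %s' % (obj.get('name'), obj.get('memory')))
    session.post(base + '.delete',
                 data=json.dumps({'id': machine_id}),
                 headers=headers)

# demo()  # requires the app server started by setUp() to be running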
""" Classes allowing "generic" relations through ContentType and object-id fields. """ from __future__ import unicode_literals from collections import defaultdict from functools import partial from django.core.exceptions import ObjectDoesNotExist from django.db import connection from django.db.models import signals from django.db import models, router, DEFAULT_DB_ALIAS from django.db.models.fields.related import RelatedField, Field, ManyToManyRel from django.db.models.loading import get_model from django.forms import ModelForm from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets from django.contrib.contenttypes.models import ContentType from django.utils.encoding import smart_text class GenericForeignKey(object): """ Provides a generic relation to any object through content-type/object-id fields. """ def __init__(self, ct_field="content_type", fk_field="object_id"): self.ct_field = ct_field self.fk_field = fk_field def contribute_to_class(self, cls, name): self.name = name self.model = cls self.cache_attr = "_%s_cache" % name cls._meta.add_virtual_field(self) # For some reason I don't totally understand, using weakrefs here doesn't work. signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False) # Connect myself as the descriptor for this field setattr(cls, name, self) def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs): """ Handles initializing an object with the generic FK instaed of content-type/object-id fields. """ if self.name in kwargs: value = kwargs.pop(self.name) kwargs[self.ct_field] = self.get_content_type(obj=value) kwargs[self.fk_field] = value._get_pk_val() def get_content_type(self, obj=None, id=None, using=None): # Convenience function using get_model avoids a circular import when # using this model ContentType = get_model("contenttypes", "contenttype") if obj: return ContentType.objects.db_manager(obj._state.db).get_for_model(obj) elif id: return ContentType.objects.db_manager(using).get_for_id(id) else: # This should never happen. I love comments like this, don't you? raise Exception("Impossible arguments to GFK.get_content_type!") def get_prefetch_query_set(self, instances): # For efficiency, group the instances by content type and then do one # query per model fk_dict = defaultdict(set) # We need one instance for each group in order to get the right db: instance_dict = {} ct_attname = self.model._meta.get_field(self.ct_field).get_attname() for instance in instances: # We avoid looking for values if either ct_id or fkey value is None ct_id = getattr(instance, ct_attname) if ct_id is not None: fk_val = getattr(instance, self.fk_field) if fk_val is not None: fk_dict[ct_id].add(fk_val) instance_dict[ct_id] = instance ret_val = [] for ct_id, fkeys in fk_dict.items(): instance = instance_dict[ct_id] ct = self.get_content_type(id=ct_id, using=instance._state.db) ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys)) # For doing the join in Python, we have to match both the FK val and the # content type, so we use a callable that returns a (fk, class) pair. 
def gfk_key(obj): ct_id = getattr(obj, ct_attname) if ct_id is None: return None else: model = self.get_content_type(id=ct_id, using=obj._state.db).model_class() return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)), model) return (ret_val, lambda obj: (obj._get_pk_val(), obj.__class__), gfk_key, True, self.cache_attr) def is_cached(self, instance): return hasattr(instance, self.cache_attr) def __get__(self, instance, instance_type=None): if instance is None: return self try: return getattr(instance, self.cache_attr) except AttributeError: rel_obj = None # Make sure to use ContentType.objects.get_for_id() to ensure that # lookups are cached (see ticket #5570). This takes more code than # the naive ``getattr(instance, self.ct_field)``, but has better # performance when dealing with GFKs in loops and such. f = self.model._meta.get_field(self.ct_field) ct_id = getattr(instance, f.get_attname(), None) if ct_id: ct = self.get_content_type(id=ct_id, using=instance._state.db) try: rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field)) except ObjectDoesNotExist: pass setattr(instance, self.cache_attr, rel_obj) return rel_obj def __set__(self, instance, value): if instance is None: raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name) ct = None fk = None if value is not None: ct = self.get_content_type(obj=value) fk = value._get_pk_val() setattr(instance, self.ct_field, ct) setattr(instance, self.fk_field, fk) setattr(instance, self.cache_attr, value) class GenericRelation(RelatedField, Field): """Provides an accessor to generic related objects (e.g. comments)""" def __init__(self, to, **kwargs): kwargs['verbose_name'] = kwargs.get('verbose_name', None) kwargs['rel'] = GenericRel(to, related_name=kwargs.pop('related_name', None), limit_choices_to=kwargs.pop('limit_choices_to', None), symmetrical=kwargs.pop('symmetrical', True)) # Override content-type/object-id field names on the related class self.object_id_field_name = kwargs.pop("object_id_field", "object_id") self.content_type_field_name = kwargs.pop("content_type_field", "content_type") kwargs['blank'] = True kwargs['editable'] = False kwargs['serialize'] = False Field.__init__(self, **kwargs) def get_choices_default(self): return Field.get_choices(self, include_blank=False) def value_to_string(self, obj): qs = getattr(obj, self.name).all() return smart_text([instance._get_pk_val() for instance in qs]) def m2m_db_table(self): return self.rel.to._meta.db_table def m2m_column_name(self): return self.object_id_field_name def m2m_reverse_name(self): return self.rel.to._meta.pk.column def m2m_target_field_name(self): return self.model._meta.pk.name def m2m_reverse_target_field_name(self): return self.rel.to._meta.pk.name def contribute_to_class(self, cls, name): super(GenericRelation, self).contribute_to_class(cls, name) # Save a reference to which model this class is on for future use self.model = cls # Add the descriptor for the m2m relation setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self)) def contribute_to_related_class(self, cls, related): pass def set_attributes_from_rel(self): pass def get_internal_type(self): return "ManyToManyField" def db_type(self, connection): # Since we're simulating a ManyToManyField, in effect, best return the # same db_type as well. return None def extra_filters(self, pieces, pos, negate): """ Return an extra filter to the queryset so that the results are filtered on the appropriate content type. 
""" if negate: return [] ContentType = get_model("contenttypes", "contenttype") content_type = ContentType.objects.get_for_model(self.model) prefix = "__".join(pieces[:pos + 1]) return [("%s__%s" % (prefix, self.content_type_field_name), content_type)] def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS): """ Return all objects related to ``objs`` via this ``GenericRelation``. """ return self.rel.to._base_manager.db_manager(using).filter(**{ "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(self.model).pk, "%s__in" % self.object_id_field_name: [obj.pk for obj in objs] }) class ReverseGenericRelatedObjectsDescriptor(object): """ This class provides the functionality that makes the related-object managers available as attributes on a model class, for fields that have multiple "remote" values and have a GenericRelation defined in their model (rather than having another model pointed *at* them). In the example "article.publications", the publications attribute is a ReverseGenericRelatedObjectsDescriptor instance. """ def __init__(self, field): self.field = field def __get__(self, instance, instance_type=None): if instance is None: return self # This import is done here to avoid circular import importing this module from django.contrib.contenttypes.models import ContentType # Dynamically create a class that subclasses the related model's # default manager. rel_model = self.field.rel.to superclass = rel_model._default_manager.__class__ RelatedManager = create_generic_related_manager(superclass) qn = connection.ops.quote_name content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance) manager = RelatedManager( model = rel_model, instance = instance, symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model), source_col_name = qn(self.field.m2m_column_name()), target_col_name = qn(self.field.m2m_reverse_name()), content_type = content_type, content_type_field_name = self.field.content_type_field_name, object_id_field_name = self.field.object_id_field_name, prefetch_cache_name = self.field.attname, ) return manager def __set__(self, instance, value): if instance is None: raise AttributeError("Manager must be accessed via instance") manager = self.__get__(instance) manager.clear() for obj in value: manager.add(obj) def create_generic_related_manager(superclass): """ Factory function for a manager that subclasses 'superclass' (which is a Manager) and adds behavior for generic related objects. 
""" class GenericRelatedObjectManager(superclass): def __init__(self, model=None, instance=None, symmetrical=None, source_col_name=None, target_col_name=None, content_type=None, content_type_field_name=None, object_id_field_name=None, prefetch_cache_name=None): super(GenericRelatedObjectManager, self).__init__() self.model = model self.content_type = content_type self.symmetrical = symmetrical self.instance = instance self.source_col_name = source_col_name self.target_col_name = target_col_name self.content_type_field_name = content_type_field_name self.object_id_field_name = object_id_field_name self.prefetch_cache_name = prefetch_cache_name self.pk_val = self.instance._get_pk_val() self.core_filters = { '%s__pk' % content_type_field_name: content_type.id, '%s__exact' % object_id_field_name: instance._get_pk_val(), } def get_query_set(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): db = self._db or router.db_for_read(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).get_query_set().using(db).filter(**self.core_filters) def get_prefetch_query_set(self, instances): db = self._db or router.db_for_read(self.model, instance=instances[0]) query = { '%s__pk' % self.content_type_field_name: self.content_type.id, '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances) } qs = super(GenericRelatedObjectManager, self).get_query_set().using(db).filter(**query) # We (possibly) need to convert object IDs to the type of the # instances' PK in order to match up instances: object_id_converter = instances[0]._meta.pk.to_python return (qs, lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)), lambda obj: obj._get_pk_val(), False, self.prefetch_cache_name) def add(self, *objs): for obj in objs: if not isinstance(obj, self.model): raise TypeError("'%s' instance expected" % self.model._meta.object_name) setattr(obj, self.content_type_field_name, self.content_type) setattr(obj, self.object_id_field_name, self.pk_val) obj.save() add.alters_data = True def remove(self, *objs): db = router.db_for_write(self.model, instance=self.instance) for obj in objs: obj.delete(using=db) remove.alters_data = True def clear(self): db = router.db_for_write(self.model, instance=self.instance) for obj in self.all(): obj.delete(using=db) clear.alters_data = True def create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).using(db).create(**kwargs) create.alters_data = True return GenericRelatedObjectManager class GenericRel(ManyToManyRel): def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True): self.to = to self.related_name = related_name self.limit_choices_to = limit_choices_to or {} self.symmetrical = symmetrical self.multiple = True self.through = None class BaseGenericInlineFormSet(BaseModelFormSet): """ A formset for generic inline objects to a parent. """ def __init__(self, data=None, files=None, instance=None, save_as_new=None, prefix=None, queryset=None): # Avoid a circular import. 
from django.contrib.contenttypes.models import ContentType opts = self.model._meta self.instance = instance self.rel_name = '-'.join(( opts.app_label, opts.object_name.lower(), self.ct_field.name, self.ct_fk_field.name, )) if self.instance is None or self.instance.pk is None: qs = self.model._default_manager.none() else: if queryset is None: queryset = self.model._default_manager qs = queryset.filter(**{ self.ct_field.name: ContentType.objects.get_for_model(self.instance), self.ct_fk_field.name: self.instance.pk, }) super(BaseGenericInlineFormSet, self).__init__( queryset=qs, data=data, files=files, prefix=prefix ) @classmethod def get_default_prefix(cls): opts = cls.model._meta return '-'.join((opts.app_label, opts.object_name.lower(), cls.ct_field.name, cls.ct_fk_field.name, )) def save_new(self, form, commit=True): # Avoid a circular import. from django.contrib.contenttypes.models import ContentType kwargs = { self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk, self.ct_fk_field.get_attname(): self.instance.pk, } new_obj = self.model(**kwargs) return save_instance(form, new_obj, commit=commit) def generic_inlineformset_factory(model, form=ModelForm, formset=BaseGenericInlineFormSet, ct_field="content_type", fk_field="object_id", fields=None, exclude=None, extra=3, can_order=False, can_delete=True, max_num=None, formfield_callback=None): """ Returns a ``GenericInlineFormSet`` for the given kwargs. You must provide ``ct_field`` and ``object_id`` if they different from the defaults ``content_type`` and ``object_id`` respectively. """ opts = model._meta # Avoid a circular import. from django.contrib.contenttypes.models import ContentType # if there is no field called `ct_field` let the exception propagate ct_field = opts.get_field(ct_field) if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType: raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field) fk_field = opts.get_field(fk_field) # let the exception propagate if exclude is not None: exclude = list(exclude) exclude.extend([ct_field.name, fk_field.name]) else: exclude = [ct_field.name, fk_field.name] FormSet = modelformset_factory(model, form=form, formfield_callback=formfield_callback, formset=formset, extra=extra, can_delete=can_delete, can_order=can_order, fields=fields, exclude=exclude, max_num=max_num) FormSet.ct_field = ct_field FormSet.ct_fk_field = fk_field return FormSet class GenericInlineModelAdmin(InlineModelAdmin): ct_field = "content_type" ct_fk_field = "object_id" formset = BaseGenericInlineFormSet def get_formset(self, request, obj=None, **kwargs): if self.declared_fieldsets: fields = flatten_fieldsets(self.declared_fieldsets) else: fields = None if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # GenericInlineModelAdmin doesn't define its own. 
exclude.extend(self.form._meta.exclude) exclude = exclude or None can_delete = self.can_delete and self.has_delete_permission(request, obj) defaults = { "ct_field": self.ct_field, "fk_field": self.ct_fk_field, "form": self.form, "formfield_callback": partial(self.formfield_for_dbfield, request=request), "formset": self.formset, "extra": self.extra, "can_delete": can_delete, "can_order": False, "fields": fields, "max_num": self.max_num, "exclude": exclude } defaults.update(kwargs) return generic_inlineformset_factory(self.model, **defaults) class GenericStackedInline(GenericInlineModelAdmin): template = 'admin/edit_inline/stacked.html' class GenericTabularInline(GenericInlineModelAdmin): template = 'admin/edit_inline/tabular.html'
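# ---------------------------------------------------------------------------
# Usage sketch (illustration only, assuming a configured Django project of the
# same vintage as this module, where these classes are importable as
# django.contrib.contenttypes.generic). It is the classic "tag anything"
# pattern: TaggedItem points at an arbitrary model through a
# GenericForeignKey, and Bookmark exposes the reverse side through a
# GenericRelation. Kept as a comment because model classes must live in an
# installed app, not in this module.
#
#   from django.db import models
#   from django.contrib.contenttypes.models import ContentType
#   from django.contrib.contenttypes import generic
#
#   class TaggedItem(models.Model):
#       tag = models.SlugField()
#       content_type = models.ForeignKey(ContentType)
#       object_id = models.PositiveIntegerField()
#       content_object = generic.GenericForeignKey('content_type', 'object_id')
#
#   class Bookmark(models.Model):
#       url = models.URLField()
#       tags = generic.GenericRelation(TaggedItem)
#
#   b = Bookmark.objects.create(url='https://example.com/')
#   b.tags.create(tag='django')       # handled by GenericRelatedObjectManager
#   TaggedItem.objects.get(tag='django').content_object  # -> the Bookmark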
# # Author: Pearu Peterson, March 2002 # # w/ additions by Travis Oliphant, March 2002 # and Jake Vanderplas, August 2012 from __future__ import division, print_function, absolute_import __all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded', 'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq', 'pinv', 'pinv2', 'pinvh'] import numpy as np from .flinalg import get_flinalg_funcs from .lapack import get_lapack_funcs, _compute_lwork from .misc import LinAlgError, _datacopied from .decomp import _asarray_validated from . import decomp, decomp_svd from ._solve_toeplitz import levinson # Linear equations def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False, debug=False, check_finite=True): """ Solve the equation ``a x = b`` for ``x``. Parameters ---------- a : (M, M) array_like A square matrix. b : (M,) or (M, N) array_like Right-hand side matrix in ``a x = b``. sym_pos : bool, optional Assume `a` is symmetric and positive definite. lower : bool, optional Use only data contained in the lower triangle of `a`, if `sym_pos` is true. Default is to use upper triangle. overwrite_a : bool, optional Allow overwriting data in `a` (may enhance performance). Default is False. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance). Default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, N) ndarray Solution to the system ``a x = b``. Shape of the return matches the shape of `b`. Raises ------ LinAlgError If `a` is singular. ValueError If `a` is not square Examples -------- Given `a` and `b`, solve for `x`: >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]]) >>> b = np.array([2, 4, -1]) >>> from scipy import linalg >>> x = linalg.solve(a, b) >>> x array([ 2., -2., 9.]) >>> np.dot(a, x) == b array([ True, True, True], dtype=bool) """ a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') if a1.shape[0] != b1.shape[0]: raise ValueError('incompatible dimensions') overwrite_a = overwrite_a or _datacopied(a1, a) overwrite_b = overwrite_b or _datacopied(b1, b) if debug: print('solve:overwrite_a=', overwrite_a) print('solve:overwrite_b=', overwrite_b) if sym_pos: posv, = get_lapack_funcs(('posv',), (a1, b1)) c, x, info = posv(a1, b1, lower=lower, overwrite_a=overwrite_a, overwrite_b=overwrite_b) else: gesv, = get_lapack_funcs(('gesv',), (a1, b1)) lu, piv, x, info = gesv(a1, b1, overwrite_a=overwrite_a, overwrite_b=overwrite_b) if info == 0: return x if info > 0: raise LinAlgError("singular matrix") raise ValueError('illegal value in %d-th argument of internal gesv|posv' % -info) def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, overwrite_b=False, debug=False, check_finite=True): """ Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. Parameters ---------- a : (M, M) array_like A triangular matrix b : (M,) or (M, N) array_like Right-hand side matrix in `a x = b` lower : bool, optional Use only data contained in the lower triangle of `a`. Default is to use upper triangle. 
trans : {0, 1, 2, 'N', 'T', 'C'}, optional Type of system to solve: ======== ========= trans system ======== ========= 0 or 'N' a x = b 1 or 'T' a^T x = b 2 or 'C' a^H x = b ======== ========= unit_diagonal : bool, optional If True, diagonal elements of `a` are assumed to be 1 and will not be referenced. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, N) ndarray Solution to the system `a x = b`. Shape of return matches `b`. Raises ------ LinAlgError If `a` is singular Notes ----- .. versionadded:: 0.9.0 """ a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') if a1.shape[0] != b1.shape[0]: raise ValueError('incompatible dimensions') overwrite_b = overwrite_b or _datacopied(b1, b) if debug: print('solve:overwrite_b=', overwrite_b) trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) trtrs, = get_lapack_funcs(('trtrs',), (a1, b1)) x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, trans=trans, unitdiag=unit_diagonal) if info == 0: return x if info > 0: raise LinAlgError("singular matrix: resolution failed at diagonal %s" % info-1) raise ValueError('illegal value in %d-th argument of internal trtrs' % -info) def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False, debug=False, check_finite=True): """ Solve the equation a x = b for x, assuming a is banded matrix. The matrix a is stored in `ab` using the matrix diagonal ordered form:: ab[u + i - j, j] == a[i,j] Example of `ab` (shape of a is (6,6), `u` =1, `l` =2):: * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Parameters ---------- (l, u) : (integer, integer) Number of non-zero lower and upper diagonals ab : (`l` + `u` + 1, M) array_like Banded matrix b : (M,) or (M, K) array_like Right-hand side overwrite_ab : bool, optional Discard data in `ab` (may enhance performance) overwrite_b : bool, optional Discard data in `b` (may enhance performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system a x = b. Returned shape depends on the shape of `b`. """ a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True) b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True) # Validate shapes. 
if a1.shape[-1] != b1.shape[0]: raise ValueError("shapes of ab and b are not compatible.") (l, u) = l_and_u if l + u + 1 != a1.shape[0]: raise ValueError("invalid values for the number of lower and upper " "diagonals: l+u+1 (%d) does not equal ab.shape[0] " "(%d)" % (l+u+1, ab.shape[0])) overwrite_b = overwrite_b or _datacopied(b1, b) if a1.shape[-1] == 1: b2 = np.array(b1, copy=overwrite_b) b2 /= a1[1, 0] return b2 if l == u == 1: overwrite_ab = overwrite_ab or _datacopied(a1, ab) gtsv, = get_lapack_funcs(('gtsv',), (a1, b1)) du = a1[0, 1:] d = a1[1, :] dl = a1[2, :-1] du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab, overwrite_ab, overwrite_b) else: gbsv, = get_lapack_funcs(('gbsv',), (a1, b1)) a2 = np.zeros((2*l+u+1, a1.shape[1]), dtype=gbsv.dtype) a2[l:, :] = a1 lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True, overwrite_b=overwrite_b) if info == 0: return x if info > 0: raise LinAlgError("singular matrix") raise ValueError('illegal value in %d-th argument of internal gbsv/gtsv' % -info) def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False, check_finite=True): """ Solve equation a x = b. a is Hermitian positive-definite banded matrix. The matrix a is stored in `ab` either in lower diagonal or upper diagonal ordered form: ab[u + i - j, j] == a[i,j] (if upper form; i <= j) ab[ i - j, j] == a[i,j] (if lower form; i >= j) Example of `ab` (shape of a is (6, 6), `u` =2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Cells marked with * are not used. Parameters ---------- ab : (`u` + 1, M) array_like Banded matrix b : (M,) or (M, K) array_like Right-hand side overwrite_ab : bool, optional Discard data in `ab` (may enhance performance) overwrite_b : bool, optional Discard data in `b` (may enhance performance) lower : bool, optional Is the matrix in the lower form. (Default is upper form) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system a x = b. Shape of return matches shape of `b`. """ a1 = _asarray_validated(ab, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) # Validate shapes. if a1.shape[-1] != b1.shape[0]: raise ValueError("shapes of ab and b are not compatible.") overwrite_b = overwrite_b or _datacopied(b1, b) overwrite_ab = overwrite_ab or _datacopied(a1, ab) if a1.shape[0] == 2: ptsv, = get_lapack_funcs(('ptsv',), (a1, b1)) if lower: d = a1[0, :].real e = a1[1, :-1] else: d = a1[1, :].real e = a1[0, 1:].conj() d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab, overwrite_b) else: pbsv, = get_lapack_funcs(('pbsv',), (a1, b1)) c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab, overwrite_b=overwrite_b) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal pbsv' % -info) return x def solve_toeplitz(c_or_cr, b, check_finite=True): """Solve a Toeplitz system using Levinson Recursion The Toeplitz matrix has constant diagonals, with c as its first column and r as its first row. If r is not given, ``r == conjugate(c)`` is assumed. 
Parameters ---------- c_or_cr : array_like or tuple of (array_like, array_like) The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the actual shape of ``c``, it will be converted to a 1-D array. If not supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape of ``r``, it will be converted to a 1-D array. b : (M,) or (M, K) array_like Right-hand side in ``T x = b``. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (result entirely NaNs) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system ``T x = b``. Shape of return matches shape of `b`. Notes ----- The solution is computed using Levinson-Durbin recursion, which is faster than generic least-squares methods, but can be less numerically stable. """ # If numerical stability of this algorithim is a problem, a future # developer might consider implementing other O(N^2) Toeplitz solvers, # such as GKO (http://www.jstor.org/stable/2153371) or Bareiss. if isinstance(c_or_cr, tuple): c, r = c_or_cr c = _asarray_validated(c, check_finite=check_finite).ravel() r = _asarray_validated(r, check_finite=check_finite).ravel() else: c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel() r = c.conjugate() # Form a 1D array of values to be used in the matrix, containing a reversed # copy of r[1:], followed by c. vals = np.concatenate((r[-1:0:-1], c)) if b is None: raise ValueError('illegal value, `b` is a required argument') if vals.shape[0] != (2*b.shape[0] - 1): raise ValueError('incompatible dimensions') b = _asarray_validated(b) if np.iscomplexobj(vals) or np.iscomplexobj(b): vals = np.asarray(vals, dtype=np.complex128, order='c') b = np.asarray(b, dtype=np.complex128) else: vals = np.asarray(vals, dtype=np.double, order='c') b = np.asarray(b, dtype=np.double) if b.ndim == 1: x, _ = levinson(vals, np.ascontiguousarray(b)) else: b_shape = b.shape b = b.reshape(b.shape[0], -1) x = np.column_stack( (levinson(vals, np.ascontiguousarray(b[:, i]))[0]) for i in range(b.shape[1])) x = x.reshape(*b_shape) return x def _get_axis_len(aname, a, axis): ax = axis if ax < 0: ax += a.ndim if 0 <= ax < a.ndim: return a.shape[ax] raise ValueError("'%saxis' entry is out of bounds" % (aname,)) def solve_circulant(c, b, singular='raise', tol=None, caxis=-1, baxis=0, outaxis=0): """Solve C x = b for x, where C is a circulant matrix. `C` is the circulant matrix associated with the vector `c`. The system is solved by doing division in Fourier space. The calculation is:: x = ifft(fft(b) / fft(c)) where `fft` and `ifft` are the fast Fourier transform and its inverse, respectively. For a large vector `c`, this is *much* faster than solving the system with the full circulant matrix. Parameters ---------- c : array_like The coefficients of the circulant matrix. b : array_like Right-hand side matrix in ``a x = b``. singular : str, optional This argument controls how a near singular circulant matrix is handled. If `singular` is "raise" and the circulant matrix is near singular, a `LinAlgError` is raised. If `singular` is "lstsq", the least squares solution is returned. Default is "raise". 
tol : float, optional If any eigenvalue of the circulant matrix has an absolute value that is less than or equal to `tol`, the matrix is considered to be near singular. If not given, `tol` is set to:: tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps where `abs_eigs` is the array of absolute values of the eigenvalues of the circulant matrix. caxis : int When `c` has dimension greater than 1, it is viewed as a collection of circulant vectors. In this case, `caxis` is the axis of `c` that holds the vectors of circulant coefficients. baxis : int When `b` has dimension greater than 1, it is viewed as a collection of vectors. In this case, `baxis` is the axis of `b` that holds the right-hand side vectors. outaxis : int When `c` or `b` are multidimensional, the value returned by `solve_circulant` is multidimensional. In this case, `outaxis` is the axis of the result that holds the solution vectors. Returns ------- x : ndarray Solution to the system ``C x = b``. Raises ------ LinAlgError If the circulant matrix associated with `c` is near singular. See Also -------- circulant Notes ----- For a one-dimensional vector `c` with length `m`, and an array `b` with shape ``(m, ...)``, solve_circulant(c, b) returns the same result as solve(circulant(c), b) where `solve` and `circulant` are from `scipy.linalg`. .. versionadded:: 0.16.0 Examples -------- >>> from scipy.linalg import solve_circulant, solve, circulant, lstsq >>> c = np.array([2, 2, 4]) >>> b = np.array([1, 2, 3]) >>> solve_circulant(c, b) array([ 0.75, -0.25, 0.25]) Compare that result to solving the system with `scipy.linalg.solve`: >>> solve(circulant(c), b) array([ 0.75, -0.25, 0.25]) A singular example: >>> c = np.array([1, 1, 0, 0]) >>> b = np.array([1, 2, 3, 4]) Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the least square solution, use the option ``singular='lstsq'``: >>> solve_circulant(c, b, singular='lstsq') array([ 0.25, 1.25, 2.25, 1.25]) Compare to `scipy.linalg.lstsq`: >>> x, resid, rnk, s = lstsq(circulant(c), b) >>> x array([ 0.25, 1.25, 2.25, 1.25]) A broadcasting example: Suppose we have the vectors of two circulant matrices stored in an array with shape (2, 5), and three `b` vectors stored in an array with shape (3, 5). For example, >>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]]) >>> b = np.arange(15).reshape(-1, 5) We want to solve all combinations of circulant matrices and `b` vectors, with the result stored in an array with shape (2, 3, 5). When we disregard the axes of `c` and `b` that hold the vectors of coefficients, the shapes of the collections are (2,) and (3,), respectively, which are not compatible for broadcasting. To have a broadcast result with shape (2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has shape (2, 1, 5). The last dimension holds the coefficients of the circulant matrices, so when we call `solve_circulant`, we can use the default ``caxis=-1``. The coefficients of the `b` vectors are in the last dimension of the array `b`, so we use ``baxis=-1``. If we use the default `outaxis`, the result will have shape (5, 2, 3), so we'll use ``outaxis=-1`` to put the solution vectors in the last dimension. >>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1) >>> x.shape (2, 3, 5) >>> np.set_printoptions(precision=3) # For compact output of numbers. 
>>> x array([[[-0.118, 0.22 , 1.277, -0.142, 0.302], [ 0.651, 0.989, 2.046, 0.627, 1.072], [ 1.42 , 1.758, 2.816, 1.396, 1.841]], [[ 0.401, 0.304, 0.694, -0.867, 0.377], [ 0.856, 0.758, 1.149, -0.412, 0.831], [ 1.31 , 1.213, 1.603, 0.042, 1.286]]]) Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``): >>> solve_circulant(c[1], b[1, :]) array([ 0.856, 0.758, 1.149, -0.412, 0.831]) """ c = np.atleast_1d(c) nc = _get_axis_len("c", c, caxis) b = np.atleast_1d(b) nb = _get_axis_len("b", b, baxis) if nc != nb: raise ValueError('Incompatible c and b axis lengths') fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1) abs_fc = np.abs(fc) if tol is None: # This is the same tolerance as used in np.linalg.matrix_rank. tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps if tol.shape != (): tol.shape = tol.shape + (1,) else: tol = np.atleast_1d(tol) near_zeros = abs_fc <= tol is_near_singular = np.any(near_zeros) if is_near_singular: if singular == 'raise': raise LinAlgError("near singular circulant matrix.") else: # Replace the small values with 1 to avoid errors in the # division fb/fc below. fc[near_zeros] = 1 fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1) q = fb / fc if is_near_singular: # `near_zeros` is a boolean array, same shape as `c`, that is # True where `fc` is (near) zero. `q` is the broadcasted result # of fb / fc, so to set the values of `q` to 0 where `fc` is near # zero, we use a mask that is the broadcast result of an array # of True values shaped like `b` with `near_zeros`. mask = np.ones_like(b, dtype=bool) & near_zeros q[mask] = 0 x = np.fft.ifft(q, axis=-1) if not (np.iscomplexobj(c) or np.iscomplexobj(b)): x = x.real if outaxis != -1: x = np.rollaxis(x, -1, outaxis) return x # matrix inversion def inv(a, overwrite_a=False, check_finite=True): """ Compute the inverse of a matrix. Parameters ---------- a : array_like Square matrix to be inverted. overwrite_a : bool, optional Discard data in `a` (may improve performance). Default is False. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- ainv : ndarray Inverse of the matrix `a`. Raises ------ LinAlgError : If `a` is singular. ValueError : If `a` is not square, or not 2-dimensional. Examples -------- >>> from scipy import linalg >>> a = np.array([[1., 2.], [3., 4.]]) >>> linalg.inv(a) array([[-2. , 1. ], [ 1.5, -0.5]]) >>> np.dot(a, linalg.inv(a)) array([[ 1., 0.], [ 0., 1.]]) """ a1 = _asarray_validated(a, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') overwrite_a = overwrite_a or _datacopied(a1, a) #XXX: I found no advantage or disadvantage of using finv. ## finv, = get_flinalg_funcs(('inv',),(a1,)) ## if finv is not None: ## a_inv,info = finv(a1,overwrite_a=overwrite_a) ## if info==0: ## return a_inv ## if info>0: raise LinAlgError, "singular matrix" ## if info<0: raise ValueError,\ ## 'illegal value in %d-th argument of internal inv.getrf|getri'%(-info) getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri', 'getri_lwork'), (a1,)) lu, piv, info = getrf(a1, overwrite_a=overwrite_a) if info == 0: lwork = _compute_lwork(getri_lwork, a1.shape[0]) # XXX: the following line fixes curious SEGFAULT when # benchmarking 500x500 matrix inverse. 
This seems to # be a bug in LAPACK ?getri routine because if lwork is # minimal (when using lwork[0] instead of lwork[1]) then # all tests pass. Further investigation is required if # more such SEGFAULTs occur. lwork = int(1.01 * lwork) inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1) if info > 0: raise LinAlgError("singular matrix") if info < 0: raise ValueError('illegal value in %d-th argument of internal ' 'getrf|getri' % -info) return inv_a ### Determinant def det(a, overwrite_a=False, check_finite=True): """ Compute the determinant of a matrix The determinant of a square matrix is a value derived arithmetically from the coefficients of the matrix. The determinant for a 3x3 matrix, for example, is computed as follows:: a b c d e f = A g h i det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h Parameters ---------- a : (M, M) array_like A square matrix. overwrite_a : bool, optional Allow overwriting data in a (may enhance performance). check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- det : float or complex Determinant of `a`. Notes ----- The determinant is computed via LU factorization, LAPACK routine z/dgetrf. Examples -------- >>> from scipy import linalg >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]]) >>> linalg.det(a) 0.0 >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]]) >>> linalg.det(a) 3.0 """ a1 = _asarray_validated(a, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') overwrite_a = overwrite_a or _datacopied(a1, a) fdet, = get_flinalg_funcs(('det',), (a1,)) a_det, info = fdet(a1, overwrite_a=overwrite_a) if info < 0: raise ValueError('illegal value in %d-th argument of internal ' 'det.getrf' % -info) return a_det ### Linear Least Squares def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, check_finite=True): """ Compute least-squares solution to equation Ax = b. Compute a vector x such that the 2-norm ``|b - A x|`` is minimized. Parameters ---------- a : (M, N) array_like Left hand side matrix (2-D array). b : (M,) or (M, K) array_like Right hand side matrix or vector (1-D or 2-D array). cond : float, optional Cutoff for 'small' singular values; used to determine effective rank of a. Singular values smaller than ``rcond * largest_singular_value`` are considered zero. overwrite_a : bool, optional Discard data in `a` (may enhance performance). Default is False. overwrite_b : bool, optional Discard data in `b` (may enhance performance). Default is False. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (N,) or (N, K) ndarray Least-squares solution. Return shape matches shape of `b`. residues : () or (1,) or (K,) ndarray Sums of residues, squared 2-norm for each column in ``b - a x``. If rank of matrix a is < N or > M this is an empty array. If b was 1-D, this is an (1,) shape array, otherwise the shape is (K,). rank : int Effective rank of matrix `a`. s : (min(M,N),) ndarray Singular values of `a`. The condition number of a is ``abs(s[0]/s[-1])``. Raises ------ LinAlgError : If computation does not converge. 
See Also -------- optimize.nnls : linear least squares with non-negativity constraint """ a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2: raise ValueError('expected matrix') m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 if m != b1.shape[0]: raise ValueError('incompatible dimensions') gelss, = get_lapack_funcs(('gelss',), (a1, b1)) if n > m: # need to extend b matrix as it will be filled with # a larger solution matrix if len(b1.shape) == 2: b2 = np.zeros((n, nrhs), dtype=gelss.dtype) b2[:m, :] = b1 else: b2 = np.zeros(n, dtype=gelss.dtype) b2[:m] = b1 b1 = b2 overwrite_a = overwrite_a or _datacopied(a1, a) overwrite_b = overwrite_b or _datacopied(b1, b) # get optimal work array work = gelss(a1, b1, lwork=-1)[4] lwork = work[0].real.astype(int) v, x, s, rank, work, info = gelss( a1, b1, cond=cond, lwork=lwork, overwrite_a=overwrite_a, overwrite_b=overwrite_b) if info > 0: raise LinAlgError("SVD did not converge in Linear Least Squares") if info < 0: raise ValueError('illegal value in %d-th argument of internal gelss' % -info) resids = np.asarray([], dtype=x.dtype) if n < m: x1 = x[:n] if rank == n: resids = np.sum(np.abs(x[n:])**2, axis=0) x = x1 return x, resids, rank, s def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True): """ Compute the (Moore-Penrose) pseudo-inverse of a matrix. Calculate a generalized inverse of a matrix using a least-squares solver. Parameters ---------- a : (M, N) array_like Matrix to be pseudo-inverted. cond, rcond : float, optional Cutoff for 'small' singular values in the least-squares solver. Singular values smaller than ``rcond * largest_singular_value`` are considered zero. return_rank : bool, optional if True, return the effective rank of the matrix check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- B : (N, M) ndarray The pseudo-inverse of matrix `a`. rank : int The effective rank of the matrix. Returned if return_rank == True Raises ------ LinAlgError If computation does not converge. Examples -------- >>> from scipy import linalg >>> a = np.random.randn(9, 6) >>> B = linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a = _asarray_validated(a, check_finite=check_finite) b = np.identity(a.shape[0], dtype=a.dtype) if rcond is not None: cond = rcond x, resids, rank, s = lstsq(a, b, cond=cond, check_finite=False) if return_rank: return x, rank else: return x def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True): """ Compute the (Moore-Penrose) pseudo-inverse of a matrix. Calculate a generalized inverse of a matrix using its singular-value decomposition and including all 'large' singular values. Parameters ---------- a : (M, N) array_like Matrix to be pseudo-inverted. cond, rcond : float or None Cutoff for 'small' singular values. Singular values smaller than ``rcond*largest_singular_value`` are considered zero. If None or -1, suitable machine precision is used. return_rank : bool, optional if True, return the effective rank of the matrix check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. 
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- B : (N, M) ndarray The pseudo-inverse of matrix `a`. rank : int The effective rank of the matrix. Returned if return_rank == True Raises ------ LinAlgError If SVD computation does not converge. Examples -------- >>> from scipy import linalg >>> a = np.random.randn(9, 6) >>> B = linalg.pinv2(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a = _asarray_validated(a, check_finite=check_finite) u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False) if rcond is not None: cond = rcond if cond in [None, -1]: t = u.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps rank = np.sum(s > cond * np.max(s)) u = u[:, :rank] u /= s[:rank] B = np.transpose(np.conjugate(np.dot(u, vh[:rank]))) if return_rank: return B, rank else: return B def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False, check_finite=True): """ Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix. Calculate a generalized inverse of a Hermitian or real symmetric matrix using its eigenvalue decomposition and including all eigenvalues with 'large' absolute value. Parameters ---------- a : (N, N) array_like Real symmetric or complex hermetian matrix to be pseudo-inverted cond, rcond : float or None Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : bool, optional Whether the pertinent array data is taken from the lower or upper triangle of a. (Default: lower) return_rank : bool, optional if True, return the effective rank of the matrix check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- B : (N, N) ndarray The pseudo-inverse of matrix `a`. rank : int The effective rank of the matrix. Returned if return_rank == True Raises ------ LinAlgError If eigenvalue does not converge Examples -------- >>> from scipy.linalg import pinvh >>> a = np.random.randn(9, 6) >>> a = np.dot(a, a.T) >>> B = pinvh(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a = _asarray_validated(a, check_finite=check_finite) s, u = decomp.eigh(a, lower=lower, check_finite=False) if rcond is not None: cond = rcond if cond in [None, -1]: t = u.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps # For Hermitian matrices, singular values equal abs(eigenvalues) above_cutoff = (abs(s) > cond * np.max(abs(s))) psigma_diag = 1.0 / s[above_cutoff] u = u[:, above_cutoff] B = np.dot(u * psigma_diag, np.conjugate(u).T) if return_rank: return B, len(psigma_diag) else: return B
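

# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the module): exercises
# the three pseudo-inverse routines defined above on a random matrix.  Nothing
# beyond NumPy (imported as np at the top of this module) and the functions in
# this file is assumed.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    a = rng.randn(9, 6)

    # pinv (least-squares based) and pinv2 (SVD based) should agree for a
    # well-conditioned, full-rank matrix.
    print(np.allclose(pinv(a), pinv2(a)))

    # pinvh exploits Hermitian structure; check the Moore-Penrose identity
    # A B A = A on a (rank-deficient) symmetric matrix.
    sym = a.dot(a.T)
    B = pinvh(sym)
    print(np.allclose(sym.dot(B).dot(sym), sym))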
#!/usr/bin/env python """ Copyright (c) 2013, Citrix Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ Interface Rename logic Provides the 'rename' function which takes 4 lists of state and returns a list of name transactions to rename network interfaces. [in] static_rules - Static rules provided by the user, taking absolute priority list of MACPCI objects in form (mac, pci)->ethXX [in] cur_state - Current state of network cards on the machine (pre rename) list of MACPCI objects in form ethXXX|side-XXX-ethXX->(mac, pci) [in] last_state - Last boot state (post rename) of network cards on the machine list of MACPCI objects in form (mac, pci)->ethXX [in] old_state - Any older nics which have disappeared in the meantime list of MACPCI objects in form (mac, pci)->ethXX [out] transactions list of string tuples as source and destination names for "ip link set name" """ __version__ = "1.0.0" __author__ = "Andrew Cooper" import re from xcp.logger import LOG from xcp.net.ifrename.macpci import MACPCI VALID_CUR_STATE_KNAME = re.compile("^(?:eth[\d]+|side-[\d]+-eth[\d]+)$") VALID_ETH_NAME = re.compile("^eth([\d])+$") VALID_IBFT_NAME = re.compile("^ibft([\d])+$") # util needs to import VALID_ETH_NAME from xcp.net.ifrename import util class StaticRuleError(RuntimeError): """Error with static rules""" class CurrentStateError(RuntimeError): """Error with current state information""" class LastStateError(RuntimeError): """Error with last state information""" class OldStateError(RuntimeError): """Error with old state information""" class LogicError(RuntimeError): """Logical Error. Needs fixing""" def __rename_nic(nic, name, transactions, cur_state): """ Rename a specified nic to name. It checkes in possibly_aliased for nics which currently have name, and renames them sideways. 
The caller should ensure that no nics in cur_state have already been renamed to name, and that name is a valid nic name """ # Assert that name is valid assert VALID_ETH_NAME.match(name) is not None # Assert that name is not already taken in the current state assert name not in map(lambda x: x.tname, cur_state) # Given the previous assert, only un-renamed nics in the current state can # possibly alias the new name aliased = util.get_nic_with_kname( filter(lambda x: x.tname is None, cur_state), name) if aliased is None: # Using this rule will not alias another currently present NIC LOG.debug("Renaming unaliased nic '%s' to '%s'" % (nic, name)) nic.tname = name transactions.append((nic.kname, name)) elif aliased == nic and aliased.kname == nic.kname: # The nic is already named correctly. Just update tname LOG.debug("Nic '%s' is already named correctly" % (nic,)) nic.tname = nic.kname else: # Another nic is in the way for applying the rule. Move it sideways # TODO: given new assertions, will this ever be nessesary? if aliased.kname[:5] == "side-": aliased_eth = aliased.kname.split('-')[2] else: aliased_eth = aliased.kname tempname = util.get_new_temp_name(cur_state, aliased_eth) LOG.debug("Nic '%s' aliases rename of '%s' to '%s'" % (aliased, nic, name)) # Rename aliased nic sideways LOG.debug("Renaming aliased nic to '%s'" % (tempname,)) transactions.append((aliased.kname, tempname)) aliased.kname = tempname # And then rename the original nic LOG.debug("Renaming original nic to '%s'" % (name,)) nic.tname = name transactions.append((nic.kname, name)) def rename_logic( static_rules, cur_state, last_state, old_state ): """ Core logic of renaming the current state based on the rules and past state. This function assumes all inputs have been suitably sanitised. @param static_rules List of MACPCI objects representing rules @param cur_state List of MACPCI objects representing the current state @param last_state List of MACPCI objects representing the last boot state @param old_state List of MACPCI objects representing the old state @returns List of tuples... @throws AssertionError (Should not be thrown, but better to know about logic errors if they occur) """ transactions = [] if not len(cur_state): # If there are no nics present on the system, no renaming to perform return transactions # Certain drivers advertise multiple eth devices for the same PCI function # To avoid breaking the logic later, we need to know which PCI functions # have multiple eths. As this is a per-driver effect, calculate it only # from the current state and not any saved state. multinic_functions = set() pci_functions = set() for nic in cur_state: if nic.pci in pci_functions: multinic_functions.add(nic.pci) else: pci_functions.add(nic.pci) if len(multinic_functions): LOG.debug("Detected the following PCI functions with multiple nics\n%s" % (util.niceformat(multinic_functions),)) # 1st pass. Force current state into line according to the static rules for rule in static_rules: LOG.debug("Considering static rule '%s'" % (rule,)) try: nic = cur_state[cur_state.index(rule)] except ValueError: LOG.debug("Static rule does not reference a current nic") continue __rename_nic(nic, rule.tname, transactions, cur_state) LOG.debug("Finished static rules. Transactions are \n%s\n" "Current State is \n%s" % (util.niceformat(transactions), util.niceformat(cur_state))) # 2nd pass. 
This logic should cover nics referenced by last or old state for nic in filter(util.needs_renaming, cur_state): LOG.info("Considering '%s'" % (nic,)) # Did this nic appear in the same pci location as last boot? try: lastnic = last_state[last_state.index(nic)] except ValueError: # No it did not appear in the same location as before pass else: can_rename = util.tname_free(cur_state, lastnic.tname) # Warn if UDEV failed to rename the nic. Either there is a logical # bug somewhere, or the user is messing around with our files. if VALID_CUR_STATE_KNAME.match(nic.kname) is None: LOG.warning("nic '%s' was not renamed by udev." % (nic,)) # If the correct target name is free, attempt to rename to it. if can_rename: LOG.info("nic '%s' in the same location as before. " "Renaming to %s" % (nic, lastnic.tname)) __rename_nic(nic, lastnic.tname, transactions, cur_state) else: # If the target name is already taken, warn about it LOG.warning("nic '%s' aliased from its last boot location. " "Defering renaming and treating as new" % (nic,)) continue # if we saw this nic last time but its pci location is different, we # have just moved hardware on the bus so give it the old name lastnic = util.get_nic_with_mac(last_state, nic.mac) LOG.debug("nic_with_mac(last_state, %s) = %s" % (nic.mac, lastnic)) if lastnic: LOG.info("nic '%s' moved on the pci bus from '%s'" % (nic, lastnic)) __rename_nic(nic, lastnic.tname, transactions, cur_state) continue # else this mac was not seen last boot. Is it on a multinic function? if nic.pci in multinic_functions: # if it is on a multinic_function, consider it brand new and rename # later LOG.info("nic '%s' is on a multinic pci function. Considering it " "new and renaming later" % (nic,)) continue # this nic is not on a multinic function. Has it displaced another nic? lastnic = util.get_nic_with_pci(last_state+old_state, nic.pci) LOG.debug("nic_with_pci(last_state+old_state, %s) = %s" % (nic.mac, lastnic)) if lastnic: # This nic is in the place of an older nic. Is that older nic still # present elsewhere in the system? if util.get_nic_with_mac(cur_state, lastnic.mac) is not None: # Yes - the displaced nic is still preset. Therefore, that nic # has moved and this current nic is new. LOG.info("nic '%s' displaced older nic '%s' which is still " "present. Considering this nic new" % (nic, lastnic)) continue else: # No - the displaced nic is no longer present so consider it # replaced LOG.info("nic '%s' has replaced older nic '%s'" % (nic, lastnic)) __rename_nic(nic, lastnic.tname, transactions, cur_state) continue # have we ever seen this nic before? lastnic = util.get_nic_with_mac(old_state, nic.mac) LOG.debug("nic_with_mac(old_state, %s) = %s" % (nic.mac, lastnic)) if lastnic: # Yes - this nic was once present but not present last boot # Is its old name still availble? if util.tname_free(cur_state, lastnic.tname): # Old name is available - give it its old name back LOG.info("old nic '%s' returned and its name is free" % (nic,)) __rename_nic(nic, lastnic.tname, transactions, cur_state) continue else: LOG.info("old nic '%s' returned but its name is taken. " "Treating it as new" % (nic,)) continue LOG.info("nic '%s' seems brand new. Defering until later for renaming" % (nic,)) LOG.debug("Finished dynamic rules. Transactions are \n%s\n" "Current State is \n%s" % (util.niceformat(transactions), util.niceformat(cur_state))) # 3rd pass. This pass ensures that replaced multi-nic functions # are ordered the same as a the previous state, relative to MACs. 
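    # (Concretely: when an identical multi-port card has been swapped in, the
    # new port with the numerically lowest MAC inherits the eth name that the
    # old port with the numerically lowest MAC had, and so on up the list.)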
# # New multi-nic functions get ordered below. if len(multinic_functions): for fn in multinic_functions: lastnics = util.get_nics_with_pci(last_state + old_state, fn) newnics = util.get_nics_with_pci(cur_state, fn) # Check that the function still has the same number of nics if len(lastnics) != len(newnics): LOG.warn("multi-nic function %s had %d nics but now has %d. " "Defering all until later for renaming" % (fn, len(lastnics), len(newnics))) continue # Check that all nics are still pending a rename if False in (util.needs_renaming(n) for n in newnics): LOG.info("Some of %s's nics have alrealdy been renamed. " "Defering the rest until later for renaming" % (fn, )) continue # Check that all expected target names are free if False in (util.tname_free(cur_state, n.tname) for n in lastnics): LOG.info("Some of %s's nics target names already used. " "Defering the rest until later for renaming" % (fn, )) continue # Assume the MACs are ordered reliably. They are typically adjacent lastnics.sort(key = lambda n: n.mac.integer) newnics.sort(key = lambda n: n.mac.integer) for new, old in zip(newnics, lastnics): __rename_nic(new, old.tname, transactions, cur_state) LOG.debug("Finished multi-nic logic. Transactions are \n%s\n" "Current State is \n%s" % (util.niceformat(transactions), util.niceformat(cur_state))) # There may be some new multinic functions. We can't trust biosdevname's # order for these NICs, so for each NIC collect the reported "order" <n> # (derived directly from eth<n>) and sort them according to the MACs if len(multinic_functions): LOG.debug("New multi-nic logic - attempting to re-order") for fn in multinic_functions: newnics = util.get_nics_with_pci(filter(util.needs_renaming, cur_state), fn) orders = sorted(map(lambda x: x.order, newnics)) newnics.sort(key = lambda n: n.mac.integer) for nic, neworder in zip(newnics, orders): LOG.debug("NIC '%s' getting new order '%s'" % (nic, neworder)) nic.order = neworder # For completely new network cards which we have never seen before, work out # a safe new number to assign it ethnumbers = sorted( map(lambda x: int(x[3:]), filter(lambda x: VALID_ETH_NAME.match(x) is not None, map(lambda x: x.tname or x.kname, static_rules + cur_state + last_state)))) if len(ethnumbers): nextethnum = ethnumbers[-1]+1 else: nextethnum = 0 # 4th pass. This should only affect brand new network cards unreferenced # by previous state. Prefer the order (e.g. from biosdevname), given # no other objections. for nic in sorted(filter(util.needs_renaming, cur_state), key=lambda x: x.order): LOG.info("Renaming brand new nic '%s'" % (nic,)) if ( VALID_ETH_NAME.match(nic.kname) is not None and nic.kname not in map(lambda x: x.tname, cur_state) ): # User has been messing around with state files but not the udev # rules. If the eth name is still free, give it nic.tname = nic.kname # No transaction needed continue newname = "eth%d" % (nextethnum, ) nextethnum += 1 __rename_nic(nic, newname, transactions, cur_state) LOG.debug("Finished all logic. Transactions are \n%s\n" "Current State is \n%s" % (util.niceformat(transactions), util.niceformat(cur_state))) return transactions def rename( static_rules, cur_state, last_state, old_state ): """ Rename current state based on the rules and past state. This function sanitises the input and delgates the renaming logic to __rename. 
@param static_rules List of MACPCI objects representing rules @param cur_state List of MACPCI objects representing the current state @param last_state List of MACPCI objects representing the last boot state @param old_state List of MACPCI objects representing the old state @throws StaticRuleError, CurrentStateError, LastStateError, TypeError @returns list of tuples of name changes requred """ if len(static_rules): # Verify types and properties of the list for e in static_rules: # Verify type if not isinstance(e, MACPCI): raise TypeError("Expected List of MACPCI objects") # Verify kname is None if e.kname is not None: raise StaticRuleError("Expected static rule kname to be None") # Verify tname points to 'eth<foo>' if not e.tname.startswith("eth"): raise StaticRuleError("Static rule '%s' expected to name to " "'eth<num>'" % (e, )) # Verify no two static rules refer to the same eth name _ = frozenset( map(lambda x: x.tname, static_rules) ) if len(_) != len(static_rules): raise StaticRuleError("Some static rules alias the same " "eth name") # Verify no two static rules refer to the same mac address _ = frozenset( map(lambda x: x.mac, static_rules) ) if len(_) != len(static_rules): raise StaticRuleError("Some static rules alias the same MAC " "address") if len(cur_state): # Filter out iBFT NICs cur_state = filter(lambda x: VALID_IBFT_NAME.match(x.kname) is None, cur_state) # Verify types and properties of the list for e in cur_state: if not isinstance(e, MACPCI): raise TypeError("Expected List of MACPCI objects") # Verify tname is None if e.tname is not None: raise CurrentStateError("Expected current state tname to be " " None") # Verify kname is 'eth<foo>' or 'side-<num>-eth<num>' if VALID_CUR_STATE_KNAME.match(e.kname) is None: raise StaticRuleError("Current state '%s' expected to name to " "'eth<num>' or 'side-<num>-eth<num>'" % (e, )) # Verify no two entries of current state refer to the same eth name _ = frozenset( map(lambda x: x.kname, cur_state) ) if len(_) != len(cur_state): raise CurrentStateError("Some entries of current state alias the " "same eth name") # Verify no two entries of current state refer to the same mac address _ = frozenset( map(lambda x: x.mac, cur_state) ) if len(_) != len(cur_state): raise CurrentStateError("Some entries of current state alias the " "same MAC address") if len(last_state): # Verify types in the list for e in last_state: if not isinstance(e, MACPCI): raise TypeError("Expected List of MACPCI objects") # Verify kname is None if e.kname is not None: raise LastStateError("Expected last state kname to be None") # Verify kname is valid if VALID_ETH_NAME.match(e.tname) is None: raise LastStateError("Last state '%s' target name is invalid" % (e, )) # Verify no two entries of last state refer to the same eth name _ = frozenset( map(lambda x: x.tname, last_state) ) if len(_) != len(last_state): raise LastStateError("Some entries of last state alias the " "same eth name") # Verify no two entries of last state refer to the same mac address _ = frozenset( map(lambda x: x.mac, last_state) ) if len(_) != len(last_state): raise LastStateError("Some entries of last state alias the " "same MAC address") if len(old_state): # Verify types in the list for e in old_state: if not isinstance(e, MACPCI): raise TypeError("Expected List of MACPCI objects") # Verify kname is None if e.kname is not None: raise OldStateError("Expected old state kname to be None") # Verify tname points to 'eth<foo>' if not e.tname.startswith("eth"): raise OldStateError("Old state '%s' expected 
tname to " "'eth<num>'" % (e, )) return rename_logic(static_rules, cur_state, last_state, old_state)
"""Tests for samsungtv Components.""" import asyncio import unittest from unittest.mock import call, patch, MagicMock from asynctest import mock import pytest import tests.common from homeassistant.components.media_player import SUPPORT_TURN_ON, \ MEDIA_TYPE_CHANNEL, MEDIA_TYPE_URL from homeassistant.components.media_player.samsungtv import setup_platform, \ CONF_TIMEOUT, SamsungTVDevice, SUPPORT_SAMSUNGTV from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, STATE_ON, \ CONF_MAC, STATE_OFF from tests.common import MockDependency from homeassistant.util import dt as dt_util from datetime import timedelta WORKING_CONFIG = { CONF_HOST: 'fake', CONF_NAME: 'fake', CONF_PORT: 8001, CONF_TIMEOUT: 10, CONF_MAC: 'fake', 'uuid': None, } DISCOVERY_INFO = { 'name': 'fake', 'model_name': 'fake', 'host': 'fake' } class AccessDenied(Exception): """Dummy Exception.""" class ConnectionClosed(Exception): """Dummy Exception.""" class UnhandledResponse(Exception): """Dummy Exception.""" class TestSamsungTv(unittest.TestCase): """Testing Samsungtv component.""" @MockDependency('samsungctl') @MockDependency('wakeonlan') def setUp(self, samsung_mock, wol_mock): """Set up test environment.""" self.hass = tests.common.get_test_home_assistant() self.hass.start() self.hass.block_till_done() self.device = SamsungTVDevice(**WORKING_CONFIG) self.device._exceptions_class = mock.Mock() self.device._exceptions_class.UnhandledResponse = UnhandledResponse self.device._exceptions_class.AccessDenied = AccessDenied self.device._exceptions_class.ConnectionClosed = ConnectionClosed def tearDown(self): """Tear down test data.""" self.hass.stop() @MockDependency('samsungctl') @MockDependency('wakeonlan') def test_setup(self, samsung_mock, wol_mock): """Testing setup of platform.""" with mock.patch( 'homeassistant.components.media_player.samsungtv.socket'): add_entities = mock.Mock() setup_platform( self.hass, WORKING_CONFIG, add_entities) @MockDependency('samsungctl') @MockDependency('wakeonlan') def test_setup_discovery(self, samsung_mock, wol_mock): """Testing setup of platform with discovery.""" with mock.patch( 'homeassistant.components.media_player.samsungtv.socket'): add_entities = mock.Mock() setup_platform(self.hass, {}, add_entities, discovery_info=DISCOVERY_INFO) @MockDependency('samsungctl') @MockDependency('wakeonlan') @mock.patch( 'homeassistant.components.media_player.samsungtv._LOGGER.warning') def test_setup_none(self, samsung_mock, wol_mock, mocked_warn): """Testing setup of platform with no data.""" with mock.patch( 'homeassistant.components.media_player.samsungtv.socket'): add_entities = mock.Mock() setup_platform(self.hass, {}, add_entities, discovery_info=None) mocked_warn.assert_called_once_with("Cannot determine device") add_entities.assert_not_called() def test_update_on(self): """Testing update tv on.""" self.device.update() self.assertEqual(STATE_ON, self.device._state) def test_update_off(self): """Testing update tv off.""" _remote = mock.Mock() _remote.control = mock.Mock( side_effect=OSError('Boom')) self.device.get_remote = mock.Mock(return_value=_remote) self.device.update() assert STATE_OFF == self.device._state def test_send_key(self): """Test for send key.""" self.device.send_key('KEY_POWER') self.assertEqual(STATE_ON, self.device._state) def test_send_key_broken_pipe(self): """Testing broken pipe Exception.""" _remote = mock.Mock() _remote.control = mock.Mock( side_effect=BrokenPipeError('Boom')) self.device.get_remote = mock.Mock(return_value=_remote) self.device.send_key('HELLO') 
self.assertIsNone(self.device._remote) self.assertEqual(STATE_ON, self.device._state) def test_send_key_connection_closed_retry_succeed(self): """Test retry on connection closed.""" _remote = mock.Mock() _remote.control = mock.Mock(side_effect=[ self.device._exceptions_class.ConnectionClosed('Boom'), mock.DEFAULT]) self.device.get_remote = mock.Mock(return_value=_remote) command = 'HELLO' self.device.send_key(command) self.assertEqual(STATE_ON, self.device._state) # verify that _remote.control() get called twice because of retry logic expected = [mock.call(command), mock.call(command)] assert expected == _remote.control.call_args_list def test_send_key_unhandled_response(self): """Testing unhandled response exception.""" _remote = mock.Mock() _remote.control = mock.Mock( side_effect=self.device._exceptions_class.UnhandledResponse('Boom') ) self.device.get_remote = mock.Mock(return_value=_remote) self.device.send_key('HELLO') self.assertIsNone(self.device._remote) self.assertEqual(STATE_ON, self.device._state) def test_send_key_os_error(self): """Testing broken pipe Exception.""" _remote = mock.Mock() _remote.control = mock.Mock( side_effect=OSError('Boom')) self.device.get_remote = mock.Mock(return_value=_remote) self.device.send_key('HELLO') assert self.device._remote is None assert STATE_OFF == self.device._state def test_power_off_in_progress(self): """Test for power_off_in_progress.""" assert not self.device._power_off_in_progress() self.device._end_of_power_off = dt_util.utcnow() + timedelta( seconds=15) assert self.device._power_off_in_progress() def test_name(self): """Test for name property.""" assert 'fake' == self.device.name def test_state(self): """Test for state property.""" self.device._state = STATE_ON self.assertEqual(STATE_ON, self.device.state) self.device._state = STATE_OFF assert STATE_OFF == self.device.state def test_is_volume_muted(self): """Test for is_volume_muted property.""" self.device._muted = False assert not self.device.is_volume_muted self.device._muted = True assert self.device.is_volume_muted def test_supported_features(self): """Test for supported_features property.""" self.device._mac = None assert SUPPORT_SAMSUNGTV == self.device.supported_features self.device._mac = "fake" assert SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON == \ self.device.supported_features def test_turn_off(self): """Test for turn_off.""" self.device.send_key = mock.Mock() _remote = mock.Mock() _remote.close = mock.Mock() self.get_remote = mock.Mock(return_value=_remote) self.device._end_of_power_off = None self.device.turn_off() assert self.device._end_of_power_off is not None self.device.send_key.assert_called_once_with('KEY_POWER') self.device.send_key = mock.Mock() self.device._config['method'] = 'legacy' self.device.turn_off() self.device.send_key.assert_called_once_with('KEY_POWEROFF') @mock.patch( 'homeassistant.components.media_player.samsungtv._LOGGER.debug') def test_turn_off_os_error(self, mocked_debug): """Test for turn_off with OSError.""" _remote = mock.Mock() _remote.close = mock.Mock(side_effect=OSError("BOOM")) self.device.get_remote = mock.Mock(return_value=_remote) self.device.turn_off() mocked_debug.assert_called_once_with("Could not establish connection.") def test_volume_up(self): """Test for volume_up.""" self.device.send_key = mock.Mock() self.device.volume_up() self.device.send_key.assert_called_once_with("KEY_VOLUP") def test_volume_down(self): """Test for volume_down.""" self.device.send_key = mock.Mock() self.device.volume_down() 
self.device.send_key.assert_called_once_with("KEY_VOLDOWN") def test_mute_volume(self): """Test for mute_volume.""" self.device.send_key = mock.Mock() self.device.mute_volume(True) self.device.send_key.assert_called_once_with("KEY_MUTE") def test_media_play_pause(self): """Test for media_next_track.""" self.device.send_key = mock.Mock() self.device._playing = False self.device.media_play_pause() self.device.send_key.assert_called_once_with("KEY_PLAY") assert self.device._playing self.device.send_key = mock.Mock() self.device.media_play_pause() self.device.send_key.assert_called_once_with("KEY_PAUSE") assert not self.device._playing def test_media_play(self): """Test for media_play.""" self.device.send_key = mock.Mock() self.device._playing = False self.device.media_play() self.device.send_key.assert_called_once_with("KEY_PLAY") assert self.device._playing def test_media_pause(self): """Test for media_pause.""" self.device.send_key = mock.Mock() self.device._playing = True self.device.media_pause() self.device.send_key.assert_called_once_with("KEY_PAUSE") assert not self.device._playing def test_media_next_track(self): """Test for media_next_track.""" self.device.send_key = mock.Mock() self.device.media_next_track() self.device.send_key.assert_called_once_with("KEY_FF") def test_media_previous_track(self): """Test for media_previous_track.""" self.device.send_key = mock.Mock() self.device.media_previous_track() self.device.send_key.assert_called_once_with("KEY_REWIND") def test_turn_on(self): """Test turn on.""" self.device.send_key = mock.Mock() self.device._mac = None self.device.turn_on() self.device.send_key.assert_called_once_with('KEY_POWERON') self.device._wol.send_magic_packet = mock.Mock() self.device._mac = "fake" self.device.turn_on() self.device._wol.send_magic_packet.assert_called_once_with("fake") @pytest.fixture def samsung_mock(): """Mock samsungctl.""" with patch.dict('sys.modules', { 'samsungctl': MagicMock(), }): yield async def test_play_media(hass, samsung_mock): """Test for play_media.""" asyncio_sleep = asyncio.sleep sleeps = [] async def sleep(duration, loop): sleeps.append(duration) await asyncio_sleep(0, loop=loop) with patch('asyncio.sleep', new=sleep): device = SamsungTVDevice(**WORKING_CONFIG) device.hass = hass device.send_key = mock.Mock() await device.async_play_media(MEDIA_TYPE_CHANNEL, "576") exp = [call("KEY_5"), call("KEY_7"), call("KEY_6")] assert device.send_key.call_args_list == exp assert len(sleeps) == 3 async def test_play_media_invalid_type(hass, samsung_mock): """Test for play_media with invalid media type.""" url = "https://example.com" device = SamsungTVDevice(**WORKING_CONFIG) device.send_key = mock.Mock() await device.async_play_media(MEDIA_TYPE_URL, url) assert device.send_key.call_count == 0 async def test_play_media_channel_as_string(hass, samsung_mock): """Test for play_media with invalid channel as string.""" url = "https://example.com" device = SamsungTVDevice(**WORKING_CONFIG) device.send_key = mock.Mock() await device.async_play_media(MEDIA_TYPE_CHANNEL, url) assert device.send_key.call_count == 0 async def test_play_media_channel_as_non_positive(hass, samsung_mock): """Test for play_media with invalid channel as non positive integer.""" device = SamsungTVDevice(**WORKING_CONFIG) device.send_key = mock.Mock() await device.async_play_media(MEDIA_TYPE_CHANNEL, "-4") assert device.send_key.call_count == 0
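

# Illustrative sketch (not part of the original suite): the three invalid
# play_media cases above could equally be written as a single parametrized
# test.  Only fixtures and constants already imported in this module are used.
@pytest.mark.parametrize("media_type,media_id", [
    (MEDIA_TYPE_URL, "https://example.com"),  # unsupported media type
    (MEDIA_TYPE_CHANNEL, "not-a-number"),     # channel must be an integer
    (MEDIA_TYPE_CHANNEL, "-4"),               # channel must be positive
])
async def test_play_media_invalid_parametrized(hass, samsung_mock,
                                               media_type, media_id):
    """No key presses should be sent for invalid play_media requests."""
    device = SamsungTVDevice(**WORKING_CONFIG)
    device.send_key = mock.Mock()
    await device.async_play_media(media_type, media_id)
    assert device.send_key.call_count == 0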
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Moving sky plot to bottom of object spectrum plot, added badlines # stuff,restricted z range # for cross correlation to slider z +/-0.3 # # Adding xcsao button - need shady iraf import shenanigans, otherwise iraf # causes feedback between tk # widgets and variables to not work! # # Same as visualTemplateRedshift.py, but accepts wildcards and automatically # saves out plots/results # (quit button -> done button) # # Has a lovely tk interface # # Overplots a spectrum and spectral template using astSED functions # This works with SDSS log wavelength templates # # Input spectra can either be DEEP2 pipeline spec1d files, or similar # formatted FITS tables with # spectrum in extension '1D_SPECTRUM'. Masking out values in input spectrum # with values > 10*median # (gets rid of dodgy ends in efosc2 spectra) import os import sys import Tkinter import pyfits import numpy import pylab import matplotlib.patches as patches import IPython from astLib import astSED from astLib import astStats from astLib import astWCS from scipy import optimize pylab.matplotlib.interactive(True) #----------------------------------------------------------------------------- # SDSS templates to use - note later on we explicitly load in the Tremonti et # al. starburst template # which is in a different format and stored under # $HOME/Astro_Software/TremontiStarburstTemplate tempDir = os.environ['HOME']+"/Projects/redshiftCode/VisualTemplateRedshiftTemplates" #tremontiFileName=os.environ['HOME']+os.path.sep+"Astro_Software"+os.path.sep+"TremontiStarburstTemplate/fos_ghrs_composite.txt" templateFileNames = [ # Galaxies "spDR2-023.fit", "spDR2-024.fit", "spDR2-025.fit", "spDR2-026.fit", "spDR2-027.fit", "spDR2-028.fit", # QSOs "spDR2-029.fit", "spDR2-030.fit", "spDR2-031.fit", "spDR2-032.fit" ] # Added this because Tremonti starburst template added templateLabels = [ # Galaxies "SDSS-023", "SDSS-024", "SDSS-025", "SDSS-026", "SDSS-027", "SDSS-028", # QSOs "SDSS-029", "SDSS-030", "SDSS-031", "SDSS-032" #, # Starburst #"T03 Starburst" ] # List of spectral line labels and wavelengths # Unlike the old style scripts, these don't need unique labels/names spectralFeaturesCatalogue = [ ["Halpha", 6562.8], ["Hbeta", 4861.33], ["Hgamma", 4340.50], ["Hdelta", 4101.70], ["Hepsilon", 3970.10], ["Htheta", 3798.6], ["Hzeta", 3888.9], ["HeI", 4471.5, 5875.6], ["HeII", 4338.6, 4685.70], ["Lyalpha", 1216.0], ["CII", 1335.30], ["CIV", 1549.0], ["[NII]", 6548.1, 6583.4], ["NV", 1240.14], ["OI", 1304.3], ["[OI]", 6300.3], ["[OII]", 2471.03, 3727.3], ["OIII", 2672.04], ["[OIII]", 4958.91, 5006.84], ["OIV]", 1402.06], ["[NeIII]", 3967.5], ["[NeIV]", 1602.0, 2423.83], ["[NeV]", 1575.0, 3426.0], ["MgI", 2852.0, 3830.4, 5175.4], ["MgII", 2800.0, 2803.0, 2798.0], ["AlII]", 2669.95], ["SiII", 1262.59, 1306.82], ["SiIV", 1396.76], ["[SII]", 6717.0, 6731.3], ["CaI", 4226.7], ["H", 3968.47], ["K", 3933.68], ["G", 4307.74], ["E", 5269.00], ["FeI", 4045.8, 4271.7, 4329.7, 4045.8], ["FeII", 2344.0, 2374.0, 2382.0, 2586.0, 2600.0] ] class App: def __init__(self, master, objectSpecFileNames, outDir): """outDir is a dir in which to write output """ # Do some initial setup of things self.objectSpecFileNames = objectSpecFileNames self.currentSpecFileIndex = 0 # Create the output text file for results self.outDir = outDir if not os.path.exists(outDir): os.makedirs(outDir) self.outFile = file(outDir + os.path.sep +\ self.objectSpecFileNames[self.currentSpecFileIndex].rstrip('.fits') + ".results", "w") 
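        # Write a tab-separated header row describing the columns of the
        # per-fiber results file.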
self.outFile.write("#Fiber\tRedshift\tRedshiftError" + "\tQuality\tComments\n") self.outFile.close() # Load Templates self.templates = self.loadTemplates(tempDir, templateFileNames) # Load Science print "Checking spectrum %d/%d ..." % (self.currentSpecFileIndex + 1, len(self.objectSpecFileNames)) # Single Frame if 0: obj = self.loadObjectSpectrum(objectSpecFileNames[ self.currentSpecFileIndex]) self.objectSpecFileName = objectSpecFileNames[ self.currentSpecFileIndex] # for plot titles etc. self.objSED = obj['object'] self.unsmoothedObjFlux = self.objSED.flux[:] self.skySED = obj['sky'] # IFU data else: self.obj = self.loadIFUSpectra(objectSpecFileNames[ self.currentSpecFileIndex]) self.objectSpecFileName = objectSpecFileNames[ self.currentSpecFileIndex] # for plot titles etc. self.fiberNumber = len(self.obj) # List of feature names in spectralFeaturesCatalogue to plot self.plotFeatures = [] ########################### ### BEGIN LAYOUT OF APP ### ########################### # Layout - these get drawn top to bottom self.buttonFrame = Tkinter.Frame(master, padx=5, pady=5) self.buttonFrame.grid() self.qualityFrame = Tkinter.Frame(master, padx=5, pady=5) self.qualityFrame.grid() self.fiberFrame = Tkinter.Frame(master, padx=5, pady=5) self.fiberFrame.grid() self.smoothFrame = Tkinter.Frame(master, padx=5, pady=5) self.smoothFrame.grid() templatesFrame = Tkinter.Frame(master, padx=5, pady=5) templatesFrame.grid() scaleFrame = Tkinter.Frame(master, padx=5, pady=5) scaleFrame.grid() featuresFrame = Tkinter.Frame(master, padx=5, pady=5) featuresFrame.grid() # Buttons frame self.redrawButton = Tkinter.Button(self.buttonFrame, text="Redraw plot", command=self.redrawPlot) self.redrawButton.grid(row=0, column=0) self.savePNGButton = Tkinter.Button(self.buttonFrame, text="Save .png", command=self.savePNG) self.savePNGButton.grid(row=0, column=1) self.outPathLabel = Tkinter.Label(self.buttonFrame, text="Output .png file : ") self.outPathLabel.grid(row=0, column=2) self.outPathEntryVar = Tkinter.StringVar() self.outPathEntryVar.set(self.outDir + os.path.sep + self.objectSpecFileName.replace( ".fits", ".png")) self.outPathEntry = Tkinter.Entry(self.buttonFrame, textvariable=self.outPathEntryVar, width=80) self.outPathEntry.grid(row=0, column=3) self.nextButton = Tkinter.Button(self.buttonFrame, text="Log", bg="blue", command=self.log) self.nextButton.grid(row=0, column=6) self.quitButton = Tkinter.Button(self.buttonFrame, text="QUIT", bg="red", command=self.buttonFrame.quit) self.quitButton.grid(row=0, column=7) # Quality frame, contains radio buttons and comments field self.qualityRadioVar = Tkinter.IntVar() self.qualityRadioList = [] self.qualityLabel = Tkinter.Label(self.qualityFrame, text="Quality flag : ", anchor=Tkinter.E) self.qualityLabel.grid(row=2, column=0) for i in range(4): self.qualityRadioList.append( Tkinter.Radiobutton(self.qualityFrame, text=str(i), variable=self.qualityRadioVar, value=i, command=self.redrawPlot)) self.qualityRadioList[-1].grid(row=2, column=i + 1) self.qualityRadioList[0].select() # Comment Box self.commentsLabel = Tkinter.Label(self.qualityFrame, text="Comments: ") self.commentsLabel.grid(row=2, column=i + 2) self.commentsEntryVar = Tkinter.StringVar() self.commentsEntryVar.set("") self.commentsEntry = Tkinter.Entry(self.qualityFrame, textvariable=self.commentsEntryVar, width=80) self.commentsEntry.grid(row=2, column=i + 3) # Slider to choose fiber self.fibernumberVar = Tkinter.DoubleVar() self.fibernumberLabel = Tkinter.Label(self.fiberFrame, text='Fiber Number : ', 
anchor=Tkinter.E) self.fibernumberLabel.grid(row=1, column=0) self.fibernumberScale = Tkinter.Scale(self.fiberFrame, orient=Tkinter.HORIZONTAL, length=300, from_=1, to=self.fiberNumber, tickinterval=60, command=None, variable=self.fibernumberVar, resolution=1) self.fibernumberScale.set(1) self.fibernumberScale.grid(row=1, column=1, columnspan=3) # Buttons to finely tune the fiber number self.fibernumberMinusButton = Tkinter.Button(self.fiberFrame, text="-", command=self.decreaseFiber) self.fibernumberMinusButton.grid(row=1, column=4) self.fibernumberPlusButton = Tkinter.Button(self.fiberFrame, text="+", command=self.increaseFiber) self.fibernumberPlusButton.grid(row=1, column=5) # Checkbox to set fiber as sky self.ignoreEmission = Tkinter.IntVar() self.ignoreEmissionLabel = Tkinter.Label(self.fiberFrame, text="Ignore Emission", width=15, anchor=Tkinter.E) self.ignoreEmissionLabel.grid(row=1, column=10) self.ignoreEmissionCheckButton = Tkinter.Checkbutton( self.fiberFrame, variable=self.ignoreEmission, command=None) self.ignoreEmissionCheckButton.grid(row=1, column=11) # Slider to set smoothing of object spectrum self.smoothScaleVar = Tkinter.DoubleVar() self.smoothLabel = Tkinter.Label(self.smoothFrame, text="Spectrum smoothing : ", anchor=Tkinter.E) self.smoothLabel.grid(row=1, column=0) self.smoothScale = Tkinter.Scale(self.smoothFrame, orient=Tkinter.HORIZONTAL, length=300, from_=0, to=100, tickinterval=25, command=self.smoothSpectrum, variable=self.smoothScaleVar, resolution=1) # Initial Smoothing self.smoothScale.set(5) x = self.smoothScale.get() self.objSED = self.obj[self.fiberNumber - self.fibernumberScale.get()]['object'] self.unsmoothedObjFlux = self.objSED.flux[:] self.objSED.flux = self.unsmoothedObjFlux[:] self.objSED.smooth(x) self.smoothScale.grid(row=1, column=1, columnspan=3) # Buttons to finely tune the smoothing self.smoothPlusButton = Tkinter.Button(self.smoothFrame, text="+", command=self.increaseSmoothing) self.smoothPlusButton.grid(row=1, column=5) self.smoothMinusButton = Tkinter.Button(self.smoothFrame, text="-", command=self.decreaseSmoothing) self.smoothMinusButton.grid(row=1, column=4) # Min, max wavelength range self.minWavelengthLabel = Tkinter.Label(self.smoothFrame, text="Min WL:", anchor=Tkinter.E) self.minWavelengthLabel.grid(row=1, column=6) self.minWavelengthEntryVar = Tkinter.StringVar() self.minWavelengthEntryVar.set("4500") self.minWavelengthEntry = Tkinter.Entry( self.smoothFrame, textvariable=self.minWavelengthEntryVar, width=6) self.minWavelengthEntry.grid(row=1, column=7) self.maxWavelengthLabel = Tkinter.Label(self.smoothFrame, text="Max WL:", anchor=Tkinter.E) self.maxWavelengthLabel.grid(row=1, column=8) self.maxWavelengthEntryVar = Tkinter.StringVar() self.maxWavelengthEntryVar.set("6500") self.maxWavelengthEntry = Tkinter.Entry( self.smoothFrame, textvariable=self.maxWavelengthEntryVar, width=6) self.maxWavelengthEntry.grid(row=1, column=9) # Alt normalisation method self.altNormCheckVar = Tkinter.IntVar() self.altNormLabel = Tkinter.Label(self.smoothFrame, text="Alt norm", width=10, anchor=Tkinter.E) self.altNormLabel.grid(row=1, column=10) self.altNormCheckButton = Tkinter.Checkbutton( self.smoothFrame, variable=self.altNormCheckVar, command=self.plotSkyChanged) self.altNormCheckButton.grid(row=1, column=11) # Turn sky plotting on/off self.plotSkyCheckVar = Tkinter.IntVar() self.plotSkyLabel = Tkinter.Label(self.smoothFrame, text="Plot sky", width=10, anchor=Tkinter.E) self.plotSkyLabel.grid(row=1, column=12) self.plotSkyCheckButton = 
Tkinter.Checkbutton( self.smoothFrame, variable=self.plotSkyCheckVar, command=self.plotSkyChanged) self.plotSkyCheckButton.grid(row=1, column=13) # Turn Error plotting on/off self.plotErrorCheckVar = Tkinter.IntVar() self.plotErrorLabel = Tkinter.Label(self.smoothFrame, text="Plot Error", width=10, anchor=Tkinter.E) self.plotErrorLabel.grid(row=1, column=14) self.plotErrorCheckButton = Tkinter.Checkbutton( self.smoothFrame, variable=self.plotErrorCheckVar, command=self.plotErrorChanged) self.plotErrorCheckButton.grid(row=1, column=15) # Templates frame # Radio buttons used to select template self.templateRadioVar = Tkinter.IntVar() self.templateRadioList = [] self.templateLabel = Tkinter.Label(templatesFrame, text="Template:", anchor=Tkinter.E) self.templateLabel.grid(row=2, column=0) for i in range(len(self.templates)): try: tempName = templateLabels[i] except IndexError: sys.exit() self.templateRadioList.append(Tkinter.Radiobutton( templatesFrame, text=tempName, variable=self.templateRadioVar, value=i, command=self.redrawPlot)) self.templateRadioList[-1].grid(row=2, column=i + 1) self.templateRadioList[0].select() # Slider used to set trial redshift of template self.redshiftScaleVar = Tkinter.DoubleVar() self.redshiftLabel = Tkinter.Label(scaleFrame, text="Template redshift : ", anchor=Tkinter.E) self.redshiftLabel.grid(row=3, column=0) self.redshiftScale = Tkinter.Scale(scaleFrame, orient=Tkinter.HORIZONTAL, length=600, from_=0.0, to=2.01, tickinterval=1, command=self.getRedshiftScaleValue, variable=self.redshiftScaleVar, resolution=0.001) self.redshiftScale.set(0.5) self.redshiftScale.grid(row=3, column=1, columnspan=3) # Buttons to finely tune the trial redshift self.redshiftPlusButton = Tkinter.Button(scaleFrame, text="+", command=self.increaseRedshift) self.redshiftPlusButton.grid(row=3, column=5) self.redshiftMinusButton = Tkinter.Button(scaleFrame, text="-", command=self.decreaseRedshift) self.redshiftMinusButton.grid(row=3, column=4) # Redshift uncertainty entry box self.redshiftErrorLabel = Tkinter.Label(scaleFrame, text="+/-") self.redshiftErrorLabel.grid(row=3, column=6) self.redshiftErrorEntryVar = Tkinter.StringVar() self.redshiftErrorEntryVar.set(0.001) self.redshiftErrorEntry = Tkinter.Entry( scaleFrame, textvariable=self.redshiftErrorEntryVar, width=10) self.redshiftErrorEntry.grid(row=3, column=7) # XCSAO button self.runXCSAOButton = Tkinter.Button(scaleFrame, text="XC galaxies", command=self.runXCSAOGalaxies, fg="green") self.runXCSAOButton.grid(row=3, column=8) self.runXCSAOButton = Tkinter.Button(scaleFrame, text="XC QSOs", command=self.runXCSAOQSOs, fg="green") self.runXCSAOButton.grid(row=3, column=9) # Features to optionally plot # Have to have separate labels for the check boxes because other # layout goes stupid self.featuresCheckList = [] self.featuresCheckLabelsList = [] self.featuresCheckVars = [] maxPerRow = 20 row = 0 column = 0 for i in range(len(spectralFeaturesCatalogue)): if column == maxPerRow: row += 1 column = 0 self.featuresCheckVars.append(Tkinter.IntVar()) self.featuresCheckLabelsList.append( Tkinter.Label(featuresFrame, text=spectralFeaturesCatalogue[i][0], width=10, anchor=Tkinter.E)) self.featuresCheckLabelsList[-1].grid(row=row + 4, column=column) self.featuresCheckList.append(Tkinter.Checkbutton( featuresFrame, variable=self.featuresCheckVars[-1], command=self.setPlotFeatures)) self.featuresCheckList[-1].grid(row=row + 4, column=column + 1) column += 2 # Start up the figure for drawing pylab.figure(figsize=(12, 8)) # Do initial plot 
self.updatePlot(self.obj[self.fiberNumber - self.fibernumberScale.get()]['object'], self.templates[self.templateRadioVar.get()], self.obj[self.fiberNumber - self.fibernumberScale.get()]['sky'], self.obj[self.fiberNumber - self.fibernumberScale.get()]['error'], self.redshiftScaleVar.get(), tempLabel=os.path.split(templateLabels[ self.templateRadioVar.get()])[-1], redrawSky=True, redrawFeatures=True, plotFeatures=self.plotFeatures) # self.updatePlot(self.objSED, # self.templates[self.templateRadioVar.get()], # self.skySED, # self.redshiftScaleVar.get(), # tempLabel=os.path.split(templateLabels[ # self.templateRadioVar.get()])[-1], # redrawSky=True, # redrawFeatures=True, # plotFeatures=self.plotFeatures) ######################### ### END OF APP LAYOUT ### ######################### def smoothSpectrum(self, event): """ Smooths the object spectrum when the smooth slider is updated """ self.objSED = self.obj[self.fiberNumber - self.fibernumberScale.get()]['object'] #self.unsmoothedObjFlux = self.objSED.flux[:] #self.objSED.flux = self.unsmoothedObjFlux[:] self.objSED.smooth(self.smoothScale.get()) def increaseSmoothing(self): """ Increases the smoothing by 1 """ self.smoothScale.set(self.smoothScaleVar.get() + 1) def decreaseSmoothing(self): """ Decreases the smoothing by 1 """ self.smoothScale.set(self.smoothScaleVar.get() - 1) def changeFiber(self): self.updatePlot(self.obj[self.fiberNumber - self.fibernumberScale.get()]['object'], self.templates[self.templateRadioVar.get()], self.obj[self.fiberNumber - self.fibernumberScale.get()]['sky'], self.obj[self.fiberNumber - self.fibernumberScale.get()]['error'], self.redshiftScaleVar.get(), tempLabel=os.path.split(templateLabels[ self.templateRadioVar.get()])[-1], redrawSky=True, redrawFeatures=True, plotFeatures=self.plotFeatures) def increaseFiber(self): """ Increases the fiber number by 1 """ self.fibernumberScale.set(self.fibernumberVar.get() + 1) self.outPathEntryVar.set(self.outDir + os.path.sep + self.objectSpecFileName.rstrip(".fits") + '_' + str(self.fibernumberScale.get()) + '.png') self.changeFiber() def decreaseFiber(self): """ Decreases the fiber number by 1 """ self.fibernumberScale.set(self.fibernumberVar.get() - 1) self.outPathEntryVar.set(self.outDir + os.path.sep + self.objectSpecFileName.rstrip(".fits") + '_' + str(self.fibernumberScale.get()) + '.png') self.changeFiber() def resetFeatures(self): """ Resets (turns off) the plotting of the spectral features """ for i in range(len(self.plotFeatures)): self.featuresCheckVars[i].set(0) self.setPlotFeatures() def getRedshiftScaleValue(self, event): """ Gets the current value of the template redshift from the slider """ self.redshiftScale.get() def increaseRedshift(self): """ Increases the template redshift by delta-z """ self.redshiftScale.set(self.redshiftScaleVar.get() + 0.001) def decreaseRedshift(self): """ Decreases the template redshift by delta-z """ self.redshiftScale.set(self.redshiftScaleVar.get() - 0.001) def setPlotFeatures(self): """ Sets self.plotFeatues and triggers redrawing of plot, according to which spectral features are selected. Redraws plot afterwards. """ self.plotFeatures = [] for i in range(len(self.featuresCheckVars)): val = self.featuresCheckVars[i].get() self.plotFeatures.append(val) self.redrawPlot() def savePNG(self): """ Saves the current figure to a png """ pylab.savefig(self.outPathEntry.get()) def runXCSAOGalaxies(self): """ Function which chooses what templates to include when matching galaxy type objects. Calls runXCSAO. 
""" templatesToInclude = [] for temp in templateFileNames: tempNum = int(os.path.split(temp)[-1].split("-")[-1].split(".")[0]) if 23 <= tempNum < 29: templatesToInclude.append(temp) self.runXCSAO(templatesToInclude=templatesToInclude) def runXCSAOQSOs(self): """ Function which chooses what templates to include when matching QSO type objects. Calls runXCSAO. """ templatesToInclude = [] for temp in templateFileNames: tempNum = int(os.path.split(temp)[-1].split("-")[-1].split(".")[0]) if 29 <= tempNum < 33: templatesToInclude.append(temp) self.runXCSAO(templatesToInclude=templatesToInclude) # This does all the work of fitting the spectra def runXCSAO(self, templatesToInclude=[]): try: from pyraf import iraf except ImportError: print 'There is something wrong with pyraf! Exiting...' sys.exit(1) try: from iraf import rvsao except ImportError: print 'RVSAO not installed? Exiting...' sys.exit(1) xMin = float(self.minWavelengthEntry.get()) xMax = float(self.maxWavelengthEntry.get()) # This has to have the file available in IRAF friendly format if not os.path.exists("spec1d_IRAF"): os.makedirs("spec1d_IRAF") irafFileName = "spec1d_IRAF" + os.path.sep + "iraf_boxcar_" + \ str(self.fibernumberScale.get())+ "_" +\ self.objectFileName print irafFileName result = None if os.path.exists(irafFileName) or \ self.convertToIRAFFormat(): print "--> cross correlating " + irafFileName + " ..." self.skySED = self.obj[self.fiberNumber - self.fibernumberScale.get()]['sky'] # Mask prominent sky emission lines if self.skySED is None: fixbad = "n" else: fixbad = "y" if fixbad == "y": normSkyFlux = self.skySED.flux / self.skySED.flux.max() threshold = 0.15 badPix = numpy.where(normSkyFlux > threshold)[0] lines = [] for i in range(len(badPix)): startPixInLine = False for line in lines: if line[0] <= badPix[i] <= line[1]: startPixInLine = True if not startPixInLine: pixCount = 1 if pixCount + i < len(badPix) - 1: nextPix = badPix[i + pixCount] prevPix = badPix[i] maxReached = False while nextPix < prevPix + 2: if pixCount + i < len(badPix) - 1: prevPix = badPix[i + pixCount] pixCount += 1 nextPix = badPix[i + pixCount] else: maxReached = True break if not maxReached: lastPix = prevPix else: lastPix = max(badPix) else: lastPix = max(badPix) lines.append([badPix[i], lastPix]) outFile = file("badlines.dat", "w") for line in lines: print('Badlines updated') outFile.write(str(self.skySED.wavelength[line[0]]-10) + \ "\t" +\ str(self.skySED.wavelength[line[1]]+10) +\ "\n") outFile.close() if self.ignoreEmission.get() == 1: chop = 'y' else: chop = 'n' for temp in templatesToInclude: try: rvsao.xcsao(spectra=irafFileName, tempdir=tempDir, fixbad=fixbad, badlines=os.getcwd()+os.path.sep+"badlines.dat", vel_init="zguess", czguess=self.redshiftScaleVar.get(), templates=os.path.split(temp)[-1], st_lambda=xMin, end_lambda=xMax, zeropad="y", nsmooth=30, #s_emchop="n", t_emchop="n", nzpass=8, s_emchop=chop, t_emchop=chop, nzpass=8, minvel=(self.redshiftScaleVar.get() - 0.3) *\ 3e5, maxvel=(self.redshiftScaleVar.get() + 0.3) *\ 3e5, renormalize="y", ncols=8192, low_bin=10, top_low=20, top_nrun=250, nrun=500, bell_window=0.05, dispmode=2, curmode="n", ablines="ablines.dat", displot="no", logfiles="xcsao.log", save_vel="n", pkfrac=0.5, report_mode=1) except: print "hmm? 
xcsao fell over" sys.exit(1) result = self.parseXCSAOResult() os.remove("xcsao.log") if result is not None: self.redshiftScaleVar.set(result['z']) self.commentsEntryVar.set("XCSAO (R=%.3f): " % (result['RVal'])) self.commentsEntry['textvariable'] = self.commentsEntryVar self.templateRadioVar.set( templateFileNames.index(result['template'])) self.templateRadioList[ templateFileNames.index(result['template'])].select() self.commentsEntry.update() self.redshiftErrorEntryVar.set("%.6f" % (result['zErr'])) print result else: print "XCSAO failed." self.redrawPlot() def convertToIRAFFormat(self): """ Canned convert self.objectFileName spec1d IDL file into IRAF format, stored under 'spec1d_IRAF' dir. Needed for running XCSAO. Note if values < 1 here, we multiply by ridiculous factor to stop xcsao from crashing """ try: from pyraf import iraf except ImportError: print 'There is something wrong with pyraf! Exiting...' sys.exit(1) try: from iraf import onedspec except ImportError: print 'Did not find onedspec in iraf! Exiting...' sys.exit(1) method = "boxcar" baseName = "spec1d_IRAF" + os.path.sep + "iraf_" + method + "_" \ + str(self.fibernumberScale.get()) + "_" print "--> Converting " + self.objectFileName + " to IRAF format ..." if method == "boxcar": tabExts = [1, 2] elif method == "optimal": tabExts = [3, 4] outFileName = baseName + self.objectFileName.replace(".fits", ".csv") writer = file(outFileName, "wb") skyWriter = file(outFileName.replace("iraf_", "sky_iraf_"), "wb") #idlfits = pyfits.open(self.objectFileName) fluxData = self.obj[self.fiberNumber -\ self.fibernumberScale.get()]['object'].flux wavelengthData =\ self.obj[self.fiberNumber - self.fibernumberScale.get()]['object'].wavelength skyData = self.obj[self.fiberNumber - self.fibernumberScale.get()]['sky'].flux print self.fiberNumber - self.fibernumberScale.get() print fluxData.mean() #if fluxData.mean() < 100: # fluxData = (fluxData / fluxData.mean()) * 1e6 plotData = [] skyPlotData = [] for i in range(len(fluxData)): writer.write(str( wavelengthData[i]) + "\t" + str(fluxData[i]) + "\n") skyWriter.write(str( wavelengthData[i])+"\t"+str(skyData[i])+"\n") plotData.append([wavelengthData[i], fluxData[i]]) skyPlotData.append([wavelengthData[i], skyData[i]]) writer.close() skyWriter.close() #for tabExt in tabExts: # # Sometimes things just don't work as they should ... 
# dataOkay = True # try: # if len(idlfits[tabExt].data.field('SPEC').shape) > 1: # fluxData = idlfits[tabExt].data.field('SPEC')[0] # else: # fluxData = idlfits[tabExt].data.field('SPEC') # except IndexError: # dataOkay = False # if dataOkay: # if len(idlfits[tabExt].data.field('LAMBDA').shape) > 1: # wavelengthData = idlfits[tabExt].data.field('LAMBDA')[0] # else: # wavelengthData = idlfits[tabExt].data.field('LAMBDA') # #skyData=idlfits[tabExt].data.field('SKYSPEC')[0] # # # xcsao crash preventing when running on fluxed spectra (it # # should do this itself, of course) # if fluxData.mean() < 100: # fluxData = (fluxData / fluxData.mean()) * 1e6 # minWavelength = min(wavelengthData) # maxWavelength = max(wavelengthData) # plotData = [] # skyPlotData = [] # for i in range(len(fluxData)): # writer.write(str( # wavelengthData[i]) + "\t" + str(fluxData[i]) + "\n") # #skyWriter.write(str( # # wavelengthData[i])+"\t"+str(skyData[i])+"\n") # plotData.append([wavelengthData[i], fluxData[i]]) # #skyPlotData.append([wavelengthData[i],skyData[i]]) #writer.close() #skyWriter.close() if not os.path.exists(outFileName.replace(".csv", ".fits")): #os.remove(outFile.rstrip("csv")+"fits") # This might fall over below if there's a dodgy file try: onedspec.rspectext(input=outFileName, output=outFileName.replace(".csv", ".fits"), flux="no", dtype="interp") onedspec.rspectext(input=outFileName.replace("iraf_", "sky_iraf_"), output=outFileName.replace("iraf_", "sky_iraf_").rstrip("csv")+"fits", flux="no", dtype="interp") #del sys.modules['pyraf'] #del sys.modules['pyraf.iraf'] return True except: print "... Argh! there's a problem with this file that ", \ "causes IRAF to crash! ..." print "... skipping ..." return False def parseXCSAOResult(self): """ Parses xcsao results log file, returns highest R value result """ inFile = file("xcsao.log", "rb") lines = inFile.readlines() #results = [] objectList = [] currentObject = "" bestRVal = 0.0 bestResult = None for line in lines: # Check for spectrum name change objectChanged = False if line.find("Object:") != -1: newObject = line[line.find("Object:") + 8:].rstrip(" \n") if newObject != currentObject: currentObject = newObject objectChanged = True if objectChanged: objectList.append(currentObject) # Extract results -- if we asked to ignore any templates it's done # here if line.find("CZ:") != -1: bits = line.split() result = {} for i in range(len(bits)): if bits[i] == "Temp:": result['template'] = str(bits[i + 1]) if bits[i] == "R:": result['RVal'] = float(bits[i + 1]) if bits[i] == "CZ:": result['cz'] = float(bits[i + 1]) if bits[i] == "+/-": result['czErr'] = float(bits[i + 1]) if result['RVal'] > bestRVal: result['z'] = result['cz'] / 3e5 result['zErr'] = (result['czErr'] / result['cz']) * \ (result['cz'] / 3e5) bestResult = result bestRVal = result['RVal'] return bestResult def loadTemplates(self, tempDir, templateFileNamesList): """ Takes in a list of SDSS template file names. Appends the Tremonti starburst template which we handle differently. Returns a list containing astSED.SED objects """ print "Loading templates ..." 
templatesList = [] for t in templateFileNamesList: # This loads in an SDSS spectral template, and feeds it into a SED # object timg = pyfits.open(tempDir + os.path.sep + t) th = timg[0].header tc0 = th['COEFF0'] tc1 = th['COEFF1'] tpixRange = numpy.arange(timg[0].data.shape[1]) twavelengthRange = 10.0 ** (tc0 + tc1 * tpixRange) tempSED = astSED.SED(wavelength=twavelengthRange, flux=timg[0].data[0]) templatesList.append(tempSED) # Tremonti star burst #s=astSED.SED() #s.loadFromFile(tremontiFileName) #templatesList.append(s) return templatesList def log(self): # Save results for current spectrum self.savePNG() self.outFile = file(outDir + os.path.sep +\ self.objectSpecFileNames[self.currentSpecFileIndex].rstrip('.fits') + ".results", "a") #if self.qualityRadioVar.get() != 0: self.outFile.write("%s\t%.5f\t%.5f\t%d\t%s\n" % \ (self.fibernumberScale.get(), self.redshiftScaleVar.get(), float(self.redshiftErrorEntryVar.get()), self.qualityRadioVar.get(), self.commentsEntryVar.get())) # else: # self.outFile.write("%s\t%s\t%s\t%d\t%s\n" % \ # (self.objectSpecFileNames[ # self.currentSpecFileIndex], # "None", "None", self.qualityRadioVar.get(), # self.commentsEntryVar.get())) self.outFile.close() print('Logged %s' % self.fibernumberScale.get()) # # Move on to next spectrum # if self.currentSpecFileIndex < len(self.objectSpecFileNames) - 1: # self.currentSpecFileIndex += 1 # # print "Checking spectrum %d/%d ..." % \ # (self.currentSpecFileIndex + 1, len(self.objectSpecFileNames)) # obj = self.loadObjectSpectrum(self.objectSpecFileNames[ # self.currentSpecFileIndex]) # self.objectSpecFileName = self.objectSpecFileNames[ # self.currentSpecFileIndex] # for plot titles etc. # self.objSED = obj['object'] # self.unsmoothedObjFlux = self.objSED.flux[:] # self.skySED = obj['sky'] # x = self.smoothScale.get() # self.objSED.flux = self.unsmoothedObjFlux[:] # self.objSED.smooth(x) # self.resetFeatures() # self.qualityRadioVar.set(0) # self.commentsEntryVar.set("") # self.outPathEntryVar.set(self.outDir + os.path.sep + # self.objectSpecFileName.replace(".fits", # ".png")) # self.redshiftErrorEntryVar.set(0.001) # self.updatePlot(self.objSED, # self.templates[ # self.templateRadioVar.get()], # self.skySED, # self.redshiftScaleVar.get(), # tempLabel=os.path.split(templateLabels[ # self.templateRadioVar.get()])[-1], # redrawSky=True, # redrawFeatures=True, # plotFeatures=self.plotFeatures) # else: # print "Finished checking all spectra!" # self.buttonFrame.quit() def loadIFUSpectra(self, objectFileName): """ Loads in an object spectrum - this has to be in DEEP2 pipeline spec1d format (i.e. fits tables) Object spectrum is smoothed by boxcar of size smoothPix. Returns a dictionary containing object and sky astSED.SED objects {'object', 'sky'} """ print "Loading IFU spectrum ..." self.objectFileName = objectFileName oimg = pyfits.open(objectFileName) # Load the IFU data -- Row-stacked spectra odata = oimg[1].data oError = oimg[2].data odata_dim = odata.shape wcs = astWCS.WCS(objectFileName, extensionName=1) owavelengthStartEnd = wcs.getImageMinMaxWCSCoords()[0:2] fiberNumber = wcs.getImageMinMaxWCSCoords()[2:4] owavelengthStep = oimg[1].header['CDELT1'] owavelengthRange = [owavelengthStartEnd[0] + i * owavelengthStep for i in range(odata_dim[1])] # Check to make sure we got it right if not owavelengthRange[-1] == owavelengthStartEnd[-1]: print 'The ending wavelenghts do not match... 
Exiting' sys.exit(1) else: # make median sky specs = numpy.array([flux for flux in odata]) skySpec = numpy.median(specs, axis=0) # sums = [sum(odata[i,:]) for i in range(odata.shape[0])] #find the median value of all the fibers # med = astStats.clippedMedianStdev(sums) # med = med['clippedMedian'] # skyfibers = [i for i in range(odata.shape[0])\ # if sum(odata[i,:]) <= med] # skydata = odata.take(skyfibers, axis=0) # oskyflux = [numpy.average(skydata[:,i])\ # for i in range(skydata.shape[1])] RSS = [] for i in range(int(fiberNumber[1])): #oflux = odata[i] - oskyflux oflux = odata[i] - skySpec oErrorFlux = oError[i] #oflux = odata[i] # Mask out extreme values in spectrum # Just because edges dodgy in efosc med = numpy.median(oflux) oflux[numpy.greater(abs(oflux), 10.0*med)] = 0.0001 objSED = astSED.SED(wavelength=owavelengthRange, flux=oflux) # make it > 0 everywhere #objSED.flux = objSED.flux - objSED.flux.min() #objSED.flux = objSED.flux / objSED.flux.max() #skySED = astSED.SED(wavelength=owavelengthRange, flux=oskyflux) skySED = astSED.SED(wavelength=owavelengthRange, flux=skySpec) errSED = astSED.SED(wavelength=owavelengthRange, flux=oErrorFlux) RSS.append({'object': objSED, 'sky': skySED, 'error' : errSED}) return RSS #return {'object': objSED, 'sky': skySED} def loadObjectSpectrum(self, objectFileName): """ Loads in an object spectrum - this has to be in DEEP2 pipeline spec1d format (i.e. fits tables) Object spectrum is smoothed by boxcar of size smoothPix. Returns a dictionary containing object and sky astSED.SED objects {'object', 'sky'} """ print "Loading object spectrum ..." self.objectFileName = objectFileName oimg = pyfits.open(objectFileName) # If DEEP2 format, concatenate red, blue spectra # Otherwise, assume in efosc2 reducer format try: rwavelengthRange = oimg['HORNE-R'].data.field('LAMBDA')[0] rflux = oimg['HORNE-R'].data.field('SPEC')[0] rskyflux = oimg['HORNE-R'].data.field('SKYSPEC')[0] bwavelengthRange = oimg['HORNE-B'].data.field('LAMBDA')[0] bflux = oimg['HORNE-B'].data.field('SPEC')[0] bskyflux = oimg['HORNE-B'].data.field('SKYSPEC')[0] owavelengthRange = numpy.array( bwavelengthRange.tolist() + rwavelengthRange.tolist()) oflux = numpy.array(bflux.tolist() + rflux.tolist()) oskyflux = numpy.array(bskyflux.tolist() + rskyflux.tolist()) except: # owavelengthRange = oimg['1D_SPECTRUM'].data.field('LAMBDA') # oflux = oimg['1D_SPECTRUM'].data.field('SPEC') # columnNames = oimg['1D_SPECTRUM'].columns.names # if 'SKYSPEC' in columnNames: # oskyflux = oimg['1D_SPECTRUM'].data.field('SKYSPEC') #else: # oskyflux = None owavelengthRange = oimg[1].data.field('LAMBDA') oflux = oimg[1].data.field('SPEC') columnNames = oimg[1].columns.names if 'SKYSPEC' in columnNames: oskyflux = oimg[1].data.field('SKYSPEC') else: oskyflux = None # Mask out extreme values in spectrum # Just because edges dodgy in efosc #med=numpy.median(oflux) #oflux[numpy.greater(abs(oflux), 10.0*med)]=0.0 objSED = astSED.SED(wavelength=owavelengthRange, flux=oflux) # make it > 0 everywhere objSED.flux = objSED.flux - objSED.flux.min() objSED.flux = objSED.flux / objSED.flux.max() if oskyflux is not None: skySED = astSED.SED(wavelength=owavelengthRange, flux=oskyflux) else: skySED = None return {'object': objSED, 'sky': skySED} def plotSkyChanged(self): """ Clears figure for if we're redrawing sky subplot or not. Use if we change norm method too. """ pylab.clf() self.redrawPlot() def plotErrorChanged(self): """ Clears figure for if we're redrawing sky subplot or not. Use if we change norm method too. 
""" pylab.clf() self.redrawPlot() def maskLines(self, objSED, skySED): # Mask prominent sky emission lines if skySED is None: fixbad = "n" else: fixbad = "y" if fixbad == "y": normSkyFlux = skySED.flux / skySED.flux.max() threshold = 0.15 badPix = numpy.where(normSkyFlux > threshold)[0] lines = [] for i in range(len(badPix)): startPixInLine = False for line in lines: if line[0] <= badPix[i] <= line[1]: startPixInLine = True if not startPixInLine: pixCount = 1 if pixCount + i < len(badPix) - 1: nextPix = badPix[i + pixCount] prevPix = badPix[i] maxReached = False while nextPix < prevPix + 2: if pixCount + i < len(badPix) - 1: prevPix = badPix[i + pixCount] pixCount += 1 nextPix = badPix[i + pixCount] else: maxReached = True break if not maxReached: lastPix = prevPix else: lastPix = max(badPix) else: lastPix = max(badPix) #append the lines and add a little padding lines.append([badPix[i]-10, lastPix+10]) for line in lines: # Do a simple linear fit to the end points y = objSED.flux[line[1]] - objSED.flux[line[0]] x = skySED.wavelength[line[1]] - skySED.wavelength[line[0]] slope = y/x intercept = objSED.flux[line[0]] - slope *\ skySED.wavelength[line[0]] # print skySED.wavelength[line[0]], skySED.wavelength[line[1]] for i in range(line[0], line[1]): objSED.flux[i] = slope * skySED.wavelength[i] + intercept return objSED def updatePlot(self, objSED, tempSED, skySED, errSED, redshift, tempLabel=None, redrawSky=True, redrawFeatures=False, plotFeatures=[]): """ Updates the pylab plot of the object spectrum with template overlaid. """ xMin = float(self.minWavelengthEntry.get()) xMax = float(self.maxWavelengthEntry.get()) tempSED.redshift(redshift) tempSED.flux = tempSED.flux # Mask out the spectrum that we can't see plusMask = numpy.greater(tempSED.wavelength, xMin) minusMask = numpy.less(tempSED.wavelength, xMax) mask = numpy.logical_and(plusMask, minusMask) tempSED.flux = tempSED.flux - tempSED.flux.min() tempSED.flux = tempSED.flux / tempSED.flux[mask].max() # In case we don't want to see redward of 10000 Angstroms plusMask = numpy.greater(objSED.wavelength, xMin) minusMask = numpy.less(objSED.wavelength, xMax) mask = numpy.logical_and(plusMask, minusMask) objSED = self.maskLines(objSED, skySED) # Normalize objSED.flux = objSED.flux - objSED.flux[mask].min() objSED.flux = objSED.flux / objSED.flux[mask].max() # Norm based on matching flux as closely as possible between template # and object spectrum # Ignore XX% of each end of spectrum as edges can be weird if self.altNormCheckVar.get() == 1: ignoreAngstroms = (objSED.wavelength.max() - objSED.wavelength.min()) * 0.25 dw = 100 binEdges = numpy.arange(objSED.wavelength.min() + ignoreAngstroms, objSED.wavelength.max() - ignoreAngstroms, dw) passbands = [] for b in binEdges: passbands.append(astSED.TopHatPassband(b, b + dw)) objSEDDict = objSED.getSEDDict(passbands) tempSEDDict = tempSED.getSEDDict(passbands) norm0 = 1.0 norm, success = optimize.leastsq(fitSEDNormErrFunc, norm0, args=(tempSEDDict['flux'], objSEDDict['flux'])) objSED.flux = objSED.flux / norm #if skySED != None and self.plotSkyCheckVar.get() == 1: #pylab.subplot(211) pylab.cla() pylab.title(self.objectSpecFileName+' Fiber No. 
' + str(self.fibernumberScale.get())) pylab.plot(objSED.wavelength, objSED.flux, 'k-') pylab.plot(tempSED.wavelength, tempSED.flux, '-', c='#a60628', label=tempLabel) pylab.text(0.05, 0.92, "z = %.5f $\pm$ %.5f (Q = %s)" % (tempSED.z, float(self.redshiftErrorEntryVar.get()), self.qualityRadioVar.get()), ha='left', va='top', transform=pylab.gca().transAxes, size=12, color='#a60628') pylab.ylim(0, 1.2) #if skySED != None and self.plotSkyCheckVar.get() == True: #pylab.gca().set_xticklabels([]) #else: pylab.xlabel("Wavelength (Angstroms)") # Plots the spectral features in turn #plotFeatures=["H", "K", "[OII]"] if redrawFeatures: #ylim=pylab.gca().get_ylim() # Need this to automatically draw #correct length -- for features for on, item in zip(self.plotFeatures, spectralFeaturesCatalogue): if on == 1: featureLabel = item[0] # Greek letters? eta will cause a problem here! featureLabel = featureLabel.replace("alpha", "$\\alpha$") featureLabel = featureLabel.replace("beta", "$\\beta$") featureLabel = featureLabel.replace("gamma", "$\gamma$") featureLabel = featureLabel.replace("delta", "$\delta$") featureLabel = featureLabel.replace("epsilon", "$\\epsilon$") featureLabel = featureLabel.replace("zeta", "$\zeta$") featureLabel = featureLabel.replace("theta", "$\\theta$") for i in range(1, len(item)): featureLambda = (1.0 + float(redshift)) * item[i] pylab.plot((featureLambda, featureLambda), (0, 1.0), '--', c='#188487') pylab.text(featureLambda, 1.05, featureLabel, ha='center', va='top', size=10, rotation='vertical') if redrawSky and self.plotSkyCheckVar.get() == 1: if skySED is not None: pylab.plot(skySED.wavelength, skySED.flux / skySED.flux.max() * 0.3, '-', c='#348abd', label='Sky') # Main telluric absorption features c = patches.Rectangle((6860, 0), (6930 - 6860), 1.2, fill=True, edgecolor=(0.8, 0.8, 0.8), facecolor=(0.8, 0.8, 0.8), linewidth=1) pylab.gca().add_patch(c) c = patches.Rectangle((7590, 0), (7710 - 7590), 1.2, fill=True, edgecolor=(0.8, 0.8, 0.8), facecolor=(0.8, 0.8, 0.8), linewidth=1) pylab.gca().add_patch(c) for line in open('badlines.dat', 'r'): line = line.split('\t') c = patches.Rectangle((float(line[0]), 0), (float(line[1]) -\ float(line[0])), 1.2, fill=True, edgecolor=(0.8, 0.8, 0.8), facecolor=(0.8, 0.8, 0.8), linewidth=1, zorder=0) pylab.gca().add_patch(c) if redrawSky and self.plotErrorCheckVar.get() == 1: if errSED is not None: pylab.plot(errSED.wavelength, errSED.flux / errSED.flux.max() * 0.5, '-', c='#467821', label='Error', zorder=0) # Finish drawing the object spectrum plot pylab.ylim(0, 1.2) pylab.xlim(xMin, xMax) pylab.ylabel("Relative Flux") pylab.xlabel("Wavelength (Angstroms)") pylab.legend(loc="upper right") def redrawPlot(self): self.updatePlot(self.obj[self.fiberNumber - self.fibernumberScale.get()]['object'], self.templates[self.templateRadioVar.get()], self.obj[self.fiberNumber - self.fibernumberScale.get()]['sky'], self.obj[self.fiberNumber - self.fibernumberScale.get()]['error'], self.redshiftScaleVar.get(), tempLabel=os.path.split(templateLabels[ self.templateRadioVar.get()])[-1], redrawSky=True, redrawFeatures=True, plotFeatures=self.plotFeatures) # self.updatePlot(self.objSED, # self.templates[self.templateRadioVar.get()], # self.skySED, # self.redshiftScaleVar.get(), # tempLabel=os.path.split(templateLabels[ # self.templateRadioVar.get()])[-1], # redrawFeatures=True, # plotFeatures=self.plotFeatures) def fitSEDNorm(p, modelFluxes): """ Pair of helper functions for fitting SED normalisation p0 is list, [0] = normalisation """ result = p * modelFluxes 
return result def fitSEDNormErrFunc(p, modelFluxes, observedFluxes): x = fitSEDNorm(p, modelFluxes) - observedFluxes chiSq = numpy.sum(x ** 2) # not really chi sq, duh return chiSq #----------------------------------------------------------------------------- # Main ... if __name__ == "__main__": if len(sys.argv) < 3: print "Run: visualTemplateRedshift5.py <spec1d object spectra .fits", \ "[wildcards allowed]> ... <outputDir>" else: objectSpecFileNames = sys.argv[1:-1] outDir = sys.argv[-1] print "File to be used: " print objectSpecFileNames root = Tkinter.Tk() root.title("Visual Template Redshift 5.0 now with 100% more IFU") app = App(root, objectSpecFileNames, outDir) root.mainloop()
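#-----------------------------------------------------------------------------
# Standalone sketch (never called by the GUI) of the normalisation step used in
# updatePlot(): solve for the single scale factor that best matches a template
# spectrum to an observed spectrum in a least-squares sense. The function name,
# the synthetic flux arrays and the residual-vector form of the error function
# are illustrative assumptions, not the exact call the application makes.
def demoFitSEDNorm():
    import numpy
    from scipy import optimize

    def normResiduals(p, modelFluxes, observedFluxes):
        # Residual vector; leastsq minimises the sum of its squares
        return p[0] * modelFluxes - observedFluxes

    modelFluxes = numpy.array([1.0, 2.0, 3.0, 4.0])
    # Fake "observed" spectrum: half the template flux plus a little noise
    observedFluxes = 0.5 * modelFluxes + numpy.array([0.01, -0.02, 0.015, 0.0])
    norm, success = optimize.leastsq(normResiduals, [1.0],
                                     args=(modelFluxes, observedFluxes))
    return norm[0]   # ~0.5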
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum, EnumMeta from six import with_metaclass class _CaseInsensitiveEnumMeta(EnumMeta): def __getitem__(self, name): return super().__getitem__(name.upper()) def __getattr__(cls, name): """Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """ try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name) class ActionStatusEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Describes whether the order item is deletable or not. """ #: Allowed flag. ALLOWED = "Allowed" #: Not Allowed flag. NOT_ALLOWED = "NotAllowed" class ActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs. """ INTERNAL = "Internal" class AddressType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Type of address. """ #: Address type not known. NONE = "None" #: Residential Address. RESIDENTIAL = "Residential" #: Commercial Address. COMMERCIAL = "Commercial" class AvailabilityStage(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Current availability stage of the product. Availability stage """ #: Product is available. AVAILABLE = "Available" #: Product is coming soon. COMING_SOON = "ComingSoon" #: Product is in preview. PREVIEW = "Preview" #: Product is deprecated. DEPRECATED = "Deprecated" #: Product is available only on signup. SIGNUP = "Signup" #: Product is not available. UNAVAILABLE = "Unavailable" class BillingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Represents billing type. """ #: PaV2 billing. PAV2 = "Pav2" #: Purchase billing. PURCHASE = "Purchase" class ChargingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Charging type. """ #: Per order charging type. PER_ORDER = "PerOrder" #: Per device charging type. PER_DEVICE = "PerDevice" class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of identity that created the resource. """ USER = "User" APPLICATION = "Application" MANAGED_IDENTITY = "ManagedIdentity" KEY = "Key" class DescriptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Type of description. """ #: Base description. BASE = "Base" class DisabledReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Reason why the product is disabled. """ #: Not disabled. NONE = "None" #: Not available in the requested country. COUNTRY = "Country" #: Not available to push data to the requested Azure region. REGION = "Region" #: Required features are not enabled. FEATURE = "Feature" #: Subscription does not have required offer types. OFFER_TYPE = "OfferType" #: Subscription has not registered to Microsoft.DataBox and Service does not have the subscription #: notification. NO_SUBSCRIPTION_INFO = "NoSubscriptionInfo" #: The product is not yet available. 
NOT_AVAILABLE = "NotAvailable" #: The product is out of stock. OUT_OF_STOCK = "OutOfStock" class DoubleEncryptionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Double encryption status as entered by the customer. It is compulsory to give this parameter if the 'Deny' or 'Disabled' policy is configured. """ #: Double encryption is disabled. DISABLED = "Disabled" #: Double encryption is enabled. ENABLED = "Enabled" class ImageType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Type of the image """ #: Main image. MAIN_IMAGE = "MainImage" #: Bullet image. BULLET_IMAGE = "BulletImage" #: Generic image. GENERIC_IMAGE = "GenericImage" class LengthHeightUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Unit for the dimensions of length, height and width. """ #: Inch, applicable for West US. IN_ENUM = "IN" #: Centimeter. CM = "CM" class LinkType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Type of link """ #: Generic link. GENERIC = "Generic" #: Terms and conditions link. TERMS_AND_CONDITIONS = "TermsAndConditions" #: Link to product specification. SPECIFICATION = "Specification" #: Link to product documentation. DOCUMENTATION = "Documentation" #: Link to know more. KNOW_MORE = "KnowMore" #: Link to sign up for products. SIGN_UP = "SignUp" class MeteringType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Represents Metering type (eg one-time or recurrent) """ #: One time billing. ONE_TIME = "OneTime" #: Recurring billing. RECURRING = "Recurring" #: Adhoc billing. ADHOC = "Adhoc" class NotificationStageName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Name of the stage. """ #: Notification at order item shipped from microsoft datacenter. SHIPPED = "Shipped" #: Notification at order item delivered to customer. DELIVERED = "Delivered" class OrderItemCancellationEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Describes whether the order item is cancellable or not. """ #: Order item can be cancelled without fee. CANCELLABLE = "Cancellable" #: Order item can be cancelled with fee. CANCELLABLE_WITH_FEE = "CancellableWithFee" #: Order item not cancellable. NOT_CANCELLABLE = "NotCancellable" class OrderItemReturnEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Describes whether the order item is returnable or not. """ #: Order item can be returned without fee. RETURNABLE = "Returnable" #: Order item can be returned with fee. RETURNABLE_WITH_FEE = "ReturnableWithFee" #: Order item not returnable. NOT_RETURNABLE = "NotReturnable" class OrderItemType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Order item type. """ #: Purchase OrderItem. PURCHASE = "Purchase" #: Rental OrderItem. RENTAL = "Rental" class Origin(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit logs UX. Default value is "user,system" """ USER = "user" SYSTEM = "system" USER_SYSTEM = "user,system" class StageName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Stage name """ #: Currently in draft mode and can still be cancelled. PLACED = "Placed" #: Order is currently in draft mode and can still be cancelled. IN_REVIEW = "InReview" #: Order is confirmed. CONFIRMED = "Confirmed" #: Order is ready to ship. READY_TO_SHIP = "ReadyToShip" #: Order is in transit to customer. SHIPPED = "Shipped" #: Order is delivered to customer. DELIVERED = "Delivered" #: Order is in use at customer site. 
IN_USE = "InUse" #: Return has been initiated by customer. RETURN_INITIATED = "ReturnInitiated" #: Order is in transit from customer to microsoft. RETURN_PICKED_UP = "ReturnPickedUp" #: Order has been received back to microsoft. RETURNED_TO_MICROSOFT = "ReturnedToMicrosoft" #: Return has now completed. RETURN_COMPLETED = "ReturnCompleted" #: Order has been cancelled. CANCELLED = "Cancelled" class StageStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Stage status. """ #: No status available yet. NONE = "None" #: Stage is in progress. IN_PROGRESS = "InProgress" #: Stage has succeeded. SUCCEEDED = "Succeeded" #: Stage has failed. FAILED = "Failed" #: Stage has been cancelled. CANCELLED = "Cancelled" #: Stage is cancelling. CANCELLING = "Cancelling" class SupportedFilterTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Type of product filter. """ #: Ship to country. SHIP_TO_COUNTRIES = "ShipToCountries" #: Double encryption status. DOUBLE_ENCRYPTION_STATUS = "DoubleEncryptionStatus" class TransportShipmentTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Indicates Shipment Logistics type that the customer preferred. """ #: Shipment Logistics is handled by the customer. CUSTOMER_MANAGED = "CustomerManaged" #: Shipment Logistics is handled by Microsoft. MICROSOFT_MANAGED = "MicrosoftManaged" class WeightMeasurementUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Unit for the dimensions of weight. """ #: Pounds. LBS = "LBS" #: Kilograms. KGS = "KGS"
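# Usage sketch -- not part of the generated client code above. It shows how
# _CaseInsensitiveEnumMeta lets mixed-case names resolve to the members defined
# in this module; the sample values are illustrative only.
if __name__ == "__main__":
    assert StageName["shipped"] is StageName.SHIPPED          # __getitem__ upper-cases the name
    assert StageName.shipped is StageName.SHIPPED             # __getattr__ falls back to the member map
    assert StageStatus("InProgress") is StageStatus.IN_PROGRESS  # lookup by value is unchanged
    print(StageName.SHIPPED.value)                            # prints "Shipped"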
# # This file is subject to the terms and conditions defined in the # file 'LICENSE', which is part of this source code package. # import datetime from rdr_service.code_constants import ( CONSENT_PERMISSION_YES_CODE, CONSENT_PERMISSION_NO_CODE, DVEHRSHARING_CONSENT_CODE_YES, EHR_CONSENT_EXPIRED_YES, DVEHRSHARING_CONSENT_CODE_NO, CONSENT_GROR_NO_CODE, CONSENT_GROR_NOT_SURE, CONSENT_GROR_YES_CODE ) from rdr_service import config from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireResponseClassificationType from rdr_service.resource.constants import PDREnrollmentStatusEnum from rdr_service.resource.constants import ParticipantEventEnum, COHORT_1_CUTOFF, \ COHORT_2_CUTOFF, ConsentCohortEnum from rdr_service.services.system_utils import JSONObject class EnrollmentStatusInfo: """ Information about Enrollment Status events """ calculated = False # False = No, True = Yes first_ts = None # First timestamp seen. last_ts = None # Last timestamp seen. values = None # List of related events. def add_value(self, value): """ Save a relevant datum to the values list. """ if self.values is None: self.values = list() self.values.append(value) class EnrollmentStatusCalculator: """ Calculate participant enrollment status. """ status: PDREnrollmentStatusEnum = PDREnrollmentStatusEnum.Unset # events = None # List of EnrollmentStatusEvent objects. activity = None # List of activity created from Participant generator. _ehr_event_found = False cohort = None # First info object for each part used in calculating enrollment status. _signup = None _consented = None _ehr_consented = None _gror_received = None _gror_consented = None _biobank_samples = None _physical_measurements = None _baseline_modules = None # Timestamps for when each status was achieved, these are set in the self.save_calc() method. registered_time = None participant_time = None participant_plus_ehr_time = None core_participant_minus_pm_time = None core_participant_time = None def __init__(self): # Create a list of the baseline module enumerations from the config file. self._module_enums = [ParticipantEventEnum[mod.replace('questionnaireOn', '')] for mod in config.getSettingList('baseline_ppi_questionnaire_fields')] if not self._module_enums: raise ValueError('Loading baseline modules from configuration failed.') def run(self, activity: list): """ :param activity: A list of activity dictionary objects created by the ParticipantSummaryGenerator. """ self.activity = [JSONObject(r) for r in activity if r['timestamp']] # Pre-check to see if there are any EHR consent answers found in the events. self._ehr_event_found = len([e for e in self.activity if e.event == ParticipantEventEnum.EHRConsentPII]) > 0 self.activity.sort(key=lambda i: i.timestamp) # Work through activity by slicing to determine current enrollment status. # This method allows us to iterate once through the data and still catch participants # that might have been considered a Core Participant at one point, but would not by # looking at their current state. for x in range(1, len(self.activity)+1): events = self.activity[0:x] # Get each datum needed for calculating the enrollment status. 
signed_up = self.calc_signup(events) consented, cohort = self.calc_consent(events) ehr_consented = self.calc_ehr_consent(events) gror_received = self.calc_gror_received(events) biobank_samples = self.calc_biobank_samples(events) physical_measurements = self.calc_physical_measurements(events) modules = self.calc_baseline_modules(events) if not self.cohort: self.cohort = cohort # Calculate enrollment status status = PDREnrollmentStatusEnum.Unset if signed_up: status = PDREnrollmentStatusEnum.Registered if status == PDREnrollmentStatusEnum.Registered and consented: status = PDREnrollmentStatusEnum.Participant if status == PDREnrollmentStatusEnum.Participant and ehr_consented: status = PDREnrollmentStatusEnum.ParticipantPlusEHR if status == PDREnrollmentStatusEnum.ParticipantPlusEHR and biobank_samples and \ (cohort != ConsentCohortEnum.COHORT_3 or gror_received) and \ (modules and len(modules.values) >= len(self._module_enums)): status = PDREnrollmentStatusEnum.CoreParticipantMinusPM if status == PDREnrollmentStatusEnum.CoreParticipantMinusPM and \ physical_measurements and \ (cohort != ConsentCohortEnum.COHORT_3 or gror_received): status = PDREnrollmentStatusEnum.CoreParticipant # Set the permanent enrollment status value if needed. Enrollment status can go down # unless the enrollment status has reached a 'Core' status. if status > self.status or self.status < PDREnrollmentStatusEnum.CoreParticipantMinusPM: self.status = status # Save the timestamp when each status was reached. self.save_status_timestamp(status) def save_status_timestamp(self, status): """ Save the status timestamp when we first see that status achieved. :param status: Current calculated enrollment status. """ # Set the first timestamp for each status the participant reaches. if not self.registered_time and self._signup: self.registered_time = self._signup.first_ts if not self.participant_time and self._consented: self.participant_time = self._consented.first_ts if not self.participant_plus_ehr_time and self._ehr_consented: self.participant_plus_ehr_time = self._ehr_consented.first_ts # We specifically check for Core Minus PM status here to get the correct timestamp. if status == PDREnrollmentStatusEnum.CoreParticipantMinusPM and not self.core_participant_minus_pm_time and \ self._biobank_samples and self._baseline_modules: self.core_participant_minus_pm_time = max([self._biobank_samples.first_ts, self._baseline_modules.last_ts]) if status == PDREnrollmentStatusEnum.CoreParticipant and not self.core_participant_time and \ self._biobank_samples and self._baseline_modules and self._physical_measurements: self.core_participant_time = \ max([self._biobank_samples.first_ts, self._baseline_modules.last_ts, self._physical_measurements.first_ts]) # If we jumped over Core Minus PM status, just make it the same timestamp as core. if not self.core_participant_minus_pm_time: self.core_participant_minus_pm_time = self.core_participant_time def save_calc(self, key, info): """ Save first calculated info object for the given key. :param key: Property name. :param info: EnrollmentStatusInfo object :return: EnrollmentStatusInfo object """ if info.calculated is False: return None obj = getattr(self, key) if not obj or obj.calculated is False: setattr(self, key, info) return info def calc_signup(self, events): """ Determine when participant signed up. Criteria: - Establish participant sign up timestamp. 
:param events: List of events :return: EnrollmentStatusInfo object """ # Once we have determined when the participant signed up, we don't need to keep checking. if self._signup: return self._signup info = EnrollmentStatusInfo() for ev in events: # PDR-559: Use PTSC ConsentPII authored timestamp if prior to the RDR sign-up-time timestamp. if ev.event == ParticipantEventEnum.SignupTime or ev.event == ParticipantEventEnum.ConsentPII: info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) break return self.save_calc('_signup', info) def calc_consent(self, events): """ Determine if participant has consented. Criteria: - ConsentPII has been submitted. :param events: List of events :return: EnrollmentStatusInfo object """ info = EnrollmentStatusInfo() for ev in events: if ev.event == ParticipantEventEnum.ConsentPII: info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) break # Calculate consent cohort group cohort = None if info.calculated is True: if info.first_ts < COHORT_1_CUTOFF: cohort = ConsentCohortEnum.COHORT_1 elif COHORT_1_CUTOFF <= info.first_ts <= COHORT_2_CUTOFF: cohort = ConsentCohortEnum.COHORT_2 else: cohort = ConsentCohortEnum.COHORT_3 return self.save_calc('_consented', info), cohort def calc_ehr_consent(self, events): """ Determine if participant has an EHR Consent. Criteria: - A positive EHR or DVEHR Consent submission has been received. - Use first Yes Consent submission after most recent No/Expired consent. :param events: List of events :return: EnrollmentStatusInfo object """ info = EnrollmentStatusInfo() for ev in events: # If EHR Consent answer found, ignore all DVEHR answers. # See unittest: test_participant_enrollment.test_no_on_ehr_overrides_yes_on_dv() if self._ehr_event_found is True and ev.event == ParticipantEventEnum.DVEHRSharing: continue if ev.event in [ParticipantEventEnum.EHRConsentPII, ParticipantEventEnum.DVEHRSharing]: # See if we need to reset the info object. Do not reset if 'DVEHRSHARING_CONSENT_CODE_NOT_SURE'. if ev.answer in [CONSENT_PERMISSION_NO_CODE, DVEHRSHARING_CONSENT_CODE_NO, EHR_CONSENT_EXPIRED_YES]: self._ehr_consented = None # Reset any saved info. info = EnrollmentStatusInfo() continue # See if we should set the consent info. if info.calculated is False and \ ev.answer in [CONSENT_PERMISSION_YES_CODE, DVEHRSHARING_CONSENT_CODE_YES]: info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) return self.save_calc('_ehr_consented', info) def calc_gror_received(self, events): """ Determine if a participant ever submitted a valid GROR response (regardless of consent status) """ info = EnrollmentStatusInfo() for ev in events: if (ev.event == ParticipantEventEnum.GROR and ev.answer in [CONSENT_GROR_NO_CODE, CONSENT_GROR_YES_CODE, CONSENT_GROR_NOT_SURE]): info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) break return self.save_calc('_gror_received', info) # Note: New guidance from NIH as of July 2021 says GROR affirmative consent is not a requirement for Core # status, so calc_gror_recieved() will replace this function in the enrollment status calculation. Leaving this # code here for potential leverage in case of a future need to confirm a GROR 'yes' consent in a participant's # activity history. def calc_gror_consent(self, events): """ Determine if participant has consented to GROR. Criteria: - GROR consented has been submitted with a CheckDNA_Yes answer. 
:param events: List of events :return: EnrollmentStatusInfo object """ info = EnrollmentStatusInfo() for ev in events: if ev.event == ParticipantEventEnum.GROR: # See if we need to reset the info object. if ev.answer in [CONSENT_GROR_NO_CODE, CONSENT_GROR_NOT_SURE]: self._gror_consented = None # Reset any saved info. info = EnrollmentStatusInfo() continue # See if we should set the consent info. if info.calculated is False and ev.answer == CONSENT_GROR_YES_CODE: info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) return self.save_calc('_gror_consented', info) def calc_biobank_samples(self, events): """ Determine if biobank has confirmed DNA test for participant. Criteria: - First time DNA tests have been confirmed by the BioBank. :param events: List of events :return: EnrollmentStatusInfo object """ info = EnrollmentStatusInfo() for ev in events: if ev.event == ParticipantEventEnum.BiobankConfirmed and ev.dna_tests > 0: info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) break return self.save_calc('_biobank_samples', info) def calc_physical_measurements(self, events): """ Determine if biobank has confirmed DNA test for participant. Criteria: - Physical Measurements have been received and finalized. :param events: List of events :return: EnrollmentStatusInfo object """ info = EnrollmentStatusInfo() for ev in events: if info.calculated is False and ev.event == ParticipantEventEnum.PhysicalMeasurements and \ ev.status_id == int(PhysicalMeasurementsStatus.COMPLETED): info.calculated = True info.first_ts = ev.last_ts = ev.timestamp info.add_value(ev) break return self.save_calc('_physical_measurements', info) def calc_baseline_modules(self, events): """ Find the baseline modules the participant has submitted. Criteria: - First TheBasics, Lifestyle and OverallHealth submissions :param events: List of events :return: EnrollmentStatusInfo object """ info = EnrollmentStatusInfo() info.first_ts = datetime.datetime.max info.last_ts = datetime.datetime.min # Sanity check, make sure we have the same number of event enums as config baseline modules. if len(self._module_enums) != len(config.getSettingList('baseline_ppi_questionnaire_fields')): raise ValueError('Baseline module event enum list different than config.') def module_type_stored(mod_ev_): """ See if we have stored that module type already. """ if isinstance(info.values, list): for ev_ in info.values: if mod_ev_ == ev_.event: return True return False # Find the baseline module events. for ev in events: for mod_ev in self._module_enums: # Make sure we are saving a distinct list of baseline module events that have a COMPLETE classification if (ev.event == mod_ev and ev.classification_type == str(QuestionnaireResponseClassificationType.COMPLETE) and module_type_stored(mod_ev) is False ): if ev.timestamp < info.first_ts: info.first_ts = ev.timestamp if ev.timestamp > info.last_ts: info.last_ts = ev.timestamp info.add_value(ev) # If we have seen all the baseline modules, set calculated to True if info.values is not None and len(info.values) == len(self._module_enums): info.calculated = True return self.save_calc('_baseline_modules', info)
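# Standalone sketch -- not part of the calculator above and not used elsewhere in
# the RDR code base. It isolates the cohort assignment rule from calc_consent() so
# it can be exercised on a bare timestamp; the function name is illustrative only.
def _cohort_for_consent_timestamp(consent_ts):
    """Return the ConsentCohortEnum implied by a ConsentPII authored timestamp."""
    if consent_ts < COHORT_1_CUTOFF:
        return ConsentCohortEnum.COHORT_1
    if COHORT_1_CUTOFF <= consent_ts <= COHORT_2_CUTOFF:
        return ConsentCohortEnum.COHORT_2
    return ConsentCohortEnum.COHORT_3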
from __future__ import absolute_import, print_function, division import re import six from six.moves import urllib from netlib import encoding from netlib import multidict from netlib import strutils from netlib.http import multipart from netlib.http import cookies from netlib.http import headers as nheaders from netlib.http import message import netlib.http.url # This regex extracts & splits the host header into host and port. # Handles the edge case of IPv6 addresses containing colons. # https://bugzilla.mozilla.org/show_bug.cgi?id=45891 host_header_re = re.compile(r"^(?P<host>[^:]+|\[.+\])(?::(?P<port>\d+))?$") class RequestData(message.MessageData): def __init__(self, first_line_format, method, scheme, host, port, path, http_version, headers=(), content=None, timestamp_start=None, timestamp_end=None): if not isinstance(headers, nheaders.Headers): headers = nheaders.Headers(headers) self.first_line_format = first_line_format self.method = method self.scheme = scheme self.host = host self.port = port self.path = path self.http_version = http_version self.headers = headers self.content = content self.timestamp_start = timestamp_start self.timestamp_end = timestamp_end class Request(message.Message): """ An HTTP request. """ def __init__(self, *args, **kwargs): self.data = RequestData(*args, **kwargs) def __repr__(self): if self.host and self.port: hostport = "{}:{}".format(self.host, self.port) else: hostport = "" path = self.path or "" return "Request({} {}{})".format( self.method, hostport, path ) def replace(self, pattern, repl, flags=0): """ Replaces a regular expression pattern with repl in the headers, the request path and the body of the request. Encoded content will be decoded before replacement, and re-encoded afterwards. Returns: The number of replacements made. """ # TODO: Proper distinction between text and bytes. c = super(Request, self).replace(pattern, repl, flags) self.path, pc = strutils.safe_subn( pattern, repl, self.path, flags=flags ) c += pc return c @property def first_line_format(self): """ HTTP request form as defined in `RFC7230 <https://tools.ietf.org/html/rfc7230#section-5.3>`_. origin-form and asterisk-form are subsumed as "relative". """ return self.data.first_line_format @first_line_format.setter def first_line_format(self, first_line_format): self.data.first_line_format = first_line_format @property def method(self): """ HTTP request method, e.g. "GET". """ return message._native(self.data.method).upper() @method.setter def method(self, method): self.data.method = message._always_bytes(method) @property def scheme(self): """ HTTP request scheme, which should be "http" or "https". """ return message._native(self.data.scheme) @scheme.setter def scheme(self, scheme): self.data.scheme = message._always_bytes(scheme) @property def host(self): """ Target host. This may be parsed from the raw request (e.g. from a ``GET http://example.com/ HTTP/1.1`` request line) or inferred from the proxy mode (e.g. an IP in transparent mode). Setting the host attribute also updates the host header, if present. """ if six.PY2: # pragma: no cover return self.data.host if not self.data.host: return self.data.host try: return self.data.host.decode("idna") except UnicodeError: return self.data.host.decode("utf8", "surrogateescape") @host.setter def host(self, host): if isinstance(host, six.text_type): try: # There's no non-strict mode for IDNA encoding. # We don't want this operation to fail though, so we try # utf8 as a last resort. 
host = host.encode("idna", "strict") except UnicodeError: host = host.encode("utf8", "surrogateescape") self.data.host = host # Update host header if "host" in self.headers: if host: self.headers["host"] = host else: self.headers.pop("host") @property def port(self): """ Target port """ return self.data.port @port.setter def port(self, port): self.data.port = port @property def path(self): """ HTTP request path, e.g. "/index.html". Guaranteed to start with a slash, except for OPTIONS requests, which may just be "*". """ if self.data.path is None: return None else: return message._native(self.data.path) @path.setter def path(self, path): self.data.path = message._always_bytes(path) @property def url(self): """ The URL string, constructed from the request's URL components """ if self.first_line_format == "authority": return "%s:%d" % (self.host, self.port) return netlib.http.url.unparse(self.scheme, self.host, self.port, self.path) @url.setter def url(self, url): self.scheme, self.host, self.port, self.path = netlib.http.url.parse(url) def _parse_host_header(self): """Extract the host and port from Host header""" if "host" not in self.headers: return None, None host, port = self.headers["host"], None m = host_header_re.match(host) if m: host = m.group("host").strip("[]") if m.group("port"): port = int(m.group("port")) return host, port @property def pretty_host(self): """ Similar to :py:attr:`host`, but using the Host headers as an additional preferred data source. This is useful in transparent mode where :py:attr:`host` is only an IP address, but may not reflect the actual destination as the Host header could be spoofed. """ host, port = self._parse_host_header() if not host: return self.host if not port: port = 443 if self.scheme == 'https' else 80 # Prefer the original address if host header has an unexpected form return host if port == self.port else self.host @property def pretty_url(self): """ Like :py:attr:`url`, but using :py:attr:`pretty_host` instead of :py:attr:`host`. """ if self.first_line_format == "authority": return "%s:%d" % (self.pretty_host, self.port) return netlib.http.url.unparse(self.scheme, self.pretty_host, self.port, self.path) @property def query(self): # type: () -> multidict.MultiDictView """ The request query string as an :py:class:`~netlib.multidict.MultiDictView` object. """ return multidict.MultiDictView( self._get_query, self._set_query ) def _get_query(self): _, _, _, _, query, _ = urllib.parse.urlparse(self.url) return tuple(netlib.http.url.decode(query)) def _set_query(self, value): query = netlib.http.url.encode(value) scheme, netloc, path, params, _, fragment = urllib.parse.urlparse(self.url) _, _, _, self.path = netlib.http.url.parse( urllib.parse.urlunparse([scheme, netloc, path, params, query, fragment])) @query.setter def query(self, value): self._set_query(value) @property def cookies(self): # type: () -> multidict.MultiDictView """ The request cookies. An empty :py:class:`~netlib.multidict.MultiDictView` object if the cookie monster ate them all. """ return multidict.MultiDictView( self._get_cookies, self._set_cookies ) def _get_cookies(self): h = self.headers.get_all("Cookie") return tuple(cookies.parse_cookie_headers(h)) def _set_cookies(self, value): self.headers["cookie"] = cookies.format_cookie_header(value) @cookies.setter def cookies(self, value): self._set_cookies(value) @property def path_components(self): """ The URL's path components as a tuple of strings. Components are unquoted. 
""" _, _, path, _, _, _ = urllib.parse.urlparse(self.url) # This needs to be a tuple so that it's immutable. # Otherwise, this would fail silently: # request.path_components.append("foo") return tuple(urllib.parse.unquote(i) for i in path.split("/") if i) @path_components.setter def path_components(self, components): components = map(lambda x: urllib.parse.quote(x, safe=""), components) path = "/" + "/".join(components) scheme, netloc, _, params, query, fragment = urllib.parse.urlparse(self.url) _, _, _, self.path = netlib.http.url.parse( urllib.parse.urlunparse([scheme, netloc, path, params, query, fragment])) def anticache(self): """ Modifies this request to remove headers that might produce a cached response. That is, we remove ETags and If-Modified-Since headers. """ delheaders = [ "if-modified-since", "if-none-match", ] for i in delheaders: self.headers.pop(i, None) def anticomp(self): """ Modifies this request to remove headers that will compress the resource's data. """ self.headers["accept-encoding"] = "identity" def constrain_encoding(self): """ Limits the permissible Accept-Encoding values, based on what we can decode appropriately. """ accept_encoding = self.headers.get("accept-encoding") if accept_encoding: self.headers["accept-encoding"] = ( ', '.join( e for e in encoding.ENCODINGS if e in accept_encoding ) ) @property def urlencoded_form(self): """ The URL-encoded form data as an :py:class:`~netlib.multidict.MultiDictView` object. An empty multidict.MultiDictView if the content-type indicates non-form data or the content could not be parsed. """ return multidict.MultiDictView( self._get_urlencoded_form, self._set_urlencoded_form ) def _get_urlencoded_form(self): is_valid_content_type = "application/x-www-form-urlencoded" in self.headers.get("content-type", "").lower() if is_valid_content_type: return tuple(netlib.http.url.decode(self.content)) return () def _set_urlencoded_form(self, value): """ Sets the body to the URL-encoded form data, and adds the appropriate content-type header. This will overwrite the existing content if there is one. """ self.headers["content-type"] = "application/x-www-form-urlencoded" self.content = netlib.http.url.encode(value) @urlencoded_form.setter def urlencoded_form(self, value): self._set_urlencoded_form(value) @property def multipart_form(self): """ The multipart form data as an :py:class:`~netlib.multidict.MultiDictView` object. None if the content-type indicates non-form data. """ return multidict.MultiDictView( self._get_multipart_form, self._set_multipart_form ) def _get_multipart_form(self): is_valid_content_type = "multipart/form-data" in self.headers.get("content-type", "").lower() if is_valid_content_type: return multipart.decode(self.headers, self.content) return () def _set_multipart_form(self, value): raise NotImplementedError() @multipart_form.setter def multipart_form(self, value): self._set_multipart_form(value)
## A script for extracting info about the patients used in the analysis ## Load necessary modules from rpy2 import robjects as ro import numpy as np import os ro.r('library(survival)') import re ##This call will only work if you are running python from the command line. ##If you are not running from the command line manually type in your paths. BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) ## Read the follow up data ## It was found that the v4.0 file contained more recent follow up data than v2.0, but the files contained nonredundant patients. ## So both files are loaded with the v4.0 getting preference. ## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent ## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data. ## This required an empty value in the list initialization. ## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...] f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_blca.txt')) ##get the column indexes needed columns=f.readline().split('\t') patient_column=columns.index('bcr_patient_barcode') alive_column=columns.index('last_contact_days_to') death_column=columns.index('death_days_to') f.readline() f.readline() data=[i.split('\t') for i in f] clinical1=[['','','']] for i in data: if clinical1[-1][0]==i[patient_column]: if re.search('^[0-9]+$',i[alive_column]): clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive'] elif re.search('^[0-9]+$',i[death_column]): clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead'] else: pass else: if re.search('^[0-9]+$',i[alive_column]): clinical1.append([i[patient_column],int(i[alive_column]),'Alive']) elif re.search('^[0-9]+$',i[death_column]): clinical1.append([i[patient_column],int(i[death_column]),'Dead']) else: pass ## Removing the empty value. clinical=clinical1[1:] f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_blca.txt')) ##get the column indexes needed columns=f.readline().split('\t') patient_column=columns.index('bcr_patient_barcode') alive_column=columns.index('last_contact_days_to') death_column=columns.index('death_days_to') f.readline() f.readline() data=[i.split('\t') for i in f] clinical2=[['','','']] for i in data: if i[patient_column] not in [j[0] for j in clinical]: if clinical2[-1][0]==i[patient_column]: if re.search('^[0-9]+$',i[alive_column]): clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive'] elif re.search('^[0-9]+$',i[death_column]): clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead'] else: pass else: if re.search('^[0-9]+$',i[alive_column]): clinical2.append([i[patient_column],int(i[alive_column]),'Alive']) elif re.search('^[0-9]+$',i[death_column]): clinical2.append([i[patient_column],int(i[death_column]),'Dead']) else: pass ## Removing the empty value and combining the lists. clinical+=clinical2[1:] ## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex. more_clinical={} grade_dict={} grade_dict['High Grade']=1 grade_dict['Low Grade']=0 sex_dict={} sex_dict['MALE']=0 sex_dict['FEMALE']=1 ## The "clinical_patient" file can also contain patients not listed in the follow_up files. 
## In these cases the clinical data for these patients gets appended to a new clinical list. f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','clinical','nationwidechildrens.org_clinical_patient_blca.txt')) ##get the column indexes needed columns=f.readline().split('\t') grade_column=columns.index('neoplasm_histologic_grade') sex_column=columns.index('gender') age_column=columns.index('age_at_diagnosis') patient_column=columns.index('bcr_patient_barcode') alive_column=columns.index('last_contact_days_to') death_column=columns.index('death_days_to') f.readline() f.readline() clinical4=[] data=[i.split('\t') for i in f] for i in data: try: more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])] if re.search('^[0-9]+$',i[alive_column]): clinical4.append([i[patient_column],int(i[alive_column]),'Alive']) elif re.search('^[0-9]+$',i[death_column]): clinical4.append([i[patient_column],int(i[death_column]),'Dead']) else: pass except: pass new_clinical=[] ####It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files ####All the clinical data is merged checking which data is the most up to date for i in clinical4: if i[0] not in [j[0] for j in clinical]: new_clinical.append(i) else: if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]: new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])]) else: new_clinical.append(i) ####also do the reverse since clinical can contain patients not included in clinical4 for i in clinical: if i[0] not in [j[0] for j in new_clinical]: new_clinical.append(i) #### only patients who had a follow up time greater than 0 days are included in the analysis clinical=[i for i in new_clinical if i[1]>0] final_clinical=[] #### A new list containing both follow up times and grade, sex, and age is constructed. #### Only patients with grade, sex, and age information are included. #### Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...] ## for i in clinical: if i[0] in more_clinical: final_clinical.append(i+more_clinical[i[0]]) ##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the BLCA patient and expression values. ##From this file I will load the expression data. ##There are duplicated transcripts and the possibility of a patient having multiple sequencing files. 
f=open(os.path.join(BASE_DIR,'tcga_data','BLCA','lncrna','BLCA.txt')) ##patient list is at the top of the file patients=f.readline().strip().split() ##create a dictionary mapping patient to all of their lncrna expression data patient_dict={} for index, i in enumerate(patients): patient_dict[i[:12]]='' ##find which patients have complete clinical data, order the data, and average data if necessary ##it's possible there are expression data for patients without clinical data, and clinical data without expression data ##create a new clinical list called clinical_and_files for consistency with previous scripts clinical_and_files=[] for i in final_clinical: if i[0] in patient_dict: clinical_and_files.append(i) ##print average age at diagnosis age=np.mean([i[5] for i in clinical_and_files]) ##print number of males males=len([i for i in clinical_and_files if i[4]==0]) ##print number of females females=len([i for i in clinical_and_files if i[4]==1]) ##to get the median survival we need to call survfit from r ##prepare variables for R ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files]) ##need to create a dummy variable group ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files]) ##need a vector for deaths death_dic={} death_dic['Alive']=0 death_dic['Dead']=1 ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files]) res=ro.r('survfit(Surv(times,died) ~ as.factor(group))') #the number of events(deaths) is the fourth column of the output deaths=str(res).split('\n')[-2].strip().split()[3] #the median survival time is the fifth column of the output median=str(res).split('\n')[-2].strip().split()[4] ##write data to a file f=open('patient_info.txt','w') f.write('Average Age') f.write('\t') f.write('Males') f.write('\t') f.write('Females') f.write('\t') f.write('Deaths') f.write('\t') f.write('Median Survival') f.write('\n') f.write(str(age)) f.write('\t') f.write(str(males)) f.write('\t') f.write(str(females)) f.write('\t') f.write(deaths) f.write('\t') f.write(median) f.close()
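## A hedged sketch (hypothetical helper, not used by the script above) of the
## keep-the-latest-row-per-patient rule described in the comments above:
## rows further down a follow_up file overwrite earlier rows for the same
## patient, and a row only counts when its last-contact or death field is a
## plain integer.
import re

def latest_follow_up(rows, patient_column, alive_column, death_column):
    latest = {}
    for row in rows:
        if re.search('^[0-9]+$', row[alive_column]):
            latest[row[patient_column]] = [row[patient_column], int(row[alive_column]), 'Alive']
        elif re.search('^[0-9]+$', row[death_column]):
            latest[row[patient_column]] = [row[patient_column], int(row[death_column]), 'Dead']
    return list(latest.values())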
# Copyright 2020 Google LLC # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tokenizer analogous to transformers.PreTrainedTokenizer which handles tokenization and numericalization. Mostly follows PreTrainedTokenizer interface with exceptions of task-specific methods such as source text encoding and pair (both source and target texts) encoding. Also handles postprocessing generated outputs into TOP format. """ import os import json from os.path import join as path_join import transformers from new_semantic_parsing import utils from new_semantic_parsing.dataclasses import SchemaItem, PairItem from new_semantic_parsing.configuration_encoder_decoder_wpointer import ( EncoderDecoderWPointerConfig, ) class TopSchemaTokenizer: """ Handles tokenization of TOP schema. Encodes schema into token_ids from schema_vocab and words into position-based ids. Used for *both* source sentence and schema. word_id = tokenizer.vocab_size + position [CLS] token is ignored for position calculation Usage: tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased') schema_vocab = reduce(set.union, map(utils.get_vocab_top_schema, schema_examples)) schema_tokenizer = TopSchemaTokenizer(schema_vocab, tokenizer) # encode pair source_str = 'Go to Mountain View' schema_str = '[IN:GET_DIRECTIONS Go to [SL:Location Mountain View]]' pair: PairItem = schema_tokenizer.encode_pair(source_str, schema_str) # Alternatively: # 1. encode source source_ids: List = schema_tokenizer.encode_source(source_str) # 2. 
encode schema source_ids: List = schema_tokenizer.encode(schema_str, source_ids) """ def __init__(self, schema_vocab, src_text_tokenizer: transformers.PreTrainedTokenizer): """ :param schema_vocab: iterable with all schema tokens (not source text tokens) :param src_text_tokenizer: transformers.PreTrainedTokenizer object """ self.pad_token = "[PAD]" self.bos_token = "[BOS]" self.eos_token = "[EOS]" self.vocab = schema_vocab self._itos = [self.pad_token, self.bos_token, self.eos_token] + sorted(schema_vocab) self._stoi = {s: i for i, s in enumerate(self._itos)} self.src_tokenizer = src_text_tokenizer @property def vocab_size(self): return len(self._itos) @property def pad_token_id(self): return self._stoi[self.pad_token] @property def bos_token_id(self): return self._stoi[self.bos_token] @property def eos_token_id(self): return self._stoi[self.eos_token] @property def special_tokens(self): return [self.pad_token, self.bos_token, self.eos_token] @property def special_ids(self): return [self.pad_token_id, self.bos_token_id, self.eos_token_id] def index2token(self, i): if i < self.vocab_size: return self._itos[i] else: # returns ptr@{i-self.vocab_size} return self.decode([i])[0] def encode(self, schema_text, source_ids, max_length=None, pad_to_max_length=False): return self.encode_plus(schema_text, source_ids, max_length, pad_to_max_length).ids def encode_plus( self, schema_text, source_ids, max_length=None, pad_to_max_length=False ) -> SchemaItem: # NOTE: this method should do the same things as .batch_encode_plus schema_tokens = self.tokenize(schema_text) if max_length is not None: schema_tokens = schema_tokens[: max_length - 2] # minus BOS and EOS schema_tokens = [self.bos_token] + schema_tokens + [self.eos_token] if pad_to_max_length: delta = max_length - len(schema_tokens) if delta > 0: schema_tokens += [self.pad_token] * delta item = self.convert_tokens_to_ids(schema_tokens, source_ids) return item def convert_tokens_to_ids(self, schema_tokens, src_token_ids) -> SchemaItem: """ :param schema_tokens: string :param src_token_ids: list or numpy array of integers :return: list of integers - a mix of token ids and position ids position id = position + vocab_size """ schema_ids = [] pointer_mask = [] # points to a first token corresponding to a word has_cls = ( self.src_tokenizer.cls_token is not None and self.src_tokenizer.cls_token_id in src_token_ids ) src_tokens_pointer = int(has_cls) for i, token in enumerate(schema_tokens): token_follows_schema = token in { "[", "]", "IN:", "SL:", *self.special_tokens, } or schema_tokens[i - 1] in {"IN:", "SL:"} if token in self._stoi and token_follows_schema: # The reason for second condition are cases when a word from a text exacly equal to the schema word # e.g. 
"IS THERE A PATH" # PATH is in a schema vocabulary, but not a schema word pointer_mask.append(0) schema_ids.append(self._stoi[token]) continue subtokens = self.src_tokenizer.encode(token, add_special_tokens=False) for subtoken in subtokens: assert subtoken == src_token_ids[src_tokens_pointer] pointer_mask.append(1) schema_ids.append(self.vocab_size + src_tokens_pointer) src_tokens_pointer += 1 return SchemaItem(schema_ids, pointer_mask) def decode(self, ids, source_ids=None, skip_special_tokens=True, return_tokens=False): schema = [] # we combine text into chunks to that it would be easier to merge bpe tokens into words text_chunk_ids = [] for i in ids: if i < self.vocab_size: if text_chunk_ids and source_ids is not None: schema.append(self.src_tokenizer.decode(text_chunk_ids)) text_chunk_ids = [] if skip_special_tokens and i in self.special_ids: continue schema.append(self._itos[i]) else: position = i - self.vocab_size if source_ids is not None: text_chunk_ids.append(source_ids[position]) else: schema.append(f"@ptr{position}") if return_tokens: return schema schema = self.detokenize(schema) return schema def encode_pair(self, schema_text, source_text) -> PairItem: source_ids = self.encode_source(source_text) schema_item = self.encode_plus(schema_text, source_ids) schema_ids = schema_item.ids source_pointer_mask = utils.get_src_pointer_mask(source_ids, self.src_tokenizer) schema_pointer_mask = schema_item.pointer_mask pair = PairItem(source_ids, source_pointer_mask, schema_ids, schema_pointer_mask) return pair def encode_source(self, source_text): return self.src_tokenizer.encode(source_text) def is_schema_token_id(self, i): """Checks if i correspond to the schema token or to the pointer token.""" is_special_token = i in [self.pad_token_id, self.bos_token_id, self.eos_token_id] return i < self.vocab_size and not is_special_token def save(self, path, encoder_model_type=None): """ Save schema tokenizer and text tokenizer Needs pre-trained encoder model type - this is a workaround for Transformers #4197 """ os.makedirs(path, exist_ok=True) with open(path_join(path, "schema_vocab.txt"), "w") as f: f.write("\n".join(self.vocab)) self.src_tokenizer.save_pretrained(path) if encoder_model_type is None: return with open(path_join(path, "config.json"), "w") as f: json.dump({"model_type": encoder_model_type}, f) @classmethod def load(cls, path: str): with open(path_join(path, "schema_vocab.txt")) as f: schema_vocab = set(f.read().strip("\n").split("\n")) if not os.path.exists(path_join(path, "config.json")): raise RuntimeError( f"{path_join(path, 'config.json')} is required for tokenizer loading" ) try: config = transformers.AutoConfig.from_pretrained(path) except KeyError: # config with custom model_type raises KeyError config = EncoderDecoderWPointerConfig.from_pretrained(path) config = config.encoder text_tokenizer = transformers.AutoTokenizer.from_pretrained(path, config=config) return cls(schema_vocab, text_tokenizer) @staticmethod def tokenize(text): # TODO: make a faster regex version tokenized = "" for char in text: if char in ["[", "]"]: char = " " + char + " " if char in [":"]: char = char + " " tokenized += char tokens = tokenized.strip(" ").split(" ") tokens = [t for t in tokens if t != ""] return tokens @staticmethod def detokenize(tokens): merge_vocab = {"[", "IN:", "SL:"} text = "" for token in tokens: if token in merge_vocab: text += token else: text += token + " " return text.strip(" ") @staticmethod def postprocess(text): """TOP format expects tokenized words and punctuation""" if 
len(text) == 0: return "" stripped_symbols = [".", ",", "?", "!", ";"] postprocessed = text[0] is_abbr = False for i in range(1, len(text)): # always just append the last symbol, as it is ] if i >= len(text) - 1: postprocessed += text[i] continue # do not strip dots for capital letters # e.g. D.C. if text[i - 1].isupper() and text[i] == ".": is_abbr = True postprocessed += text[i] continue # do not strip dots for capital letters # e.g. D. C . -> D.C. # NOTE: it should be "D.C ." to match the TOP format if is_abbr and text[i - 1] == "." and text[i] == " " and text[i + 1].isupper(): continue # all abbreviations should be handled above is_abbr = False # strip punctuation if text[i - 1] != " " and text[i] in stripped_symbols: postprocessed += " " + text[i] continue if text[i - 1] in stripped_symbols and text[i] != " ": postprocessed += " " + text[i] continue # strip apostrophe for possessive nouns if text[i - 1] != " " and text[i : i + 2] == "'s": postprocessed += " " + text[i] continue # merge apostrophe with the next symbol # used when a possessive noun is a slot value # e.g. "[SL:CONTACT Vlad] ' s" if text[i - 1] == "'" and text[i] == " " and text[i + 1] == "s": continue postprocessed += text[i] # time postprocessed = postprocessed.replace("a . m", "a.m") postprocessed = postprocessed.replace("p . m", "p.m") return postprocessed
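# A hedged usage sketch of the static tokenize/detokenize helpers above.
# The schema string is invented for illustration; detokenize only merges
# "[", "IN:" and "SL:" with the following token, so closing brackets keep
# a leading space.
example_schema = "[IN:GET_DIRECTIONS Go to [SL:LOCATION Mountain View]]"
example_tokens = TopSchemaTokenizer.tokenize(example_schema)
assert example_tokens == [
    "[", "IN:", "GET_DIRECTIONS", "Go", "to",
    "[", "SL:", "LOCATION", "Mountain", "View", "]", "]",
]
assert TopSchemaTokenizer.detokenize(example_tokens) == (
    "[IN:GET_DIRECTIONS Go to [SL:LOCATION Mountain View ] ]"
)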
""" Admin classes for all the shop models. Many attributes in here are controlled by the ``SHOP_USE_VARIATIONS`` setting which defaults to True. In this case, variations are managed in the product change view, and are created given the ``ProductOption`` values selected. A handful of fields (mostly those defined on the abstract ``Priced`` model) are duplicated across both the ``Product`` and ``ProductVariation`` models, with the latter being the definitive source, and the former supporting denormalised data that can be referenced when iterating through products, without having to query the underlying variations. When ``SHOP_USE_VARIATIONS`` is set to False, a single variation is still stored against each product, to keep consistent with the overall model design. Since from a user perspective there are no variations, the inlines for variations provide a single inline for managing the one variation per product, so in the product change view, a single set of price fields are available via the one variation inline. Also when ``SHOP_USE_VARIATIONS`` is set to False, the denormalised price fields on the product model are presented as editable fields in the product change list - if these form fields are used, the values are then pushed back onto the one variation for the product. """ from copy import deepcopy from django.contrib import admin from django.db.models import ImageField from django.utils.translation import ugettext_lazy as _ from mezzanine.conf import settings from mezzanine.core.admin import DisplayableAdmin, TabularDynamicInlineAdmin from mezzanine.pages.admin import PageAdmin from cartridge.shop.fields import MoneyField from cartridge.shop.forms import ProductAdminForm, ProductVariationAdminForm from cartridge.shop.forms import ProductVariationAdminFormset from cartridge.shop.forms import DiscountAdminForm, ImageWidget, MoneyWidget from cartridge.shop.models import Category, Product, ProductImage from cartridge.shop.models import ProductVariation, ProductOption, Order from cartridge.shop.models import OrderItem, Sale, DiscountCode # Lists of field names. option_fields = [f.name for f in ProductVariation.option_fields()] _flds = lambda s: [f.name for f in Order._meta.fields if f.name.startswith(s)] billing_fields = _flds("billing_detail") shipping_fields = _flds("shipping_detail") ################ # CATEGORIES # ################ # Categories fieldsets are extended from Page fieldsets, since # categories are a Mezzanine Page type. category_fieldsets = deepcopy(PageAdmin.fieldsets) category_fieldsets[0][1]["fields"][3:3] = ["content", "products"] category_fieldsets += ((_("Product filters"), { "fields": ("sale", ("price_min", "price_max"), "combined"), "classes": ("collapse-closed",)},),) if settings.SHOP_CATEGORY_USE_FEATURED_IMAGE: category_fieldsets[0][1]["fields"].insert(3, "featured_image") # Options are only used when variations are in use, so only provide # them as filters for dynamic categories when this is the case. if settings.SHOP_USE_VARIATIONS: category_fieldsets[-1][1]["fields"] = (("options", ) + category_fieldsets[-1][1]["fields"]) class CategoryAdmin(PageAdmin): fieldsets = category_fieldsets formfield_overrides = {ImageField: {"widget": ImageWidget}} filter_horizontal = ("options", "products",) ################ # VARIATIONS # ################ # If variations aren't used, the variation inline should always # provide a single inline for managing the single variation per # product. 
variation_fields = ["sku", "num_in_stock", "unit_price", "sale_price", "sale_from", "sale_to", "image"] if settings.SHOP_USE_VARIATIONS: variation_fields.insert(1, "default") variations_max_num = None variations_extra = 0 else: variations_max_num = 1 variations_extra = 1 class ProductVariationAdmin(admin.TabularInline): verbose_name_plural = _("Current variations") model = ProductVariation fields = variation_fields max_num = variations_max_num extra = variations_extra formfield_overrides = {MoneyField: {"widget": MoneyWidget}} form = ProductVariationAdminForm formset = ProductVariationAdminFormset class ProductImageAdmin(TabularDynamicInlineAdmin): model = ProductImage formfield_overrides = {ImageField: {"widget": ImageWidget}} ############## # PRODUCTS # ############## product_fieldsets = deepcopy(DisplayableAdmin.fieldsets) product_fieldsets[0][1]["fields"][1] = ("status", "available") product_fieldsets[0][1]["fields"].extend(["content", "categories"]) product_fieldsets = list(product_fieldsets) product_fieldsets.append((_("Other products"), { "classes": ("collapse-closed",), "fields": ("related_products", "upsell_products")})) product_list_display = ["admin_thumb", "title", "status", "available", "admin_link"] product_list_editable = ["status", "available"] # If variations are used, set up the product option fields for managing # variations. If not, expose the denormalised price fields for a product # in the change list view. if settings.SHOP_USE_VARIATIONS: product_fieldsets.insert(1, (_("Create new variations"), {"classes": ("create-variations",), "fields": option_fields})) else: extra_list_fields = ["sku", "unit_price", "sale_price", "num_in_stock"] product_list_display[4:4] = extra_list_fields product_list_editable.extend(extra_list_fields) class ProductAdmin(DisplayableAdmin): class Media: js = ("cartridge/js/admin/product_variations.js",) css = {"all": ("cartridge/css/admin/product.css",)} list_display = product_list_display list_display_links = ("admin_thumb", "title") list_editable = product_list_editable list_filter = ("status", "available", "categories") filter_horizontal = ("categories", "related_products", "upsell_products") search_fields = ("title", "content", "categories__title", "variations__sku") inlines = (ProductImageAdmin, ProductVariationAdmin) form = ProductAdminForm fieldsets = product_fieldsets def save_model(self, request, obj, form, change): """ Store the product object for creating variations in save_formset. """ super(ProductAdmin, self).save_model(request, obj, form, change) self._product = obj def save_formset(self, request, form, formset, change): """ Here be dragons. We want to perform these steps sequentially: - Save variations formset - Run the required variation manager methods: (create_from_options, manage_empty, etc) - Save the images formset The variations formset needs to be saved first for the manager methods to have access to the correct variations. The images formset needs to be run last, because if images are deleted that are selected for variations, the variations formset will raise errors when saving due to invalid image selections. This gets addressed in the set_default_images method. An additional problem is the actual ordering of the inlines, which are in the reverse order for achieving the above. To address this, we store the images formset as an attribute, and then call save on it after the other required steps have occurred. """ # Store the images formset for later saving, otherwise save the # formset. 
if formset.model == ProductImage: self._images_formset = formset else: super(ProductAdmin, self).save_formset(request, form, formset, change) # Run each of the variation manager methods if we're saving # the variations formset. if formset.model == ProductVariation: # Build up selected options for new variations. options = dict([(f, request.POST.getlist(f)) for f in option_fields if request.POST.getlist(f)]) # Create a list of image IDs that have been marked to delete. deleted_images = [request.POST.get(f.replace("-DELETE", "-id")) for f in request.POST if f.startswith("images-") and f.endswith("-DELETE")] # Create new variations for selected options. self._product.variations.create_from_options(options) # Create a default variation if there are none. self._product.variations.manage_empty() # Remove any images deleted just now from variations they're # assigned to, and set an image for any variations without one. self._product.variations.set_default_images(deleted_images) # Save the images formset stored previously. super(ProductAdmin, self).save_formset(request, form, self._images_formset, change) # Run again to allow for no images existing previously, with # new images added which can be used as defaults for variations. self._product.variations.set_default_images(deleted_images) # Copy duplicate fields (``Priced`` fields) from the default # variation to the product. self._product.copy_default_variation() class ProductOptionAdmin(admin.ModelAdmin): ordering = ("type", "name") list_display = ("type", "name") list_display_links = ("type",) list_editable = ("name",) list_filter = ("type",) search_fields = ("type", "name") radio_fields = {"type": admin.HORIZONTAL} class OrderItemInline(admin.TabularInline): verbose_name_plural = _("Items") model = OrderItem extra = 0 formfield_overrides = {MoneyField: {"widget": MoneyWidget}} class OrderAdmin(admin.ModelAdmin): ordering = ("status", "-id") list_display = ("id", "billing_name", "total", "time", "status", "transaction_id", "invoice") list_editable = ("status",) list_filter = ("status", "time") list_display_links = ("id", "billing_name",) search_fields = (["id", "status", "transaction_id"] + billing_fields + shipping_fields) date_hierarchy = "time" radio_fields = {"status": admin.HORIZONTAL} inlines = (OrderItemInline,) formfield_overrides = {MoneyField: {"widget": MoneyWidget}} fieldsets = ( (_("Billing details"), {"fields": (tuple(billing_fields),)}), (_("Shipping details"), {"fields": (tuple(shipping_fields),)}), (None, {"fields": ("additional_instructions", ("shipping_total", "shipping_type"), ("discount_total", "discount_code"), "item_total", ("total", "status"), "transaction_id")}), ) class SaleAdmin(admin.ModelAdmin): list_display = ("title", "active", "discount_deduct", "discount_percent", "discount_exact", "valid_from", "valid_to") list_editable = ("active", "discount_deduct", "discount_percent", "discount_exact", "valid_from", "valid_to") filter_horizontal = ("categories", "products") formfield_overrides = {MoneyField: {"widget": MoneyWidget}} form = DiscountAdminForm fieldsets = ( (None, {"fields": ("title", "active")}), (_("Apply to product and/or products in categories"), {"fields": ("products", "categories")}), (_("Reduce unit price by"), {"fields": (("discount_deduct", "discount_percent", "discount_exact"),)}), (_("Sale period"), {"fields": (("valid_from", "valid_to"),)}), ) class DiscountCodeAdmin(admin.ModelAdmin): list_display = ("title", "active", "code", "discount_deduct", "discount_percent", "min_purchase", "free_shipping", 
"valid_from", "valid_to") list_editable = ("active", "code", "discount_deduct", "discount_percent", "min_purchase", "free_shipping", "valid_from", "valid_to") filter_horizontal = ("categories", "products") formfield_overrides = {MoneyField: {"widget": MoneyWidget}} form = DiscountAdminForm fieldsets = ( (None, {"fields": ("title", "active", "code")}), (_("Apply to product and/or products in categories"), {"fields": ("products", "categories")}), (_("Reduce unit price by"), {"fields": (("discount_deduct", "discount_percent"),)}), (None, {"fields": (("min_purchase", "free_shipping"),)}), (_("Valid for"), {"fields": (("valid_from", "valid_to", "uses_remaining"),)}), ) admin.site.register(Category, CategoryAdmin) admin.site.register(Product, ProductAdmin) if settings.SHOP_USE_VARIATIONS: admin.site.register(ProductOption, ProductOptionAdmin) admin.site.register(Order, OrderAdmin) admin.site.register(Sale, SaleAdmin) admin.site.register(DiscountCode, DiscountCodeAdmin)
import re import json from expects import expect, raise_error, match, be_a, equal, have_key from doublex import Spy, Mock, Stub, ANY_ARG from doublex_expects import have_been_called, have_been_called_with, have_been_satisfied from pysellus.stock_integrations import trello with description('the Trello integration'): with description('requires the following arguments to be passed to the constructor:'): with it('API key'): def attempt_to_instantiate_without_api_key(): trello.TrelloIntegration(token='a_token') expect(attempt_to_instantiate_without_api_key).to(raise_error(TypeError, match('missing.+key'))) with it('user token'): def attempt_to_instantiate_without_token(): trello.TrelloIntegration(key='an_api_key') expect(attempt_to_instantiate_without_token).to(raise_error(TypeError, match('missing.+token'))) with description('offers two notification modes'): with description('by card'): with it("is selected by passing mode='card'"): integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='card', card='some_card_id', checklist='some_checklist_id' ) expect(integration.notification).to(be_a(trello.ByCardNotification)) with description('requires the following arguments to be passed to the constructor:'): with it('a card id'): def attempt_to_instantiate_in_card_mode_without_card_id(): integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='card', checklist='some_checklist_id' ) expect(attempt_to_instantiate_in_card_mode_without_card_id).to(raise_error(TypeError, match('missing.+card'))) with it('a checklist id'): def attempt_to_instantiate_in_card_mode_without_checklist_id(): integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='card', card='some_card_id' ) expect(attempt_to_instantiate_in_card_mode_without_checklist_id).to(raise_error(TypeError, match('missing.+checklist'))) with description('is implemented in the ByCardNotification class'): with before.each: self.some_card_id = '1234' self.some_checklist_id = '5678' self.card_wise_notification = trello.ByCardNotification( card=self.some_card_id, checklist=self.some_checklist_id ) with it("has the right 'endpoint' attribute"): # see https://trello.com/docs/api/card/index.html#post-1-cards-card-id-or-shortlink-checklist-idchecklist-checkitem well_formed_endpoint = 'cards/' + self.some_card_id + '/checklist/' + self.some_checklist_id + '/checkItem' expect(self.card_wise_notification.endpoint).to(equal(well_formed_endpoint)) with it('has a method to create the body of the request'): a_notification_message = { 'title': 'the title', 'content': 'the content' } assembled_body = self.card_wise_notification.assemble_body(**a_notification_message) for required_key in [ 'idChecklist', 'name' ]: expect(assembled_body).to(have_key(required_key)) with description('by list'): with it("is selected by passing mode='list'"): integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='list', list='some_list_id' ) expect(integration.notification).to(be_a(trello.ByListNotification)) with description('requires more arguments to be passed to the constructor:'): with it('a list id'): def attempt_to_instantiate_in_list_mode_without_list_id(): integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='list' ) expect(attempt_to_instantiate_in_list_mode_without_list_id).to(raise_error(TypeError, match('missing.+list'))) with description('is implemented in the ByListNotification class'): with before.each: self.some_list_id = '1234' self.list_wise_notification 
= trello.ByListNotification( list=self.some_list_id ) with it("has the right 'endpoint' attribute"): # see https://trello.com/docs/api/list/index.html#post-1-lists-idlist-cards well_formed_endpoint = 'lists/' + self.some_list_id + '/cards' expect(self.list_wise_notification.endpoint).to(equal(well_formed_endpoint)) with it("has a method to create the body of the request"): a_notification_message = { 'title': 'the title of the notification message', 'content': 'the content of the notification message' } assembled_body = self.list_wise_notification.assemble_body(**a_notification_message) for required_key in [ 'name', 'desc' ]: expect(assembled_body).to(have_key(required_key)) with it('defaults to card-wise notification'): integration = trello.TrelloIntegration( key='an_api_key', token='a_token', # mode='card', card='some_card_id', checklist='some_checklist_id' ) expect(integration.notification).to(be_a(trello.ByCardNotification)) with description("delegates to its trello_api_client's `post` method"): with before.each: self.notification_mock = Mock() self.trello_api_client_spy = Spy() self.integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='card', card='some_card_id', checklist='some_checklist_id', trello_api_client=self.trello_api_client_spy ) self.integration.notification = self.notification_mock with it('when `on_next` is called'): with self.notification_mock as notification_mock: notification_mock.endpoint notification_mock.assemble_body(ANY_ARG) some_element_to_notify_of = { 'test_name': 'blah', 'element': {} } self.integration.on_next(some_element_to_notify_of) expect(self.trello_api_client_spy.post).to(have_been_called.once) expect(self.notification_mock).to(have_been_satisfied) with it('when `on_error` is called'): with self.notification_mock as notification_mock: notification_mock.endpoint notification_mock.assemble_body(ANY_ARG) some_error_to_notify_of = { 'test_name': 'blah', 'element': {}, 'error': Exception('some error description') } self.integration.on_error(some_error_to_notify_of) expect(self.trello_api_client_spy.post).to(have_been_called.once) expect(self.notification_mock).to(have_been_satisfied) with description('delegates to its formatter for determining what is the title and what is the content of the notification'): with before.each: self.dummy_trello_api_client = Stub() self.formatter_spy = Spy() with self.formatter_spy as formatter_spy: formatter_spy.create_element_message(ANY_ARG).returns(dict()) formatter_spy.create_error_message(ANY_ARG).returns(dict()) formatter_spy.create_completion_message(ANY_ARG).returns(dict()) self.integration = trello.TrelloIntegration( key='an_api_key', token='a_token', mode='card', card='some_card_id', checklist='some_checklist_id', trello_api_client=self.dummy_trello_api_client, formatter=self.formatter_spy ) self.integration.notification = Stub() with it('calls Formatter.create_element_message when `on_next` is called'): some_element_to_notify_of = { 'test_name': 'blah', 'element': {} } self.integration.on_next(some_element_to_notify_of) expect(self.formatter_spy.create_element_message).to(have_been_called_with(some_element_to_notify_of).once) with it('calls Formatter#create_error_message when `on_error` is called'): some_element_error_to_notify_of = { 'test_name': 'blah', 'element': {}, 'error': Exception('the error description goes here') } self.integration.on_error(some_element_error_to_notify_of) 
expect(self.formatter_spy.create_error_message).to(have_been_called_with(some_element_error_to_notify_of).once) with it('calls Formatter#create_completion_message when `on_completed` is called'): self.integration.on_completed() expect(self.formatter_spy.create_completion_message).to(have_been_called_with(be_a(str)).once) with description('the Formatter transforms notified elements into notification messages'): with context('Formatter#create_element_message'): with before.each: self.some_element_to_notify_of = { 'test_name': 'some test', 'element': { 'data': 1 } } self.element_message = trello.Formatter.create_element_message(self.some_element_to_notify_of) with it('includes the test name in the title'): created_title = self.element_message['title'] expect(created_title).to(match(self.some_element_to_notify_of['test_name'])) with it('includes the element which was tested in the content'): created_content = self.element_message['content'] expect(created_content).to(match(json.dumps(self.some_element_to_notify_of['element']))) with context('Formatter#create_error_message'): with before.each: self.some_element_to_notify_of = { 'test_name': 'some test', 'element': { 'data': 1 }, 'error': Exception('the reason for the error') } self.error_message = trello.Formatter.create_error_message(self.some_element_to_notify_of) with it('includes an error notice in the title'): created_title = self.error_message['title'] expect(created_title).to(match('error', re.IGNORECASE)) with it('includes the test name in the title'): created_title = self.error_message['title'] expect(created_title).to(match(self.some_element_to_notify_of['test_name'])) with it('includes the element which was tested in the content'): created_content = self.error_message['content'] expect(created_content).to(match(json.dumps(self.some_element_to_notify_of['element']))) with it('includes the type and message of the exception raised in the content'): created_content = self.error_message['content'] expect(created_content).to(match(re.escape(repr(self.some_element_to_notify_of['error'])), re.DOTALL)) with context('Formatter#create_completion_message'): with before.each: self.a_completion_message = '--- some completion message ---' self.created_completion_message = trello.Formatter.create_completion_message(self.a_completion_message) with it('includes a delimiter in the title'): created_title = self.created_completion_message['title'] expect(created_title).to(match('---+')) with it('the content is the empty string'): created_content = self.created_completion_message['content'] expect(created_content).to(equal('')) with description('the Trello API object'): with description('requires the following arguments to be passed to the constructor:'): with it('API key'): def attempt_to_instantiate_without_api_key(): trello.TrelloAPI(token='a_token') expect(attempt_to_instantiate_without_api_key).to(raise_error(TypeError, match('missing.+key'))) with it('user token'): def attempt_to_instantiate_without_token(): trello.TrelloAPI(key='an_api_key') expect(attempt_to_instantiate_without_token).to(raise_error(TypeError, match('missing.+token'))) with description('abstracts POST actions on the Trello API:'): with before.each: self.some_api_key = 'abc' self.some_api_token = 'def' self.http_client_spy = Spy() self.trello_api = trello.TrelloAPI( key=self.some_api_key, token=self.some_api_token, http_client=self.http_client_spy ) self.dummy_request_body = {} self.endpoint = 'path/to/some/endpoint' with it('calls the `post` mehtod of its http client with the 
proper url'): self.trello_api.post(self.endpoint, self.dummy_request_body) well_formed_url = 'https://trello.com/1/' + self.endpoint expect(self.http_client_spy.post).to(have_been_called_with(url=well_formed_url).once) with it('sends auth params passed in constructor as query parameters'): self.trello_api.post(self.endpoint, self.dummy_request_body) auth_params = { 'key': self.some_api_key, 'token': self.some_api_token } expect(self.http_client_spy.post).to(have_been_called_with(params=auth_params).once) with it('caps strings in payload to have a length of at most the limit imposed by Trello'): payload = { 'a': 'a' * 50000, 'b': 'b' * 200, 'c': 42 } capped_payload = {} for key, value in payload.items(): if isinstance(value, str): capped_payload[key] = value[:trello.TrelloAPI.TRELLO_MAX_STRING_LENGTH] else: capped_payload[key] = value self.trello_api.post(self.endpoint, payload) expect(self.http_client_spy.post).to(have_been_called_with(json=capped_payload).once)
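# A minimal sketch (assumed implementation, not taken from the trello module
# under test) of the string-capping behaviour exercised by the last example
# above: string values are truncated to the Trello length limit, everything
# else passes through unchanged. The limit below is a placeholder value.
ASSUMED_TRELLO_MAX_STRING_LENGTH = 16384

def cap_payload(payload, limit=ASSUMED_TRELLO_MAX_STRING_LENGTH):
    return {
        key: value[:limit] if isinstance(value, str) else value
        for key, value in payload.items()
    }

capped = cap_payload({'a': 'a' * 50000, 'c': 42})
assert capped['c'] == 42
assert len(capped['a']) == ASSUMED_TRELLO_MAX_STRING_LENGTH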
from django.db import models from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, AddressPool, Port from core.models.plcorebase import StrippedCharField import os from django.db import models, transaction from django.forms.models import model_to_dict from django.db.models import Q from operator import itemgetter, attrgetter, methodcaller from core.models import Tag from core.models.service import LeastLoadedNodeScheduler import traceback from xos.exceptions import * from core.models import SlicePrivilege, SitePrivilege from sets import Set from xos.config import Config MCORD_KIND = "MCORD" MCORD_USE_VTN = getattr(Config(), "networking_use_vtn", False) VBBU_KIND = "vBBU" VSGW_KIND = "vSGW" VPGW_KIND = "vPGW" net_types = ("s1u", "s1mme", "rru") # The class to represent the service. Most of the service logic is given for us # in the Service class but, we have some configuration that is specific for # this example. class MCORDService(Service): KIND = MCORD_KIND class Meta: # When the proxy field is set to True the model is represented as # it's superclass in the database, but we can still change the python # behavior. In this case HelloWorldServiceComplete is a Service in the # database. proxy = True # The name used to find this service, all directories are named this app_label = "mcord" verbose_name = "MCORD Service" # This is the class to represent the tenant. Most of the logic is given to use # in TenantWithContainer, however there is some configuration and logic that # we need to define for this example. class VBBUComponent(TenantWithContainer): class Meta: # Same as a above, HelloWorldTenantComplete is represented as a # TenantWithContainer, but we change the python behavior. proxy = True verbose_name = "VBBU MCORD Service Component" # The kind of the service is used on forms to differentiate this service # from the other services. KIND = VBBU_KIND # Ansible requires that the sync_attributes field contain nat_ip and nat_mac # these will be used to determine where to SSH to for ansible. # Getters must be defined for every attribute specified here. sync_attributes = ("s1u_ip", "s1u_mac", "s1mme_ip", "s1mme_mac", "rru_ip", "rru_mac") # default_attributes is used cleanly indicate what the default values for # the fields are. default_attributes = {"display_message": "VBBU Component is ready!", "s1u_tag": "201", "s1mme_tag": "200", "rru_tag": "199"} def __init__(self, *args, **kwargs): mcord_services = MCORDService.get_service_objects().all() # When the tenant is created the default service in the form is set # to be the first created HelloWorldServiceComplete if mcord_services: self._meta.get_field( "provider_service").default = mcord_services[0].id super(VBBUComponent, self).__init__(*args, **kwargs) def can_update(self, user): #Allow creation of this model instances for non-admin users also return True def save(self, *args, **kwargs): if not self.creator: if not getattr(self, "caller", None): # caller must be set when creating a monitoring channel since it creates a slice raise XOSProgrammingError("ServiceComponents's self.caller was not set") self.creator = self.caller if not self.creator: raise XOSProgrammingError("ServiceComponents's self.creator was not set") super(VBBUComponent, self).save(*args, **kwargs) # This call needs to happen so that an instance is created for this # tenant is created in the slice. One instance is created per tenant. 
model_policy_mcord_servicecomponent(self.pk) def save_instance(self, instance): with transaction.atomic(): super(VBBUComponent, self).save_instance(instance) if instance.isolation in ["vm"]: for ntype in net_types: lan_network = self.get_lan_network(instance, ntype) port = self.find_or_make_port(instance,lan_network) if (ntype == "s1u"): port.set_parameter("s_tag", self.s1u_tag) port.set_parameter("neutron_port_name", "stag-%s" % self.s1u_tag) port.save() elif (ntype == "s1mme"): port.set_parameter("s_tag", self.s1mme_tag) port.set_parameter("neutron_port_name", "stag-%s" % self.s1mme_tag) port.save() elif (ntype == "rru"): port.set_parameter("s_tag", self.rru_tag) port.set_parameter("neutron_port_name", "stag-%s" % self.rru_tag) port.save() def delete(self, *args, **kwargs): # Delete the instance that was created for this tenant self.cleanup_container() super(VBBUComponent, self).delete(*args, **kwargs) def find_or_make_port(self, instance, network, **kwargs): port = Port.objects.filter(instance=instance, network=network) if port: port = port[0] print "port already exist", port[0] else: port = Port(instance=instance, network=network, **kwargs) print "NETWORK", network, "MAKE_PORT", port port.save() return port def get_lan_network(self, instance, ntype): slice = self.provider_service.slices.all()[0] lan_networks = [x for x in slice.networks.all() if ntype in x.name] if not lan_networks: raise XOSProgrammingError("No lan_network") return lan_networks[0] def manage_container(self): from core.models import Instance, Flavor if self.deleted: return # For container or container_vm isolation, use what TenantWithCotnainer # provides us slice = self.get_slice() if slice.default_isolation in ["container_vm", "container"]: super(VBBUComponent,self).manage_container() return if not self.s1u_tag: raise XOSConfigurationError("S1U_TAG is missed") if not self.s1mme_tag: raise XOSConfigurationError("S1U_TAG is missed") if not self.rru_tag: raise XOSConfigurationError("S1U_TAG is missed") if self.instance: # We're good. return instance = self.make_instance() self.instance = instance super(TenantWithContainer, self).save() def get_slice(self): if not self.provider_service.slices.count(): raise XOSConfigurationError("The service has no slices") slice = self.provider_service.slices.all()[0] return slice def make_instance(self): slice = self.provider_service.slices.all()[0] flavors = Flavor.objects.filter(name=slice.default_flavor) if not flavors: raise XOSConfigurationError("No default flavor") default_flavor = slice.default_flavor slice = self.provider_service.slices.all()[0] if slice.default_isolation == "container_vm": (node, parent) = ContainerVmScheduler(slice).pick() else: (node, parent) = LeastLoadedNodeScheduler(slice).pick() instance = Instance(slice = slice, node = node, image = self.image, creator = self.creator, deployment = node.site_deployment.deployment, flavor = flavors[0], isolation = slice.default_isolation, parent = parent) self.save_instance(instance) return instance def ip_to_mac(self, ip): (a, b, c, d) = ip.split('.') return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d)) # Getter for the message that will appear on the webpage # By default it is "Hello World!" 
@property def display_message(self): return self.get_attribute( "display_message", self.default_attributes['display_message']) @display_message.setter def display_message(self, value): self.set_attribute("display_message", value) @property def s1u_tag(self): return self.get_attribute( "s1u_tag", self.default_attributes['s1u_tag']) @s1u_tag.setter def s1u_tag(self, value): self.set_attribute("s1u_tag", value) @property def s1mme_tag(self): return self.get_attribute( "s1mme_tag", self.default_attributes['s1mme_tag']) @s1mme_tag.setter def s1mme_tag(self, value): self.set_attribute("s1mme_tag", value) @property def rru_tag(self): return self.get_attribute( "rru_tag", self.default_attributes['rru_tag']) @rru_tag.setter def rru_tag(self, value): self.set_attribute("rru_tag", value) @property def addresses(self): if (not self.id) or (not self.instance): return {} addresses = {} for ns in self.instance.ports.all(): if "s1u" in ns.network.name.lower(): addresses["s1u"] = (ns.ip, ns.mac) elif "s1mme" in ns.network.name.lower(): addresses["s1mme"] = (ns.ip, ns.mac) elif "rru" in ns.network.name.lower(): addresses["rru"] = (ns.ip, ns.mac) return addresses @property def s1u_ip(self): return self.addresses.get("s1u", (None, None))[0] @property def s1u_mac(self): return self.addresses.get("s1u", (None, None))[1] @property def s1mme_ip(self): return self.addresses.get("s1mme", (None, None))[0] @property def s1mme_mac(self): return self.addresses.get("s1mme", (None, None))[1] @property def rru_ip(self): return self.addresses.get("rru", (None, None))[0] @property def rru_mac(self): return self.addresses.get("rru", (None, None))[1] def model_policy_mcord_servicecomponent(pk): # This section of code is atomic to prevent race conditions with transaction.atomic(): # We find all of the tenants that are waiting to update component = VBBUComponent.objects.select_for_update().filter(pk=pk) if not component: return # Since this code is atomic it is safe to always use the first tenant component = component[0] component.manage_container()
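# A small standalone illustration of the ip_to_mac convention defined above:
# each IPv4 octet is embedded as a hex byte into a locally administered
# 02:42:... MAC address. Copied here as a free function purely for the demo.
def ip_to_mac_demo(ip):
    (a, b, c, d) = ip.split('.')
    return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))

assert ip_to_mac_demo("10.0.2.15") == "02:42:0a:00:02:0f"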
import itertools import numpy as np from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_raises, suppress_warnings, assert_raises_regex, assert_allclose ) # Setup for optimize einsum chars = 'abcdefghij' sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) global_size_dict = dict(zip(chars, sizes)) class TestEinsum: def test_einsum_errors(self): for do_opt in [True, False]: # Need enough arguments assert_raises(ValueError, np.einsum, optimize=do_opt) assert_raises(ValueError, np.einsum, "", optimize=do_opt) # subscripts must be a string assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt) # out parameter must be an array assert_raises(TypeError, np.einsum, "", 0, out='test', optimize=do_opt) # order parameter must be a valid order assert_raises(TypeError, np.einsum, "", 0, order='W', optimize=do_opt) # casting parameter must be a valid casting assert_raises(ValueError, np.einsum, "", 0, casting='blah', optimize=do_opt) # dtype parameter must be a valid dtype assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type', optimize=do_opt) # other keyword arguments are rejected assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt) # issue 4528 revealed a segfault with this call assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt) # number of operands must match count in subscripts string assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt) assert_raises(ValueError, np.einsum, ",", 0, [0], [0], optimize=do_opt) assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt) # can't have more subscripts than dimensions in the operand assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt) assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt) assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt) assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt) # invalid ellipsis assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt) # invalid subscript character assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt) assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt) # output subscripts must appear in input assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt) # output subscripts may only be specified once assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]], optimize=do_opt) # dimensions much match when being collapsed assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3), optimize=do_opt) assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3), optimize=do_opt) # broadcasting to new dimensions must be enabled explicitly assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), optimize=do_opt) assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], out=np.arange(4).reshape(2, 2), optimize=do_opt) with assert_raises_regex(ValueError, "'b'"): # gh-11221 - 'c' erroneously appeared in the error message a = np.ones((3, 3, 4, 5, 6)) b = np.ones((3, 4, 5)) np.einsum('aabcb,abc', a, b) def test_einsum_views(self): # pass-through 
for do_opt in [True, False]: a = np.arange(6) a.shape = (2, 3) b = np.einsum("...", a, optimize=do_opt) assert_(b.base is a) b = np.einsum(a, [Ellipsis], optimize=do_opt) assert_(b.base is a) b = np.einsum("ij", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) assert_(b.base is a) assert_equal(b, a) # output is writeable whenever input is writeable b = np.einsum("...", a, optimize=do_opt) assert_(b.flags['WRITEABLE']) a.flags['WRITEABLE'] = False b = np.einsum("...", a, optimize=do_opt) assert_(not b.flags['WRITEABLE']) # transpose a = np.arange(6) a.shape = (2, 3) b = np.einsum("ji", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, a.T) # diagonal a = np.arange(9) a.shape = (3, 3) b = np.einsum("ii->i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension a = np.arange(27) a.shape = (3, 3, 3) b = np.einsum("...ii->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal a = np.arange(27) a.shape = (3, 3, 3) b = np.einsum("iii->i", a, optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) assert_(b.base is a) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes a = np.arange(24) a.shape = (2, 3, 4) b = np.einsum("ijk->jik", a, optimize=do_opt) assert_(b.base is a) 
assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) def check_einsum_sums(self, dtype, do_opt=False): # Check various sums. Does many sizes to exercise unrolled loops. # sum(a, axis=-1) for n in range(1, 17): a = np.arange(n, dtype=dtype) assert_equal(np.einsum("i->", a, optimize=do_opt), np.sum(a, axis=-1).astype(dtype)) assert_equal(np.einsum(a, [0], [], optimize=do_opt), np.sum(a, axis=-1).astype(dtype)) for n in range(1, 17): a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) assert_equal(np.einsum("...i->...", a, optimize=do_opt), np.sum(a, axis=-1).astype(dtype)) assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), np.sum(a, axis=-1).astype(dtype)) # sum(a, axis=0) for n in range(1, 17): a = np.arange(2*n, dtype=dtype).reshape(2, n) assert_equal(np.einsum("i...->...", a, optimize=do_opt), np.sum(a, axis=0).astype(dtype)) assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), np.sum(a, axis=0).astype(dtype)) for n in range(1, 17): a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) assert_equal(np.einsum("i...->...", a, optimize=do_opt), np.sum(a, axis=0).astype(dtype)) assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), np.sum(a, axis=0).astype(dtype)) # trace(a) for n in range(1, 17): a = np.arange(n*n, dtype=dtype).reshape(n, n) assert_equal(np.einsum("ii", a, optimize=do_opt), np.trace(a).astype(dtype)) assert_equal(np.einsum(a, [0, 0], optimize=do_opt), np.trace(a).astype(dtype)) # multiply(a, b) assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case for n in range(1, 17): a = np.arange(3 * n, dtype=dtype).reshape(3, n) b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), np.multiply(a, b)) assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), np.multiply(a, b)) # inner(a,b) for n in range(1, 17): a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), np.inner(a, b)) for n in range(1, 11): a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), np.inner(a.T, b.T).T) assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), np.inner(a.T, b.T).T) # outer(a,b) for n in range(1, 17): a = np.arange(3, dtype=dtype)+1 b = np.arange(n, dtype=dtype)+1 assert_equal(np.einsum("i,j", a, b, optimize=do_opt), np.outer(a, b)) assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests with suppress_warnings() as sup: sup.filter(np.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): a = np.arange(4*n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), np.dot(a, b)) assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), np.dot(a, b)) c = np.arange(4, dtype=dtype) np.einsum("ij,j", a, b, out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) c[...] 
= 0 np.einsum(a, [0, 1], b, [1], out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) for n in range(1, 17): a = np.arange(4*n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), np.dot(b.T, a.T)) assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), np.dot(b.T, a.T)) c = np.arange(4, dtype=dtype) np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.dot(b.T.astype('f8'), a.T.astype('f8')).astype(dtype)) c[...] = 0 np.einsum(a.T, [1, 0], b.T, [1], out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.dot(b.T.astype('f8'), a.T.astype('f8')).astype(dtype)) # matmat(a,b) / a.dot(b) where a is matrix, b is matrix for n in range(1, 17): if n < 8 or dtype != 'f2': a = np.arange(4*n, dtype=dtype).reshape(4, n) b = np.arange(n*6, dtype=dtype).reshape(n, 6) assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), np.dot(a, b)) assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), np.dot(a, b)) for n in range(1, 17): a = np.arange(4*n, dtype=dtype).reshape(4, n) b = np.arange(n*6, dtype=dtype).reshape(n, 6) c = np.arange(24, dtype=dtype).reshape(4, 6) np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) c[...] = 0 np.einsum(a, [0, 1], b, [1, 2], out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) # matrix triple product (note this is not currently an efficient # way to multiply 3 matrices) a = np.arange(12, dtype=dtype).reshape(3, 4) b = np.arange(20, dtype=dtype).reshape(4, 5) c = np.arange(30, dtype=dtype).reshape(5, 6) if dtype != 'f2': assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), a.dot(b).dot(c)) assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], optimize=do_opt), a.dot(b).dot(c)) d = np.arange(18, dtype=dtype).reshape(3, 6) np.einsum("ij,jk,kl", a, b, c, out=d, dtype='f8', casting='unsafe', optimize=do_opt) tgt = a.astype('f8').dot(b.astype('f8')) tgt = tgt.dot(c.astype('f8')).astype(dtype) assert_equal(d, tgt) d[...] = 0 np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, dtype='f8', casting='unsafe', optimize=do_opt) tgt = a.astype('f8').dot(b.astype('f8')) tgt = tgt.dot(c.astype('f8')).astype(dtype) assert_equal(d, tgt) # tensordot(a, b) if np.dtype(dtype) != np.dtype('f2'): a = np.arange(60, dtype=dtype).reshape(3, 4, 5) b = np.arange(24, dtype=dtype).reshape(4, 3, 2) assert_equal(np.einsum("ijk, jil -> kl", a, b), np.tensordot(a, b, axes=([1, 0], [0, 1]))) assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), np.tensordot(a, b, axes=([1, 0], [0, 1]))) c = np.arange(10, dtype=dtype).reshape(5, 2) np.einsum("ijk,jil->kl", a, b, out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), axes=([1, 0], [0, 1])).astype(dtype)) c[...] 
= 0 np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, dtype='f8', casting='unsafe', optimize=do_opt) assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), axes=([1, 0], [0, 1])).astype(dtype)) # logical_and(logical_and(a!=0, b!=0), c!=0) a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) c = np.array([True, True, False, True, True, False, True, True]) assert_equal(np.einsum("i,i,i->i", a, b, c, dtype='?', casting='unsafe', optimize=do_opt), np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], dtype='?', casting='unsafe'), np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) a = np.arange(9, dtype=dtype) assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) # Various stride0, contiguous, and SSE aligned variants for n in range(1, 25): a = np.arange(n, dtype=dtype) if np.dtype(dtype).itemsize > 1: assert_equal(np.einsum("...,...", a, a, optimize=do_opt), np.multiply(a, a)) assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), np.multiply(a[1:], a[:-1])) assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), np.dot(a[1:], a[:-1])) assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), 2*np.sum(a[1:])) assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), 2*np.sum(a[1:])) # An object array, summed as the data type a = np.arange(9, dtype=object) b = np.einsum("i->", a, dtype=dtype, casting='unsafe') assert_equal(b, np.sum(a)) assert_equal(b.dtype, np.dtype(dtype)) b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') assert_equal(b, np.sum(a)) assert_equal(b.dtype, np.dtype(dtype)) # A case which was failing (ticket #1885) p = np.arange(2) + 1 q = np.arange(4).reshape(2, 2) + 3 r = np.arange(4).reshape(2, 2) + 7 assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) # singleton dimensions broadcast (gh-10343) p = np.ones((10,2)) q = np.ones((1,2)) assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), np.einsum('ij,ij->j', p, q, optimize=False)) assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), [10.] * 2) # a blas-compatible contraction broadcasting case which was failing # for optimize=True (ticket #10930) x = np.array([2., 3.]) y = np.array([4.]) assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
# all-ones array was bypassing bug (ticket #10930) p = np.ones((1, 5)) / 2 q = np.ones((5, 5)) / 2 for optimize in (True, False): assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, optimize=optimize), np.einsum("...ij,...jk->...ik", p, q, optimize=optimize)) assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, optimize=optimize), np.full((1, 5), 1.25)) # Cases which were failing (gh-10899) x = np.eye(2, dtype=dtype) y = np.ones(2, dtype=dtype) assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), [2.]) # contig_contig_outstride0_two assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), [2.]) # stride0_contig_outstride0_two assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), [2.]) # contig_stride0_outstride0_two def test_einsum_sums_int8(self): self.check_einsum_sums('i1') def test_einsum_sums_uint8(self): self.check_einsum_sums('u1') def test_einsum_sums_int16(self): self.check_einsum_sums('i2') def test_einsum_sums_uint16(self): self.check_einsum_sums('u2') def test_einsum_sums_int32(self): self.check_einsum_sums('i4') self.check_einsum_sums('i4', True) def test_einsum_sums_uint32(self): self.check_einsum_sums('u4') self.check_einsum_sums('u4', True) def test_einsum_sums_int64(self): self.check_einsum_sums('i8') def test_einsum_sums_uint64(self): self.check_einsum_sums('u8') def test_einsum_sums_float16(self): self.check_einsum_sums('f2') def test_einsum_sums_float32(self): self.check_einsum_sums('f4') def test_einsum_sums_float64(self): self.check_einsum_sums('f8') self.check_einsum_sums('f8', True) def test_einsum_sums_longdouble(self): self.check_einsum_sums(np.longdouble) def test_einsum_sums_cfloat64(self): self.check_einsum_sums('c8') self.check_einsum_sums('c8', True) def test_einsum_sums_cfloat128(self): self.check_einsum_sums('c16') def test_einsum_sums_clongdouble(self): self.check_einsum_sums(np.clongdouble) def test_einsum_misc(self): # This call used to crash because of a bug in # PyArray_AssignZero a = np.ones((1, 2)) b = np.ones((2, 2, 1)) assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) # Regression test for issue #10369 (test unicode inputs with Python 2) assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], optimize=u'greedy'), 20) # The iterator had an issue with buffering this reduction a = np.ones((5, 12, 4, 2, 3), np.int64) b = np.ones((5, 12, 11), np.int64) assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), np.einsum('ijklm,ijn->', a, b)) assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), np.einsum('ijklm,ijn->', a, b, optimize=True)) # Issue #2027, was a problem in the contiguous 3-argument # inner loop implementation a = np.arange(1, 3) b = np.arange(1, 5).reshape(2, 2) c = np.arange(1, 9).reshape(4, 2) assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), [[[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]]]) assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), [[[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]]]) # Ensure explicitly setting out=None does not cause an error # see issue gh-15776 and issue gh-15256 assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) def test_subscript_range(self): # Issue #7741, make sure that all letters of Latin alphabet 
(both uppercase & lowercase) can be used # when creating a subscript from arrays a = np.ones((2, 3)) b = np.ones((3, 4)) np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) def test_einsum_broadcast(self): # Issue #2455 change in handling ellipsis # remove the 'middle broadcast' error # only use the 'RIGHT' iteration in prepare_op_axes # adds auto broadcast on left where it belongs # broadcast on right has to be explicit # We need to test the optimized parsing as well A = np.arange(2 * 3 * 4).reshape(2, 3, 4) B = np.arange(3) ref = np.einsum('ijk,j->ijk', A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error A = np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) ref = np.einsum('ik,kj->ij', A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) v = np.arange(dims[2]) ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) for opt in [True, False]: assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = np.arange(J * K * M * 3).reshape(J, K, M, 3) ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) for opt in [True, False]: assert_equal(np.einsum('...lmn,lmno->...o', A, B, optimize=opt), ref) # used to raise error def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug # This case revealed a bug in nditer where it reported a stride # as 'fixed' (0) when it was in fact not fixed during processing # (0 or 4). The reason for the bug was that the check for a fixed # stride was using the information from the 2D inner loop reuse # to restrict the iteration dimensions it had to validate to be # the same, but that 2D inner loop reuse logic is only triggered # during the buffer copying step, and hence it was invalid to # rely on those values. The fix is to check all the dimensions # of the stride in question, which in the test case reveals that # the stride is not fixed. # # NOTE: This test is triggered by the fact that the default buffersize, # used by einsum, is 8192, and 3*2731 = 8193, is larger than that # and results in a mismatch between the buffering and the # striding for operand A. A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) es = np.einsum('cl, cpx->lpx', A, B) tp = np.tensordot(A, B, axes=(0, 0)) assert_equal(es, tp) # The following is the original test case from the bug report, # made repeatable by changing random arrays to aranges. 
A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) es = np.einsum('cl, cpxy->lpxy', A, B) tp = np.tensordot(A, B, axes=(0, 0)) assert_equal(es, tp) def test_einsum_fixed_collapsingbug(self): # Issue #5147. # The bug only occurred when output argument of einssum was used. x = np.random.normal(0, 1, (5, 5, 5, 5)) y1 = np.zeros((5, 5)) np.einsum('aabb->ab', x, out=y1) idx = np.arange(5) y2 = x[idx[:, None], idx[:, None], idx, idx] assert_equal(y1, y2) def test_einsum_failed_on_p9_and_s390x(self): # Issues gh-14692 and gh-12689 # Bug with signed vs unsigned char errored on power9 and s390x Linux tensor = np.random.random_sample((10, 10, 10, 10)) x = np.einsum('ijij->', tensor) y = tensor.trace(axis1=0, axis2=2).trace() assert_allclose(x, y) def test_einsum_all_contig_non_contig_output(self): # Issue gh-5907, tests that the all contiguous special case # actually checks the contiguity of the output x = np.ones((5, 5)) out = np.ones(10)[::2] correct_base = np.ones(10) correct_base[::2] = 5 # Always worked (inner iteration is done with 0-stride): np.einsum('mi,mi,mi->m', x, x, x, out=out) assert_array_equal(out.base, correct_base) # Example 1: out = np.ones(10)[::2] np.einsum('im,im,im->m', x, x, x, out=out) assert_array_equal(out.base, correct_base) # Example 2, buffering causes x to be contiguous but # special cases do not catch the operation before: out = np.ones((2, 2, 2))[..., 0] correct_base = np.ones((2, 2, 2)) correct_base[..., 0] = 2 x = np.ones((2, 2), np.float32) np.einsum('ij,jk->ik', x, x, out=out) assert_array_equal(out.base, correct_base) def test_small_boolean_arrays(self): # See gh-5946. # Use array of True embedded in False. a = np.zeros((16, 1, 1), dtype=np.bool_)[:2] a[...] 
= True out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] tgt = np.ones((2, 1, 1), dtype=np.bool_) res = np.einsum('...ij,...jk->...ik', a, a, out=out) assert_equal(res, tgt) def test_out_is_res(self): a = np.arange(9).reshape(3, 3) res = np.einsum('...ij,...jk->...ik', a, a, out=a) assert res is a def optimize_compare(self, subscripts, operands=None): # Tests all paths of the optimization function against # conventional einsum if operands is None: args = [subscripts] terms = subscripts.split('->')[0].split(',') for term in terms: dims = [global_size_dict[x] for x in term] args.append(np.random.rand(*dims)) else: args = [subscripts] + operands noopt = np.einsum(*args, optimize=False) opt = np.einsum(*args, optimize='greedy') assert_almost_equal(opt, noopt) opt = np.einsum(*args, optimize='optimal') assert_almost_equal(opt, noopt) def test_hadamard_like_products(self): # Hadamard outer products self.optimize_compare('a,ab,abc->abc') self.optimize_compare('a,b,ab->ab') def test_index_transformations(self): # Simple index transformation cases self.optimize_compare('ea,fb,gc,hd,abcd->efgh') self.optimize_compare('ea,fb,abcd,gc,hd->efgh') self.optimize_compare('abcd,ea,fb,gc,hd->efgh') def test_complex(self): # Long test cases self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') self.optimize_compare('abhe,hidj,jgba,hiab,gab') self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') self.optimize_compare('bdhe,acad,hiab,agac,hibd') def test_collapse(self): # Inner products self.optimize_compare('ab,ab,c->') self.optimize_compare('ab,ab,c->c') self.optimize_compare('ab,ab,cd,cd->') self.optimize_compare('ab,ab,cd,cd->ac') self.optimize_compare('ab,ab,cd,cd->cd') self.optimize_compare('ab,ab,cd,cd,ef,ef->') def test_expand(self): # Outer products self.optimize_compare('ab,cd,ef->abcdef') self.optimize_compare('ab,cd,ef->acdf') self.optimize_compare('ab,cd,de->abcde') self.optimize_compare('ab,cd,de->be') self.optimize_compare('ab,bcd,cd->abcd') self.optimize_compare('ab,bcd,cd->abd') def test_edge_cases(self): # Difficult edge cases for optimization self.optimize_compare('eb,cb,fb->cef') self.optimize_compare('dd,fb,be,cdb->cef') self.optimize_compare('bca,cdb,dbf,afc->') self.optimize_compare('dcc,fce,ea,dbf->ab') self.optimize_compare('fdf,cdd,ccd,afe->ae') self.optimize_compare('abcd,ad') self.optimize_compare('ed,fcd,ff,bcf->be') self.optimize_compare('baa,dcf,af,cde->be') self.optimize_compare('bd,db,eac->ace') self.optimize_compare('fff,fae,bef,def->abd') self.optimize_compare('efc,dbc,acf,fd->abe') self.optimize_compare('ba,ac,da->bcd') def test_inner_product(self): # Inner products self.optimize_compare('ab,ab') self.optimize_compare('ab,ba') self.optimize_compare('abc,abc') self.optimize_compare('abc,bac') self.optimize_compare('abc,cba') def test_random_cases(self): # Randomly built test cases self.optimize_compare('aab,fa,df,ecc->bde') self.optimize_compare('ecb,fef,bad,ed->ac') self.optimize_compare('bcf,bbb,fbf,fc->') self.optimize_compare('bb,ff,be->e') self.optimize_compare('bcb,bb,fc,fff->') self.optimize_compare('fbb,dfd,fc,fc->') self.optimize_compare('afd,ba,cc,dc->bf') self.optimize_compare('adb,bc,fa,cfc->d') self.optimize_compare('bbd,bda,fc,db->acf') self.optimize_compare('dba,ead,cad->bce') self.optimize_compare('aef,fbc,dca->bde') def 
test_combined_views_mapping(self): # gh-10792 a = np.arange(9).reshape(1, 1, 3, 1, 3) b = np.einsum('bbcdc->d', a) assert_equal(b, [12]) def test_broadcasting_dot_cases(self): # Ensures broadcasting cases are not mistaken for GEMM a = np.random.rand(1, 5, 4) b = np.random.rand(4, 6) c = np.random.rand(5, 6) d = np.random.rand(10) self.optimize_compare('ijk,kl,jl', operands=[a, b, c]) self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d]) e = np.random.rand(1, 1, 5, 4) f = np.random.rand(7, 7) self.optimize_compare('abjk,kl,jl', operands=[e, b, c]) self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f]) # Edge case found in gh-11308 g = np.arange(64).reshape(2, 4, 8) self.optimize_compare('obk,ijk->ioj', operands=[g, g]) class TestEinsumPath: def build_operands(self, string, size_dict=global_size_dict): # Builds views based off initial operands operands = [string] terms = string.split('->')[0].split(',') for term in terms: dims = [size_dict[x] for x in term] operands.append(np.random.rand(*dims)) return operands def assert_path_equal(self, comp, benchmark): # Checks if list of tuples are equivalent ret = (len(comp) == len(benchmark)) assert_(ret) for pos in range(len(comp) - 1): ret &= isinstance(comp[pos + 1], tuple) ret &= (comp[pos + 1] == benchmark[pos + 1]) assert_(ret) def test_memory_contraints(self): # Ensure memory constraints are satisfied outer_test = self.build_operands('a,b,c->abc') path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0)) self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0)) self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) long_test = self.build_operands('acdf,jbje,gihb,hfac') path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0)) self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0)) self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) def test_long_paths(self): # Long complex cases # Long test 1 long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') path, path_str = np.einsum_path(*long_test1, optimize='greedy') self.assert_path_equal(path, ['einsum_path', (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) path, path_str = np.einsum_path(*long_test1, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) # Long test 2 long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb') path, path_str = np.einsum_path(*long_test2, optimize='greedy') print(path) self.assert_path_equal(path, ['einsum_path', (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]) path, path_str = np.einsum_path(*long_test2, optimize='optimal') print(path) self.assert_path_equal(path, ['einsum_path', (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]) def test_edge_paths(self): # Difficult edge cases # Edge test1 edge_test1 = self.build_operands('eb,cb,fb->cef') path, path_str = np.einsum_path(*edge_test1, optimize='greedy') self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) path, path_str = np.einsum_path(*edge_test1, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) # Edge test2 edge_test2 = self.build_operands('dd,fb,be,cdb->cef') path, path_str = np.einsum_path(*edge_test2, optimize='greedy') self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) path, path_str = np.einsum_path(*edge_test2, optimize='optimal') self.assert_path_equal(path, ['einsum_path', 
(0, 3), (0, 1), (0, 1)]) # Edge test3 edge_test3 = self.build_operands('bca,cdb,dbf,afc->') path, path_str = np.einsum_path(*edge_test3, optimize='greedy') self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) path, path_str = np.einsum_path(*edge_test3, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) # Edge test4 edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab') path, path_str = np.einsum_path(*edge_test4, optimize='greedy') self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) path, path_str = np.einsum_path(*edge_test4, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) # Edge test5 edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) path, path_str = np.einsum_path(*edge_test4, optimize='greedy') self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) path, path_str = np.einsum_path(*edge_test4, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) def test_path_type_input(self): # Test explicit path handeling path_test = self.build_operands('dcc,fce,ea,dbf->ab') path, path_str = np.einsum_path(*path_test, optimize=False) self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) path, path_str = np.einsum_path(*path_test, optimize=True) self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)] path, path_str = np.einsum_path(*path_test, optimize=exp_path) self.assert_path_equal(path, exp_path) # Double check einsum works on the input path noopt = np.einsum(*path_test, optimize=False) opt = np.einsum(*path_test, optimize=exp_path) assert_almost_equal(noopt, opt) def test_spaces(self): #gh-10794 arr = np.array([[1]]) for sp in itertools.product(['', ' '], repeat=4): # no error for any spacing np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) def test_overlap(): a = np.arange(9, dtype=int).reshape(3, 3) b = np.arange(9, dtype=int).reshape(3, 3) d = np.dot(a, b) # sanity check c = np.einsum('ij,jk->ik', a, b) assert_equal(c, d) #gh-10080, out overlaps one of the operands c = np.einsum('ij,jk->ik', a, b, out=b) assert_equal(c, d)
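# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the pattern the
# TestEinsumPath cases exercise -- computing a contraction path once with
# np.einsum_path and feeding the resulting list back into np.einsum via the
# optimize= argument.  The array shapes below are arbitrary example values.
# ---------------------------------------------------------------------------
def _einsum_path_reuse_sketch():
    import numpy as np
    a = np.random.rand(10, 20)
    b = np.random.rand(20, 30)
    c = np.random.rand(30, 5)
    # Compute an optimized contraction order once ...
    path, path_info = np.einsum_path('ij,jk,kl->il', a, b, c,
                                     optimize='greedy')
    # ... then reuse it for the actual contraction(s).
    result = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)
    # The path-driven result matches the unoptimized evaluation.
    assert np.allclose(result,
                       np.einsum('ij,jk,kl->il', a, b, c, optimize=False))
    return path, path_info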
# -*- coding: utf-8 -*- import time import os import logging import argparse import json import codecs import cPickle as pickle import signal import numpy import networkx as nx import beanstalkc import foggy import jobq from itertools import izip from uuid import uuid4 from glob import glob from IPython.parallel import (Client, interactive) logging.basicConfig() LOGGER = logging.getLogger() #LOGGER.setLevel(logging.INFO) LOGGER.setLevel(logging.DEBUG) #LOGGER.addHandler(logging.StreamHandler()) ############################################################################### # Supply ############################################################################### class JSONBeanShooter(object): _shooter = { "parallel": "_shot", "deletory": "_capacity_shot", "buffered": "_capacity_shot" } def __init__(self, bean_queue, encoding="utf-8", **kw_args): """ Warning ------- bean_queue must be using the right tube! """ super(JSONBeanShooter, self).__init__(**kw_args) self.queue = bean_queue self.encoding = encoding def __call__(self, filename): config = json.load(codecs.open(filename, encoding=self.encoding, mode="rb")) LOGGER.debug(str(config)) for path in config["graphs_dir"]: assert os.path.exists(path), "directory does not exist '%s'" % path shoot = getattr(self, self._shooter[config["walk_type"]]) description = config.copy() del description["graphs_dir"] del description["graphs_type"] del description["walker_factors"] del description["steps_factors"] del description["variation_factors"] del description["capacity_factors"] del description["repetition"] for (path, net_type) in izip(config["graphs_dir"], config["graphs_type"]): graphs = glob(os.path.join(path, "*.pkl")) LOGGER.debug("%d graphs found", len(graphs)) for net in graphs: description["graph_file"] = net description["graph_type"] = net_type for kw in config["walker_factors"]: description["walker_factor"] = kw for var in config["variation_factors"]: description["variation_factor"] = var for ks in config["steps_factors"]: description["steps_factor"] = ks for _ in range(config["repetition"]): shoot(config, description) def _shot(self, config, description): description["sim_id"] = str(uuid4()).replace("-", "") LOGGER.debug("firing") self.queue.put(pickle.dumps(description)) def _capacity_shot(self, config, description): description["sim_id"] = str(uuid4()).replace("-", "") for k in config["capacity_factors"]: description["capcity_factor"] = k LOGGER.debug("firing") self.queue.put(pickle.dumps(description)) ############################################################################### # Consumption ############################################################################### def remote_consumer(worker, host, port): queue = beanstalkc.Connection(host=host, port=port) queue.watch("input") queue.use("output") jobq.generic_consumer(queue, worker, "STOP") @interactive def dummy_worker(**kw_args): LOGGER.debug(str(kw_args)) return "success" class BeanMuncher(object): _setup = { "uniform": foggy.prepare_uniform_walk, "directed": foggy.prepare_directed_walk } _type = { "deletory": foggy.deletory_march, "buffered": foggy.buffered_march, "parallel": foggy.march } _distribution = { "uniform": foggy.UniformInterval } _visit = { "constant": foggy.ConstantValue, "degree": foggy.DegreeDependentValue } def graph_info(graph): LOGGER.info("%s graph:", "directed" if graph.is_directed() else "undirected") LOGGER.info(" %d nodes", len(graph)) LOGGER.info(" %d edges", graph.size()) LOGGER.info(" %d component(s)", nx.number_connected_components(graph)) def 
uniform_capacity(graph, indices, walkers, num_steps): capacity = numpy.zeros(len(graph), dtype=float) capacity += float(walkers.mid_point * num_steps) / float(len(graph)) return capacity def degree_capacity(graph, indices, walkers, num_steps): capacity = numpy.zeros(len(graph), dtype=float) total_degree = sum(deg for (node, deg) in graph.degree_iter()) for (node, deg) in graph.degree_iter(): capacity[indices[node]] = float(deg * walkers.mid_point * num_steps) /\ total_degree return capacity _capacity = { "uniform" : uniform_capacity, "degree" : degree_capacity } def _run(self, config, description, net, nbrs, probs, indices, walkers, num_steps, visits, seed=None): job_descr = dict() job_descr["parameters"] = description job_descr["simulation"] = self._type[config["walk_type"]] job_descr["neighbours"] = nbrs job_descr["probabilities"] = probs job_descr["sources"] = indices.values() job_descr["num_walkers"] = walkers job_descr["time_points"] = config["time_points"] job_descr["steps"] = num_steps job_descr["assessor"] = visits job_descr["transient"] = config["transient"] job_descr["seed"] = seed job_descr["capacity"] = None def _capacity_run(self, config, description, net, nbrs, probs, indices, walkers, num_steps, visits, seed=None): description["sim_id"] = str(uuid4()).replace("-", "") job_descr = dict() job_descr["parameters"] = description job_descr["simulation"] = self._type[config["walk_type"]] job_descr["neighbours"] = nbrs job_descr["probabilities"] = probs job_descr["sources"] = indices.values() job_descr["num_walkers"] = walkers job_descr["time_points"] = config["time_points"] job_descr["steps"] = num_steps job_descr["assessor"] = visits job_descr["transient"] = config["transient"] job_descr["seed"] = seed job_descr["capacity"] = self._capacity[config["capacity"]](net, indices, walkers, num_steps) _dispatch = { "parallel": _run, "deletory": _capacity_run, "buffered": _capacity_run } def __init__(self, bean_queue, encoding="utf-8", **kw_args): """ Warning ------- bean_queue must be using the right tube! 
""" super(JSONBeanShooter, self).__init__(**kw_args) self.queue = bean_queue self.encoding = encoding def __call__(self, filename): config = json.load(codecs.open(filename, encoding=self.encoding, mode="rb")) LOGGER.debug(str(config)) for path in config["graphs_dir"]: assert os.path.exists(path), "directory does not exist '%s'" % path setup = self._setup[config["walk_setup"]] distribution = self._distribution[config["walker_dist"]] visits = self._visit[config["visit_value"]] simulation = getattr(self, self._dispatch[config["walk_type"]]) description = dict() description["walk_setup"] = config["walk_setup"] description["walk_type"] = config["walk_type"] description["walker_dist"] = config["walker_dist"] description["visit_value"] = config["visit_value"] description["capacity"] = config["capacity"] description["transient"] = config["transient"] for (path, net_type) in izip(config["graphs_dir"], config["graphs_type"]): graphs = [nx.read_gpickle(net_file) for net_file in glob(os.path.join(path, "*.pkl"))] LOGGER.debug("%d graphs found", len(graphs)) for net in graphs: description["graph_name"] = net.name description["graph_type"] = net_type # self.graph_info(net) (probs, nbrs, indices) = setup(net) for kw in config["walker_factors"]: description["walker_factor"] = kw num_walkers = len(net) * kw for var in config["variation_factors"]: description["variation_factor"] = var walkers = distribution(num_walkers, num_walkers * var) for ks in config["steps_factors"]: description["steps_factor"] = ks num_steps = len(net) * ks for _ in range(config["repetition"]): simulation(config, description, net, nbrs, probs, indices, walkers, num_steps, visits, seed=None) ############################################################################### # Results ############################################################################### def dummy_handler(result): LOGGER.debug(str(result)) def result_handler(result): results = foggy.ResultManager(h5_file) filename = os.path.join(out_path, sim_id) numpy.savez(filename, activity=activity) numpy.savez(filename, activity=activity, hits=removed, capacity=dynamic) results.append(sim_id, graph.name, graph.is_directed(), graph, indices, activity, internal, external) results.append_sim(sim_id, config["walk"], config["walk_type"], config["walker_dist"], config["walker_variation"], config["visit_value"], config["walker_factor"], config["steps_factor"], config["time_points"], config["transient_cutoff"], graph.name, graph_type, graph.is_directed()) results.finalize() ############################################################################### # Main ############################################################################### def supply(args): queue = beanstalkc.Connection(host=args.host, port=args.port) queue.use("input") dispatch = JSONBeanShooter(queue) watcher = jobq.DirectoryWatcher(args.watch_dir, dispatch, glob_pattern=args.glob, wait=5.0) watcher.start() LOGGER.debug("watcher running") while watcher.is_alive(): try: time.sleep(0.2) except (KeyboardInterrupt, SystemExit): LOGGER.critical("shutdown signal received") watcher.stop() break watcher.join() queue.close() def consume(args): rc = Client(profile=args.profile, cluster_id=args.cluster_id) dv = rc.direct_view() pid_map = rc[:].apply_async(os.getpid).get_dict() LOGGER.debug("remote module import") dv.execute("import beanstalkc; import foggy; import jobq;"\ "import logging; from IPython.config import Application;"\ "LOGGER = Application.instance().log;"\ "LOGGER.setLevel(logging.DEBUG);", block=True) 
LOGGER.debug("pushing remote variables") dv.push({"consumer": remote_consumer, "worker": dummy_worker, "host": args.host, "port": args.port}, block=True) LOGGER.debug("remote function call") dv.execute("consumer(worker, host, port)", block=False) while True: try: time.sleep(0.2) except (KeyboardInterrupt, SystemExit): LOGGER.critical("shutdown signal received") for pid in pid_map.itervalues(): try: os.kill(pid, signal.SIGINT) except OSError: LOGGER.warn("ipengine with pid '%d' no longer alive", pid) break def handle(args): queue = beanstalkc.Connection(host=args.host, port=args.port) queue.watch("output") jobq.generic_handler(queue, dummy_handler) queue.close() return def info(args): def queue_info(stats): LOGGER.info("All jobs: %d", stats["total-jobs"]) LOGGER.info("Jobs currently in queue: %d", stats["current-jobs-ready"]) LOGGER.info("Jobs handled correctly: %d", stats["cmd-delete"]) LOGGER.info("Jobs failed: %d", stats["current-jobs-buried"]) queue = beanstalkc.Connection(host=args.host, port=args.port) show_total = False try: stats = queue.stats_tube("input") except beanstalkc.CommandFailed: show_total = True else: LOGGER.info("%s", "".join(["*"] * 58)) LOGGER.info("Input queue statistics:") queue_info(stats) try: stats = queue.stats_tube("output") except beanstalkc.CommandFailed: show_total = True else: LOGGER.info("%s", "".join(["*"] * 58)) LOGGER.info("Output queue statistics:") queue_info(stats) if show_total: LOGGER.info("%s", "".join(["*"] * 58)) LOGGER.info("Total queue statistics:") queue_info(queue.stats()) if __name__ == "__main__": parser = argparse.ArgumentParser(description=None) parser.add_argument("-V", "--version", action="version", version="0.1") parser.add_argument("-l", "--host", dest="host", metavar="IP", default="127.0.0.1", help="host IP address of beanstalkd queue (default: %(default)s)") parser.add_argument("-p", "--port", dest="port", type=int, default=11300, help="host port of beanstalkd queue (default: %(default)d)") subparsers = parser.add_subparsers(help="sub-command help") # supply parser_s = subparsers.add_parser("supply", help="supply the beanstalkd queue with jobs") parser_s.add_argument(dest="watch_dir", metavar="watched directory", help="directory to watch for job config files") parser_s.add_argument("-g", "--glob", dest="glob", default="*", help="extension of files that configure jobs (default: %(default)s)") parser_s.set_defaults(func=supply) # consume parser_c = subparsers.add_parser("consume", help="consume jobs") parser_c.add_argument("--profile", dest="profile", default="default", help="IPython profile to connect to cluster (default: %(default)s)") parser_c.add_argument("--cluster-id", dest="cluster_id", default=None, help="IPython cluster-id to connect to (default: %(default)s)") parser_c.set_defaults(func=consume) # handle parser_h = subparsers.add_parser("handle", help="handle results") parser_h.set_defaults(func=handle) # info parser_i = subparsers.add_parser("info", help="print queue stats") parser_i.set_defaults(func=info) args = parser.parse_args() args.func(args)
import argparse import string import numpy import sys import re import os from sklearn.externals import joblib from sklearn.multiclass import OneVsRestClassifier from sklearn.linear_model import LogisticRegression from sklearn.feature_extraction.text import TfidfVectorizer from scipy import sparse from nltk.stem.snowball import SnowballStemmer training_file = 'data/cleaned_train.tsv' test_file = 'data/cleaned_test.tsv' test_output_header = 'PhraseId,Sentiment\n' validate_training_file = 'data/xaa' validate_test_file = 'data/xab' class Sentiment_Classifier: def __init__(self, training=False, validate=False, require_dense=False, ncores=-2): self.columns_per_training_example = 4 self.columns_per_test_example = 3 self.require_dense = require_dense self.ncores = ncores self.model_pickle_file = None self.transformer_pickle_file = None self.kernel_sampler_file = None self._setup_pickle_files() # used to filter self._stemmer = SnowballStemmer('english') if (training or validate): self._setup_classifier_and_transformer() else: self._load_model_and_transformer() def _setup_pickle_files(self): """ set up the directory for the model and transformer to be stored in once trained. """ pickle_dir = 'pickled_objects' d = os.path.dirname(pickle_dir) if not os.path.exists(pickle_dir): os.makedirs(pickle_dir) self.model_pickle_file = pickle_dir + '/model.pkl' self.transformer_pickle_file = pickle_dir + '/transformer.pkl' def _store_model_and_transformer(self): joblib.dump(self.classifier, self.model_pickle_file) joblib.dump(self.transformer, self.transformer_pickle_file) def _load_model_and_transformer(self): self.classifier = joblib.load(self.model_pickle_file) self.transformer = joblib.load(self.transformer_pickle_file) def _setup_classifier_and_transformer(self): self.transformer = TfidfVectorizer(use_idf=False, decode_error='ignore', ngram_range=(1,3)) self.classifier = OneVsRestClassifier(LogisticRegression(), n_jobs=self.ncores) def _write_message(self, msg): sys.stderr.write(msg + '\n') def _filter(self, sentence): sentence_list = sentence.split() sentence_list = map(lambda x: self._stemmer.stem(x), sentence_list) return ' '.join(sentence_list) def _fit_transform(self, X): return self.transformer.fit_transform(X) def _transform(self, X): return self.transformer.transform(X) ''' Get features related to word lengths. Counts of words of each length. 
Max word length, min word length, ratio of words to sentence length ''' def _word_len_features(self, sentence): word_lengths = [len(word) for word in sentence.split()] if len(word_lengths) == 0: # return 0 for each feature return [0] * 12 else: # add arbitrary counts up to size 10 (up to 20 is actually better, # but we should probably come up with a better way than arbitrary counts, # larger range buckets perhaps) len_counts = [0] * 9 for i in range(1,10): len_counts[i-1] = word_lengths.count(i) len_counts.extend([sum(word_lengths)/float(len(sentence)), \ max(word_lengths), min(word_lengths)]) return len_counts def _get_extra_features(self, sentence): sentence_len = float(len(sentence)) get_count = lambda l1, l2: len(list(filter(lambda c: c in l2, l1))) digits_count = get_count(sentence, '0123456789') # punctuation count didn't help, pehaps indvidual punctuation count will #punct_count = get_count(sentence, string.punctuation) features = [sentence_len, sum(1 for c in sentence if c.isupper())/float(sentence_len), digits_count/sentence_len] features.extend(self._word_len_features(sentence)) return features def get_features_and_labels(self, training_file): self._write_message('reading data') training_examples = [(phrase_id, sentence_id, self._filter(sentence), self._get_extra_features(sentence), sentiment) for phrase_id, sentence_id, sentence, sentiment in self._read_file(training_file, self.columns_per_training_example)] self._write_message('generating mapped data') phrase_ids, sentence_ids, sentences, extra_features, y = zip(*training_examples) return sentences, extra_features, y def get_features_and_ids(self, data_file): self._write_message('reading data') examples = [(phrase_id, sentence_id, self._filter(sentence)) for phrase_id, sentence_id, sentence in self._read_file(data_file, self.columns_per_test_example)] self._write_message('generating mapped data') phrase_ids, sentence_ids, sentences = zip(*examples) X = self._transform(sentences) return phrase_ids, X def _train(self, X, y): self.classifier.fit(X, y) def train(self, training_file): """ train the model """ X, extra_features, y = self.get_features_and_labels(training_file) X = self._fit_transform(X) sparse_features = sparse.csr_matrix(numpy.array(extra_features)) X = sparse.hstack((X, sparse_features)) if self.require_dense: X = X.toarray() #X = self.kernel.fit_transform(X) self._write_message('training model') self._train(X, y) # save the classifier for later! self._store_model_and_transformer() def validate(self, validate_file): X, extra_features, y = self.get_features_and_labels(validate_file) X = self._transform(X) sparse_features = sparse.csr_matrix(numpy.array(extra_features)) X = sparse.hstack((X, sparse_features)) if self.require_dense: X = X.toarray() #X = self.kernel.transform(X) self._write_message('validate model') print self._score(X, y) def _score(self, X, y): """ score the model """ score = self.classifier.score(X, y) return score def _predict(self, X): """ predict a single example """ y = self.classifier.predict(X) return y def test(self, test_file): """ generate the submission file. 
""" self._write_message('predicting test outcomes') ids, X = self.get_features_and_ids(test_file) if self.require_dense: X = X.toarray() #X = self.kernel.transform(X) y = self._predict(X) self.write_output(ids, y) def classify_string(self): """ Classify lines from stdin """ for s in sys.stdin: X = self.transformer.transform([s]) self._write_line(self._predict(X)[0]) def _write_line(self, s): sys.stdout.write(str(s) + '\n') def write_output(self, ids, y): """ write the result of the test method. """ # write the new predictions and the IDs to stdout sys.stdout.write(test_output_header) for i in xrange(len(ids)): self._write_line(str(ids[i]) + ',' + str(y[i])) def _read_file(self, filename, expected_elements): """ generator that reads lines from the given file and appends missing data as needed """ with open(filename, 'r') as f: for line in f: t = tuple(line.strip().split('\t')) if len(t) != expected_elements: t = t + ('',) yield t def main(): args = argparse.ArgumentParser() args.add_argument('--train', action='store_true') args.add_argument('--test', action='store_true') args.add_argument('--validate', action='store_true') args.add_argument('--sample', action='store_true') args.add_argument('--ncores', type=int, default=-2) args = args.parse_args() # pass test flag in so the constructer can load the # model and transformer. It doesn't need to do that for training model = Sentiment_Classifier(training=args.train, validate=args.validate, ncores=args.ncores) if args.train: model.train(training_file) if args.test: model.test(test_file) if args.validate: model.train(validate_training_file) model.validate(validate_test_file) if args.sample: model.classify_string() if __name__ == '__main__': main()
# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for compute resource tracking.""" import copy import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils from nova.compute import flavors from nova.compute import resource_tracker from nova.compute import resources from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as obj_base from nova import rpc from nova import test from nova.tests.unit.compute.monitors import test_monitors from nova.tests.unit.pci import fakes as pci_fakes from nova.virt import driver FAKE_VIRT_MEMORY_MB = 5 FAKE_VIRT_MEMORY_OVERHEAD = 1 FAKE_VIRT_MEMORY_WITH_OVERHEAD = ( FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD) FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) ROOT_GB = 5 EPHEMERAL_GB = 1 FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB FAKE_VIRT_VCPUS = 1 FAKE_VIRT_STATS = {'virt_stat': 10} FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS) RESOURCE_NAMES = ['vcpu'] CONF = cfg.CONF class UnsupportedVirtDriver(driver.ComputeDriver): """Pretend version of a lame virt driver.""" def __init__(self): super(UnsupportedVirtDriver, self).__init__(None) def get_host_ip_addr(self): return '127.0.0.1' def get_available_resource(self, nodename): # no support for getting resource usage info return {} class FakeVirtDriver(driver.ComputeDriver): def __init__(self, pci_support=False, stats=None, numa_topology=FAKE_VIRT_NUMA_TOPOLOGY): super(FakeVirtDriver, self).__init__(None) self.memory_mb = FAKE_VIRT_MEMORY_MB self.local_gb = FAKE_VIRT_LOCAL_GB self.vcpus = FAKE_VIRT_VCPUS self.numa_topology = numa_topology self.memory_mb_used = 0 self.local_gb_used = 0 self.pci_support = pci_support self.pci_devices = [ { 'label': 'label_8086_0443', 'dev_type': 'type-VF', 'compute_node_id': 1, 'address': '0000:00:01.1', 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1 }, { 'label': 'label_8086_0443', 'dev_type': 'type-VF', 'compute_node_id': 1, 'address': '0000:00:01.2', 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1 }, { 'label': 'label_8086_0443', 'dev_type': 'type-PF', 'compute_node_id': 1, 'address': '0000:00:01.0', 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1 }, { 'label': 'label_8086_0123', 'dev_type': 'type-PCI', 'compute_node_id': 1, 'address': '0000:00:01.0', 'product_id': '0123', 'vendor_id': '8086', 'status': 
'available', 'extra_k1': 'v1', 'numa_node': 1 }, { 'label': 'label_8086_7891', 'dev_type': 'type-VF', 'compute_node_id': 1, 'address': '0000:00:01.0', 'product_id': '7891', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': None }, ] if self.pci_support else [] self.pci_stats = [ { 'count': 2, 'vendor_id': '8086', 'product_id': '0443', 'numa_node': 1 }, { 'count': 1, 'vendor_id': '8086', 'product_id': '7891', 'numa_node': None }, ] if self.pci_support else [] if stats is not None: self.stats = stats def get_host_ip_addr(self): return '127.0.0.1' def get_available_resource(self, nodename): d = { 'vcpus': self.vcpus, 'memory_mb': self.memory_mb, 'local_gb': self.local_gb, 'vcpus_used': 0, 'memory_mb_used': self.memory_mb_used, 'local_gb_used': self.local_gb_used, 'hypervisor_type': 'fake', 'hypervisor_version': 0, 'hypervisor_hostname': 'fakehost', 'cpu_info': '', 'numa_topology': ( self.numa_topology._to_json() if self.numa_topology else None), } if self.pci_support: d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices) if hasattr(self, 'stats'): d['stats'] = self.stats return d def estimate_instance_overhead(self, instance_info): instance_info['memory_mb'] # make sure memory value is present overhead = { 'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD } return overhead # just return a constant value for testing class BaseTestCase(test.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.context = context.get_admin_context() self.flags(pci_passthrough_whitelist=[ '{"vendor_id": "8086", "product_id": "0443"}', '{"vendor_id": "8086", "product_id": "7891"}']) self.flags(use_local=True, group='conductor') self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) self._instances = {} self._numa_topologies = {} self._instance_types = {} self.stubs.Set(self.conductor.db, 'instance_get_all_by_host_and_node', self._fake_instance_get_all_by_host_and_node) self.stubs.Set(db, 'instance_extra_get_by_instance_uuid', self._fake_instance_extra_get_by_instance_uuid) self.stubs.Set(self.conductor.db, 'instance_update_and_get_original', self._fake_instance_update_and_get_original) self.stubs.Set(self.conductor.db, 'flavor_get', self._fake_flavor_get) self.host = 'fakehost' self.compute = self._create_compute_node() self.updated = False self.deleted = False self.update_call_count = 0 def _create_compute_node(self, values=None): compute = { "id": 1, "service_id": 1, "host": "fakehost", "vcpus": 1, "memory_mb": 1, "local_gb": 1, "vcpus_used": 1, "memory_mb_used": 1, "local_gb_used": 1, "free_ram_mb": 1, "free_disk_gb": 1, "current_workload": 1, "running_vms": 0, "cpu_info": None, "numa_topology": None, "stats": '{"num_instances": "1"}', "hypervisor_hostname": "fakenode", 'hypervisor_version': 1, 'hypervisor_type': 'fake-hyp', 'disk_available_least': None, 'host_ip': None, 'metrics': None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, } if values: compute.update(values) return compute def _create_service(self, host="fakehost", compute=None): if compute: compute = [compute] service = { "id": 1, "host": host, "binary": "nova-compute", "topic": "compute", "compute_node": compute, "report_count": 0, 'disabled': False, 'disabled_reason': None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, } return service def _fake_instance_system_metadata(self, instance_type, prefix=''): sys_meta = [] for key in 
flavors.system_metadata_flavor_props.keys(): sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key), 'value': instance_type[key]}) return sys_meta def _fake_instance(self, stash=True, flavor=None, **kwargs): # Default to an instance ready to resize to or from the same # instance_type flavor = flavor or self._fake_flavor_create() sys_meta = self._fake_instance_system_metadata(flavor) if stash: # stash instance types in system metadata. sys_meta = (sys_meta + self._fake_instance_system_metadata(flavor, 'new_') + self._fake_instance_system_metadata(flavor, 'old_')) instance_uuid = str(uuid.uuid1()) instance = { 'uuid': instance_uuid, 'vm_state': vm_states.RESIZED, 'task_state': None, 'ephemeral_key_uuid': None, 'os_type': 'Linux', 'project_id': '123456', 'host': None, 'node': None, 'instance_type_id': flavor['id'], 'memory_mb': flavor['memory_mb'], 'vcpus': flavor['vcpus'], 'root_gb': flavor['root_gb'], 'ephemeral_gb': flavor['ephemeral_gb'], 'launched_on': None, 'system_metadata': sys_meta, 'availability_zone': None, 'vm_mode': None, 'reservation_id': None, 'display_name': None, 'default_swap_device': None, 'power_state': None, 'scheduled_at': None, 'access_ip_v6': None, 'access_ip_v4': None, 'key_name': None, 'updated_at': None, 'cell_name': None, 'locked': None, 'locked_by': None, 'launch_index': None, 'architecture': None, 'auto_disk_config': None, 'terminated_at': None, 'ramdisk_id': None, 'user_data': None, 'cleaned': None, 'deleted_at': None, 'id': 333, 'disable_terminate': None, 'hostname': None, 'display_description': None, 'key_data': None, 'deleted': None, 'default_ephemeral_device': None, 'progress': None, 'launched_at': None, 'config_drive': None, 'kernel_id': None, 'user_id': None, 'shutdown_terminate': None, 'created_at': None, 'image_ref': None, 'root_device_name': None, } extra = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'instance_uuid': instance['uuid'], 'numa_topology': None, 'pci_requests': None, } numa_topology = kwargs.pop('numa_topology', None) if numa_topology: extra['numa_topology'] = numa_topology._to_json() instance.update(kwargs) instance['extra'] = extra self._instances[instance_uuid] = instance self._numa_topologies[instance_uuid] = extra return instance def _fake_flavor_create(self, **kwargs): instance_type = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'disabled': False, 'is_public': True, 'name': 'fakeitype', 'memory_mb': FAKE_VIRT_MEMORY_MB, 'vcpus': FAKE_VIRT_VCPUS, 'root_gb': ROOT_GB, 'ephemeral_gb': EPHEMERAL_GB, 'swap': 0, 'rxtx_factor': 1.0, 'vcpu_weight': 1, 'flavorid': 'fakeflavor', 'extra_specs': {}, } instance_type.update(**kwargs) id_ = instance_type['id'] self._instance_types[id_] = instance_type return instance_type def _fake_instance_get_all_by_host_and_node(self, context, host, nodename, columns_to_join=None): return [i for i in self._instances.values() if i['host'] == host] def _fake_instance_extra_get_by_instance_uuid(self, context, instance_uuid, columns=None): return self._numa_topologies.get(instance_uuid) def _fake_flavor_get(self, ctxt, id_): return self._instance_types[id_] def _fake_instance_update_and_get_original(self, context, instance_uuid, values, columns_to_join=None): instance = self._instances[instance_uuid] instance.update(values) # the test doesn't care what the original instance values are, it's # only used in the subsequent notification: return (instance, instance) def _fake_compute_node_update(self, ctx, compute_node_id, values, 
prune_stats=False): self.update_call_count += 1 self.updated = True self.compute.update(values) return self.compute def _driver(self): return FakeVirtDriver() def _tracker(self, host=None): if host is None: host = self.host node = "fakenode" driver = self._driver() tracker = resource_tracker.ResourceTracker(host, driver, node) tracker.compute_node = self._create_compute_node() tracker.ext_resources_handler = \ resources.ResourceHandler(RESOURCE_NAMES, True) return tracker class UnsupportedDriverTestCase(BaseTestCase): """Resource tracking should be disabled when the virt driver doesn't support it. """ def setUp(self): super(UnsupportedDriverTestCase, self).setUp() self.tracker = self._tracker() # seed tracker with data: self.tracker.update_available_resource(self.context) def _driver(self): return UnsupportedVirtDriver() def test_disabled(self): # disabled = no compute node stats self.assertTrue(self.tracker.disabled) self.assertIsNone(self.tracker.compute_node) def test_disabled_claim(self): # basic claim: instance = self._fake_instance() claim = self.tracker.instance_claim(self.context, instance) self.assertEqual(0, claim.memory_mb) def test_disabled_instance_claim(self): # instance variation: instance = self._fake_instance() claim = self.tracker.instance_claim(self.context, instance) self.assertEqual(0, claim.memory_mb) def test_disabled_instance_context_claim(self): # instance context manager variation: instance = self._fake_instance() self.tracker.instance_claim(self.context, instance) with self.tracker.instance_claim(self.context, instance) as claim: self.assertEqual(0, claim.memory_mb) def test_disabled_updated_usage(self): instance = self._fake_instance(host='fakehost', memory_mb=5, root_gb=10) self.tracker.update_usage(self.context, instance) def test_disabled_resize_claim(self): instance = self._fake_instance() instance_type = self._fake_flavor_create() claim = self.tracker.resize_claim(self.context, instance, instance_type) self.assertEqual(0, claim.memory_mb) self.assertEqual(instance['uuid'], claim.migration['instance_uuid']) self.assertEqual(instance_type['id'], claim.migration['new_instance_type_id']) def test_disabled_resize_context_claim(self): instance = self._fake_instance() instance_type = self._fake_flavor_create() with self.tracker.resize_claim(self.context, instance, instance_type) \ as claim: self.assertEqual(0, claim.memory_mb) class MissingServiceTestCase(BaseTestCase): def setUp(self): super(MissingServiceTestCase, self).setUp() self.context = context.get_admin_context() self.tracker = self._tracker() def test_missing_service(self): self.tracker.compute_node = None self.tracker._get_service = mock.Mock(return_value=None) self.tracker.update_available_resource(self.context) self.assertTrue(self.tracker.disabled) class MissingComputeNodeTestCase(BaseTestCase): def setUp(self): super(MissingComputeNodeTestCase, self).setUp() self.tracker = self._tracker() self.stubs.Set(db, 'service_get_by_compute_host', self._fake_service_get_by_compute_host) self.stubs.Set(db, 'compute_node_get_by_host_and_nodename', self._fake_compute_node_get_by_host_and_nodename) self.stubs.Set(db, 'compute_node_create', self._fake_create_compute_node) self.tracker.scheduler_client.update_resource_stats = mock.Mock() def _fake_create_compute_node(self, context, values): self.created = True return self._create_compute_node(values) def _fake_service_get_by_compute_host(self, ctx, host): # return a service with no joined compute service = self._create_service() return service def 
_fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename): # return no compute node raise exception.ComputeHostNotFound(host=host) def test_create_compute_node(self): self.tracker.compute_node = None self.tracker.update_available_resource(self.context) self.assertTrue(self.created) def test_enabled(self): self.tracker.update_available_resource(self.context) self.assertFalse(self.tracker.disabled) class BaseTrackerTestCase(BaseTestCase): def setUp(self): # setup plumbing for a working resource tracker with required # database models and a compatible compute driver: super(BaseTrackerTestCase, self).setUp() self.tracker = self._tracker() self._migrations = {} self.stubs.Set(db, 'service_get_by_compute_host', self._fake_service_get_by_compute_host) self.stubs.Set(db, 'compute_node_get_by_host_and_nodename', self._fake_compute_node_get_by_host_and_nodename) self.stubs.Set(db, 'compute_node_update', self._fake_compute_node_update) self.stubs.Set(db, 'compute_node_delete', self._fake_compute_node_delete) self.stubs.Set(db, 'migration_update', self._fake_migration_update) self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node', self._fake_migration_get_in_progress_by_host_and_node) # Note that this must be called before the call to _init_tracker() patcher = pci_fakes.fake_pci_whitelist() self.addCleanup(patcher.stop) self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats', self._fake_compute_node_update) self._init_tracker() self.limits = self._limits() def _fake_service_get_by_compute_host(self, ctx, host): self.service = self._create_service(host, compute=self.compute) return self.service def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename): self.compute = self._create_compute_node() return self.compute def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): self.update_call_count += 1 self.updated = True self.compute.update(values) return self.compute def _fake_compute_node_delete(self, ctx, compute_node_id): self.deleted = True self.compute.update({'deleted': 1}) return self.compute def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host, node): status = ['confirmed', 'reverted', 'error'] migrations = [] for migration in self._migrations.values(): migration = obj_base.obj_to_primitive(migration) if migration['status'] in status: continue uuid = migration['instance_uuid'] migration['instance'] = self._instances[uuid] migrations.append(migration) return migrations def _fake_migration_update(self, ctxt, migration_id, values): # cheat and assume there's only 1 migration present migration = self._migrations.values()[0] migration.update(values) return migration def _init_tracker(self): self.tracker.update_available_resource(self.context) def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD, disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS, numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD): """Create limits dictionary used for oversubscribing resources.""" return { 'memory_mb': memory_mb, 'disk_gb': disk_gb, 'vcpu': vcpus, 'numa_topology': numa_topology, } def assertEqualNUMAHostTopology(self, expected, got): attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage') if None in (expected, got): if expected != got: raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) else: return if len(expected) != len(got): raise AssertionError("Topologies don't match due to different " "number of cells. 
Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) for exp_cell, got_cell in zip(expected.cells, got.cells): for attr in attrs: if getattr(exp_cell, attr) != getattr(got_cell, attr): raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) def _assert(self, value, field, tracker=None): if tracker is None: tracker = self.tracker if field not in tracker.compute_node: raise test.TestingException( "'%(field)s' not in compute node." % {'field': field}) x = tracker.compute_node[field] if field == 'numa_topology': self.assertEqualNUMAHostTopology( value, objects.NUMATopology.obj_from_db_obj(x)) else: self.assertEqual(value, x) class TrackerTestCase(BaseTrackerTestCase): def test_free_ram_resource_value(self): driver = FakeVirtDriver() mem_free = driver.memory_mb - driver.memory_mb_used self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb']) def test_free_disk_resource_value(self): driver = FakeVirtDriver() mem_free = driver.local_gb - driver.local_gb_used self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb']) def test_update_compute_node(self): self.assertFalse(self.tracker.disabled) self.assertTrue(self.updated) def test_init(self): driver = self._driver() self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb') self._assert(FAKE_VIRT_VCPUS, 'vcpus') self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology') self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self._assert(0, 'running_vms') self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb') self.assertFalse(self.tracker.disabled) self.assertEqual(0, self.tracker.compute_node['current_workload']) self.assertEqual(driver.pci_stats, self.tracker.compute_node['pci_device_pools']) class SchedulerClientTrackerTestCase(BaseTrackerTestCase): def setUp(self): super(SchedulerClientTrackerTestCase, self).setUp() self.tracker.scheduler_client.update_resource_stats = mock.Mock( side_effect=self._fake_compute_node_update) def test_update_resource(self): # change a compute node value to simulate a change self.tracker.compute_node['local_gb_used'] += 1 expected = copy.deepcopy(self.tracker.compute_node) self.tracker._update(self.context) self.tracker.scheduler_client.update_resource_stats.\ assert_called_once_with(self.context, ("fakehost", "fakenode"), expected) def test_no_update_resource(self): self.tracker._update(self.context) update = self.tracker.scheduler_client.update_resource_stats self.assertFalse(update.called, "update_resource_stats should not be " "called when there is no change") class TrackerPciStatsTestCase(BaseTrackerTestCase): def test_update_compute_node(self): self.assertFalse(self.tracker.disabled) self.assertTrue(self.updated) def test_init(self): driver = self._driver() self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb') self._assert(FAKE_VIRT_VCPUS, 'vcpus') self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology') self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self._assert(0, 'running_vms') self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb') self.assertFalse(self.tracker.disabled) self.assertEqual(0, self.tracker.compute_node['current_workload']) # NOTE(danms): PciDeviceStats only supports iteration, so we have to # listify it before we can examine the 
contents by index. pools = list(self.tracker.compute_node['pci_device_pools']) self.assertEqual(driver.pci_stats[0]['product_id'], pools[0]['product_id']) def _driver(self): return FakeVirtDriver(pci_support=True) class TrackerExtraResourcesTestCase(BaseTrackerTestCase): def setUp(self): super(TrackerExtraResourcesTestCase, self).setUp() self.driver = self._driver() def _driver(self): return FakeVirtDriver() def test_set_empty_ext_resources(self): resources = self.driver.get_available_resource(self.tracker.nodename) self.assertNotIn('stats', resources) self.tracker._write_ext_resources(resources) self.assertIn('stats', resources) def test_set_extra_resources(self): def fake_write_resources(resources): resources['stats']['resA'] = '123' resources['stats']['resB'] = 12 self.stubs.Set(self.tracker.ext_resources_handler, 'write_resources', fake_write_resources) resources = self.driver.get_available_resource(self.tracker.nodename) self.tracker._write_ext_resources(resources) expected = {"resA": "123", "resB": 12} self.assertEqual(sorted(expected), sorted(resources['stats'])) class InstanceClaimTestCase(BaseTrackerTestCase): def _instance_topology(self, mem): mem = mem * 1024 return objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([1]), memory=mem), objects.InstanceNUMACell( id=1, cpuset=set([3]), memory=mem)]) def _claim_topology(self, mem, cpus=1): if self.tracker.driver.numa_topology is None: return None mem = mem * 1024 return objects.NUMATopology( cells=[objects.NUMACell( id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus, memory_usage=mem, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus, memory_usage=mem, mempages=[], siblings=[], pinned_cpus=set([]))]) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_update_usage_only_for_tracked(self, mock_get): flavor = self._fake_flavor_create() claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD claim_gb = flavor['root_gb'] + flavor['ephemeral_gb'] claim_topology = self._claim_topology(claim_mem / 2) instance_topology = self._instance_topology(claim_mem / 2) instance = self._fake_instance( flavor=flavor, task_state=None, numa_topology=instance_topology) self.tracker.update_usage(self.context, instance) self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'current_workload') self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology') claim = self.tracker.instance_claim(self.context, instance, self.limits) self.assertNotEqual(0, claim.memory_mb) self._assert(claim_mem, 'memory_mb_used') self._assert(claim_gb, 'local_gb_used') self._assert(claim_topology, 'numa_topology') # now update should actually take effect instance['task_state'] = task_states.SCHEDULING self.tracker.update_usage(self.context, instance) self._assert(claim_mem, 'memory_mb_used') self._assert(claim_gb, 'local_gb_used') self._assert(claim_topology, 'numa_topology') self._assert(1, 'current_workload') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_claim_and_abort(self, mock_get): claim_mem = 3 claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD claim_disk = 2 claim_topology = self._claim_topology(claim_mem_total / 2) instance_topology = self._instance_topology(claim_mem_total / 2) instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk, ephemeral_gb=0, 
numa_topology=instance_topology) claim = self.tracker.instance_claim(self.context, instance, self.limits) self.assertIsNotNone(claim) self.assertEqual(claim_mem_total, self.compute["memory_mb_used"]) self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total, self.compute["free_ram_mb"]) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) self.assertEqual(claim_disk, self.compute["local_gb_used"]) self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk, self.compute["free_disk_gb"]) claim.abort() self.assertEqual(0, self.compute["memory_mb_used"]) self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"]) self.assertEqualNUMAHostTopology( FAKE_VIRT_NUMA_TOPOLOGY, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) self.assertEqual(0, self.compute["local_gb_used"]) self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"]) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_instance_claim_with_oversubscription(self, mock_get): memory_mb = FAKE_VIRT_MEMORY_MB * 2 root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB vcpus = FAKE_VIRT_VCPUS * 2 claim_topology = self._claim_topology(3) instance_topology = self._instance_topology(3) limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD, 'disk_gb': root_gb * 2, 'vcpu': vcpus, 'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD} instance = self._fake_instance(memory_mb=memory_mb, root_gb=root_gb, ephemeral_gb=ephemeral_gb, numa_topology=instance_topology) self.tracker.instance_claim(self.context, instance, limits) self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD, self.tracker.compute_node['memory_mb_used']) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) self.assertEqual(root_gb * 2, self.tracker.compute_node['local_gb_used']) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_additive_claims(self, mock_get): self.limits['vcpu'] = 2 claim_topology = self._claim_topology(2, cpus=2) flavor = self._fake_flavor_create( memory_mb=1, root_gb=1, ephemeral_gb=0) instance_topology = self._instance_topology(1) instance = self._fake_instance( flavor=flavor, numa_topology=instance_topology) with self.tracker.instance_claim(self.context, instance, self.limits): pass instance = self._fake_instance( flavor=flavor, numa_topology=instance_topology) with self.tracker.instance_claim(self.context, instance, self.limits): pass self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD), self.tracker.compute_node['memory_mb_used']) self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']), self.tracker.compute_node['local_gb_used']) self.assertEqual(2 * flavor['vcpus'], self.tracker.compute_node['vcpus_used']) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_context_claim_with_exception(self, mock_get): instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1) try: with self.tracker.instance_claim(self.context, instance): # <insert exciting things that utilize resources> raise test.TestingException() except test.TestingException: pass self.assertEqual(0, self.tracker.compute_node['memory_mb_used']) 
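        # Local disk usage must be rolled back as well, and the rollback has to
        # reach the compute node record pushed to the database, not just the
        # tracker's in-memory totals.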
self.assertEqual(0, self.tracker.compute_node['local_gb_used']) self.assertEqual(0, self.compute['memory_mb_used']) self.assertEqual(0, self.compute['local_gb_used']) self.assertEqualNUMAHostTopology( FAKE_VIRT_NUMA_TOPOLOGY, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_instance_context_claim(self, mock_get): flavor = self._fake_flavor_create( memory_mb=1, root_gb=2, ephemeral_gb=3) claim_topology = self._claim_topology(1) instance_topology = self._instance_topology(1) instance = self._fake_instance( flavor=flavor, numa_topology=instance_topology) with self.tracker.instance_claim(self.context, instance): # <insert exciting things that utilize resources> self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD, self.tracker.compute_node['memory_mb_used']) self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'], self.tracker.compute_node['local_gb_used']) self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD, self.compute['memory_mb_used']) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'], self.compute['local_gb_used']) # after exiting claim context, build is marked as finished. usage # totals should be same: self.tracker.update_available_resource(self.context) self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD, self.tracker.compute_node['memory_mb_used']) self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'], self.tracker.compute_node['local_gb_used']) self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD, self.compute['memory_mb_used']) self.assertEqualNUMAHostTopology( claim_topology, objects.NUMATopology.obj_from_db_obj( self.compute['numa_topology'])) self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'], self.compute['local_gb_used']) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_update_load_stats_for_instance(self, mock_get): instance = self._fake_instance(task_state=task_states.SCHEDULING) with self.tracker.instance_claim(self.context, instance): pass self.assertEqual(1, self.tracker.compute_node['current_workload']) instance['vm_state'] = vm_states.ACTIVE instance['task_state'] = None instance['host'] = 'fakehost' self.tracker.update_usage(self.context, instance) self.assertEqual(0, self.tracker.compute_node['current_workload']) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_cpu_stats(self, mock_get): limits = {'disk_gb': 100, 'memory_mb': 100} self.assertEqual(0, self.tracker.compute_node['vcpus_used']) vcpus = 1 instance = self._fake_instance(vcpus=vcpus) # should not do anything until a claim is made: self.tracker.update_usage(self.context, instance) self.assertEqual(0, self.tracker.compute_node['vcpus_used']) with self.tracker.instance_claim(self.context, instance, limits): pass self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used']) # instance state can change without modifying vcpus in use: instance['task_state'] = task_states.SCHEDULING self.tracker.update_usage(self.context, instance) self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used']) add_vcpus = 10 vcpus += add_vcpus instance = self._fake_instance(vcpus=add_vcpus) with 
self.tracker.instance_claim(self.context, instance, limits): pass self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used']) instance['vm_state'] = vm_states.DELETED self.tracker.update_usage(self.context, instance) vcpus -= add_vcpus self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used']) def test_skip_deleted_instances(self): # ensure that the audit process skips instances that have vm_state # DELETED, but the DB record is not yet deleted. self._fake_instance(vm_state=vm_states.DELETED, host=self.host) self.tracker.update_available_resource(self.context) self.assertEqual(0, self.tracker.compute_node['memory_mb_used']) self.assertEqual(0, self.tracker.compute_node['local_gb_used']) @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_deleted_instances_with_migrations(self, mock_migration_list): migration = objects.Migration(context=self.context, instance_uuid='invalid') mock_migration_list.return_value = [migration] self.tracker.update_available_resource(self.context) self.assertEqual(0, self.tracker.compute_node['memory_mb_used']) self.assertEqual(0, self.tracker.compute_node['local_gb_used']) mock_migration_list.assert_called_once_with(self.context, "fakehost", "fakenode") class ResizeClaimTestCase(BaseTrackerTestCase): def setUp(self): super(ResizeClaimTestCase, self).setUp() self.instance = self._fake_instance() self.instance_type = self._fake_flavor_create() @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_claim(self, mock_get): self.tracker.resize_claim(self.context, self.instance, self.instance_type, self.limits) self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used') self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used') self._assert(FAKE_VIRT_VCPUS, 'vcpus_used') self.assertEqual(1, len(self.tracker.tracked_migrations)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_abort(self, mock_get): try: with self.tracker.resize_claim(self.context, self.instance, self.instance_type, self.limits): raise test.TestingException("abort") except test.TestingException: pass self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self.assertEqual(0, len(self.tracker.tracked_migrations)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_additive_claims(self, mock_get): limits = self._limits( 2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 2 * FAKE_VIRT_LOCAL_GB, 2 * FAKE_VIRT_VCPUS) self.tracker.resize_claim(self.context, self.instance, self.instance_type, limits) instance2 = self._fake_instance() self.tracker.resize_claim(self.context, instance2, self.instance_type, limits) self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used') self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used') self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_revert(self, mock_get): self.tracker.resize_claim(self.context, self.instance, self.instance_type, {}, self.limits) self.tracker.drop_resize_claim(self.context, self.instance) self.assertEqual(0, len(self.tracker.tracked_instances)) self.assertEqual(0, len(self.tracker.tracked_migrations)) self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') def 
test_resize_filter(self): instance = self._fake_instance(vm_state=vm_states.ACTIVE, task_state=task_states.SUSPENDING) self.assertFalse(self.tracker._instance_in_resize_state(instance)) instance = self._fake_instance(vm_state=vm_states.RESIZED, task_state=task_states.SUSPENDING) self.assertTrue(self.tracker._instance_in_resize_state(instance)) states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH] for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]: for task_state in states: instance = self._fake_instance(vm_state=vm_state, task_state=task_state) result = self.tracker._instance_in_resize_state(instance) self.assertTrue(result) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_set_instance_host_and_node(self, mock_get): instance = self._fake_instance() self.assertIsNone(instance['host']) self.assertIsNone(instance['launched_on']) self.assertIsNone(instance['node']) claim = self.tracker.instance_claim(self.context, instance) self.assertNotEqual(0, claim.memory_mb) self.assertEqual('fakehost', instance['host']) self.assertEqual('fakehost', instance['launched_on']) self.assertEqual('fakenode', instance['node']) class NoInstanceTypesInSysMetadata(ResizeClaimTestCase): """Make sure we handle the case where the following are true: #) Compute node C gets upgraded to code that looks for instance types in system metadata. AND #) C already has instances in the process of migrating that do not have stashed instance types. bug 1164110 """ def setUp(self): super(NoInstanceTypesInSysMetadata, self).setUp() self.instance = self._fake_instance(stash=False) def test_get_instance_type_stash_false(self): with (mock.patch.object(objects.Flavor, 'get_by_id', return_value=self.instance_type)): flavor = self.tracker._get_instance_type(self.context, self.instance, "new_") self.assertEqual(self.instance_type, flavor) class OrphanTestCase(BaseTrackerTestCase): def _driver(self): class OrphanVirtDriver(FakeVirtDriver): def get_per_instance_usage(self): return { '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB, 'uuid': '1-2-3-4-5'}, '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB, 'uuid': '2-3-4-5-6'}, } return OrphanVirtDriver() def test_usage(self): self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, self.tracker.compute_node['memory_mb_used']) def test_find(self): # create one legit instance and verify the 2 orphans remain self._fake_instance() orphans = self.tracker._find_orphaned_instances() self.assertEqual(2, len(orphans)) class ComputeMonitorTestCase(BaseTestCase): def setUp(self): super(ComputeMonitorTestCase, self).setUp() fake_monitors = [ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1', 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2'] self.flags(compute_available_monitors=fake_monitors) self.tracker = self._tracker() self.node_name = 'nodename' self.user_id = 'fake' self.project_id = 'fake' self.info = {} self.context = context.RequestContext(self.user_id, self.project_id) def test_get_host_metrics_none(self): self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4']) self.tracker.monitors = [] metrics = self.tracker._get_host_metrics(self.context, self.node_name) self.assertEqual(len(metrics), 0) def test_get_host_metrics_one_failed(self): self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4']) class1 = test_monitors.FakeMonitorClass1(self.tracker) class4 = 
test_monitors.FakeMonitorClass4(self.tracker) self.tracker.monitors = [class1, class4] metrics = self.tracker._get_host_metrics(self.context, self.node_name) self.assertTrue(len(metrics) > 0) @mock.patch.object(resource_tracker.LOG, 'warning') def test_get_host_metrics_exception(self, mock_LOG_warning): self.flags(compute_monitors=['FakeMontorClass1']) class1 = test_monitors.FakeMonitorClass1(self.tracker) self.tracker.monitors = [class1] with mock.patch.object(class1, 'get_metrics', side_effect=test.TestingException()): metrics = self.tracker._get_host_metrics(self.context, self.node_name) mock_LOG_warning.assert_called_once_with( u'Cannot get the metrics from %s.', class1) self.assertEqual(0, len(metrics)) def test_get_host_metrics(self): self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2']) class1 = test_monitors.FakeMonitorClass1(self.tracker) class2 = test_monitors.FakeMonitorClass2(self.tracker) self.tracker.monitors = [class1, class2] mock_notifier = mock.Mock() with mock.patch.object(rpc, 'get_notifier', return_value=mock_notifier) as mock_get: metrics = self.tracker._get_host_metrics(self.context, self.node_name) mock_get.assert_called_once_with(service='compute', host=self.node_name) expected_metrics = [{ 'timestamp': 1232, 'name': 'key1', 'value': 2600, 'source': 'libvirt' }, { 'name': 'key2', 'source': 'libvirt', 'timestamp': 123, 'value': 1600 }] payload = { 'metrics': expected_metrics, 'host': self.tracker.host, 'host_ip': CONF.my_ip, 'nodename': self.node_name } mock_notifier.info.assert_called_once_with( self.context, 'compute.metrics.update', payload) self.assertEqual(metrics, expected_metrics) class TrackerPeriodicTestCase(BaseTrackerTestCase): def test_periodic_status_update(self): # verify update called on instantiation self.assertEqual(1, self.update_call_count) # verify update not called if no change to resources self.tracker.update_available_resource(self.context) self.assertEqual(1, self.update_call_count) # verify update is called when resources change driver = self.tracker.driver driver.memory_mb += 1 self.tracker.update_available_resource(self.context) self.assertEqual(2, self.update_call_count) def test_update_available_resource_calls_locked_inner(self): @mock.patch.object(self.tracker, 'driver') @mock.patch.object(self.tracker, '_update_available_resource') @mock.patch.object(self.tracker, '_verify_resources') @mock.patch.object(self.tracker, '_report_hypervisor_resource_view') def _test(mock_rhrv, mock_vr, mock_uar, mock_driver): resources = {'there is someone in my head': 'but it\'s not me'} mock_driver.get_available_resource.return_value = resources self.tracker.update_available_resource(self.context) mock_uar.assert_called_once_with(self.context, resources) _test() class StatsDictTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides stats as a dictionary. 
""" def _driver(self): return FakeVirtDriver(stats=FAKE_VIRT_STATS) def _get_stats(self): return jsonutils.loads(self.tracker.compute_node['stats']) def test_virt_stats(self): # start with virt driver stats stats = self._get_stats() self.assertEqual(FAKE_VIRT_STATS, stats) # adding an instance should keep virt driver stats self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host) self.tracker.update_available_resource(self.context) stats = self._get_stats() expected_stats = {} expected_stats.update(FAKE_VIRT_STATS) expected_stats.update(self.tracker.stats) self.assertEqual(expected_stats, stats) # removing the instances should keep only virt driver stats self._instances = {} self.tracker.update_available_resource(self.context) stats = self._get_stats() self.assertEqual(FAKE_VIRT_STATS, stats) class StatsJsonTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides stats as a json string. """ def _driver(self): return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON) def _get_stats(self): return jsonutils.loads(self.tracker.compute_node['stats']) def test_virt_stats(self): # start with virt driver stats stats = self._get_stats() self.assertEqual(FAKE_VIRT_STATS, stats) # adding an instance should keep virt driver stats # and add rt stats self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host) self.tracker.update_available_resource(self.context) stats = self._get_stats() expected_stats = {} expected_stats.update(FAKE_VIRT_STATS) expected_stats.update(self.tracker.stats) self.assertEqual(expected_stats, stats) # removing the instances should keep only virt driver stats self._instances = {} self.tracker.update_available_resource(self.context) stats = self._get_stats() self.assertEqual(FAKE_VIRT_STATS, stats) class StatsInvalidJsonTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides an invalid type for stats. """ def _driver(self): return FakeVirtDriver(stats='this is not json') def _init_tracker(self): # do not do initial update in setup pass def test_virt_stats(self): # should throw exception for string that does not parse as json self.assertRaises(ValueError, self.tracker.update_available_resource, context=self.context) class StatsInvalidTypeTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides an invalid type for stats. """ def _driver(self): return FakeVirtDriver(stats=10) def _init_tracker(self): # do not do initial update in setup pass def test_virt_stats(self): # should throw exception for incorrect stats value type self.assertRaises(ValueError, self.tracker.update_available_resource, context=self.context)
''' Created on March 27, 2017 This file is subject to the terms and conditions defined in the file 'LICENSE.txt', which is part of this source code package. @author: David Moss ''' import localization import json import utilities.utilities as utilities import properties from controller import Controller def run(botengine): """ Entry point for bot microservices The bot boots up in this order: 1. Create the `controller` object. 2. Synchronize with our devices and create a location object and device objects 3. new_version() - Executes one time when we're running a new version of the bot 2.a. Microservices and filters are synchronized inside the location object 2.a. Each device object, microservice, and filter should run its new_version() event 4. initialize() - This event executes in every location / device object / microservice / filter on every trigger the bot :param botengine: BotEngine environment object, our window to the outside world. """ localization.initialize(botengine) #=========================================================================== # print("INPUTS: " + json.dumps(botengine.get_inputs(), indent=2, sort_keys=True)) #=========================================================================== trigger_type = botengine.get_trigger_type() triggers = botengine.get_triggers() print("\n\n") botengine.get_logger().info("TRIGGER : " + str(trigger_type)) # Grab our non-volatile memory controller = load_controller(botengine) # The controller stores the bot's last version number. # If this is a new bot version, this evaluation will automatically trigger the new_version() event in all microservices. # Note that the new_version() event is also a bot trigger, so it executes after the initialize() event. new_version_executed = controller.evaluate_version(botengine) # RESET if trigger_type == 0 and not new_version_executed: # Manual reset or new version trigger controller.new_version(botengine) # INITIALIZE controller.initialize(botengine) # SCHEDULE TRIGGER if trigger_type & botengine.TRIGGER_SCHEDULE != 0: schedule_id = "DEFAULT" if 'scheduleId' in botengine.get_inputs(): schedule_id = botengine.get_inputs()['scheduleId'] botengine.get_logger().info("Schedule fired: {}".format(schedule_id)) controller.run_intelligence_schedules(botengine, schedule_id) # MODE TRIGGERS if trigger_type & botengine.TRIGGER_MODE != 0: # Triggered off a change of location botengine.get_logger().info("Trigger: Mode") for trigger in triggers: if 'location' in trigger: mode = trigger['location']['event'] location_id = trigger['location']['locationId'] controller.sync_mode(botengine, mode, location_id) # MEASUREMENT TRIGGERS if trigger_type & botengine.TRIGGER_DEVICE_MEASUREMENT != 0: # Triggered off a device measurement for trigger in triggers: if 'device' in trigger: device_id = trigger['device']['deviceId'] device_object = controller.get_device(device_id) if device_object is not None: device_location = trigger['device']['locationId'] # Measurement dictionary by updated timestamp { timestamp : [ {measure}, {measure}, {measure} ] } measures_dict = {} # List of measurements that were not updated measures_static = [] # Measurements are provided in 1-second granularity, so 2 measurements may have the same timestamp # This is a list of parameters and the newest time they were updated so we don't have 2 measurements for a single timestamp # { "param_name": newest_updated_timestamp_ms } updated_ts = {} for m in botengine.get_measures_block(): if m['deviceId'] == device_id: if not m['updated']: # Not updated 
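                                # Keep it anyway: unchanged parameters are merged back in with each
                                # batch of updated measurements below, so microservices always receive
                                # a complete measurement set.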
measures_static.append(m) else: # Updated parameter # First apply 1 ms corrections for the same parameter at the same time if m['name'] in updated_ts: if updated_ts[m['name']] == m['time']: m['time'] = updated_ts[m['name']] + 1 # Then add it to our list of measurements we need to trigger upon if m['time'] not in measures_dict: measures_dict[m['time']] = [] updated_ts[m['name']] = m['time'] measures_dict[m['time']].append(m) # For each unique timestamp, trigger the microservices from the oldest timestamp to the newest # Also modify the botengine's concept of time to match the current input parameter's time we are executing against, and restore it later. execution_time_ms = botengine.get_timestamp() for timestamp_ms in sorted(list(measures_dict.keys())): botengine.inputs['time'] = timestamp_ms all_measurements = measures_static + measures_dict[timestamp_ms] # Filter data controller.filter_measurements(botengine, device_location, device_object, all_measurements) # Update the device directly updated_devices, updated_metadata = device_object.update(botengine, all_measurements) # Update the proxies for updated_device in updated_devices: try: updated_device.device_measurements_updated(botengine) # Ping any proxy devices to let any sub-microservices know that the proxy is still connected and delivering measurements if updated_device.proxy_id is not None: proxy_object = controller.get_device(updated_device.proxy_id) if proxy_object is not None: if proxy_object not in updated_devices: proxy_object.device_measurements_updated(botengine) except Exception as e: import traceback botengine.get_logger().error("bot.device_measurements_updated(): {}; {}. Continuing execution.".format(str(e), traceback.format_exc())) # Update the location controller.device_measurements_updated(botengine, device_location, updated_device) botengine.inputs['time'] = execution_time_ms # DEVICE ALERTS if trigger_type & botengine.TRIGGER_DEVICE_ALERT != 0: # Triggered off a device alert for trigger in triggers: if 'device' in trigger: device_id = trigger['device']['deviceId'] device_object = controller.get_device(device_id) if device_object is not None: device_location = trigger['device']['locationId'] alerts = botengine.get_alerts_block() if alerts is not None: for alert in alerts: botengine.get_logger().info("Alert: " + json.dumps(alert, indent=2, sort_keys=True)) # Reformat to extract value alert_params = {} if 'params' in alert: for p in alert['params']: alert_params[p['name']] = p['value'] if alert is not None: device_object.device_alert(botengine, alert['alertType'], alert_params) controller.device_alert(botengine, device_location, device_object, alert['alertType'], alert_params) # FILE UPLOAD TRIGGERS if trigger_type & botengine.TRIGGER_DEVICE_FILES != 0: # Triggered off an uploaded file file = botengine.get_file_block() botengine.get_logger().info("File: " + json.dumps(file, indent=2, sort_keys=True)) if file is not None: device_object = controller.get_device(file['deviceId']) if device_object is not None: controller.file_uploaded(botengine, device_object, file) # QUESTIONS ANSWERED if trigger_type & botengine.TRIGGER_QUESTION_ANSWER != 0: question = botengine.get_answered_question() botengine.get_logger().info("Answered: " + str(question.key_identifier)) botengine.get_logger().info("Answer = {}".format(question.answer)) controller.sync_question(botengine, question) # DATA STREAM TRIGGERS if trigger_type & botengine.TRIGGER_DATA_STREAM != 0: # Triggered off a data stream message data_stream = 
botengine.get_datastream_block() botengine.get_logger().info("Data Stream: " + json.dumps(data_stream, indent=2, sort_keys=True)) if 'address' not in data_stream: botengine.get_logger().warn("Data stream message does not contain an 'address' field. Ignoring the message.") else: address = data_stream['address'] if 'feed' in data_stream: content = data_stream['feed'] else: content = {} if 'fromAppInstanceId' in data_stream: if type(content) == type({}): content['sender_bot_id'] = data_stream['fromAppInstanceId'] if address != "schedule": controller.sync_datastreams(botengine, address, content) else: controller.run_intelligence_schedules(botengine) # COMMAND RESPONSES if trigger_type & botengine.TRIGGER_COMMAND_RESPONSE != 0: botengine.get_logger().info("Command Responses: {}".format(json.dumps(botengine.get_inputs()['commandResponses']))) # TODO responses to commands delivered by the bot are available to build out reliable command delivery infrastructure pass # GOAL / SCENARIO CHANGES if trigger_type & botengine.TRIGGER_METADATA != 0: # The user changed the goal / scenario for a single sensor for trigger in triggers: botengine.get_logger().info("Changed device configuration") if 'device' in trigger: device_id = trigger['device']['deviceId'] device_object = controller.get_device(device_id) if device_object is not None: device_location = trigger['device']['locationId'] if 'spaces' in trigger['device']: device_object.spaces = trigger['device']['spaces'] else: device_object.spaces = [] updated_devices, updated_metadata = device_object.update(botengine, botengine.get_measures_block()) for updated_device in updated_metadata: controller.sync_device(botengine, device_location, device_id, updated_device) updated_device.device_metadata_updated(botengine) controller.device_metadata_updated(botengine, device_location, updated_device) # LOCATION CONFIGURATION CHANGES if trigger_type & botengine.TRIGGER_LOCATION_CONFIGURATION != 0: # The user changed location configuration settings, such as adding/removing/changing a user role in the location category = None previous_category = None location_access = None previous_location_access = None user_id = None location_id = botengine.get_location_id() users = botengine.get_users_block() call_center = botengine.get_callcenter_block() if users is not None: # User changed roles botengine.get_logger().info("User changed roles") for user in users: botengine.get_logger().info("User: {}".format(user)) if 'category' in user: category = user['category'] if 'prevCategory' in user: previous_category = user['prevCategory'] if 'locationAccess' in user: location_access = user['locationAccess'] if 'prevLocationAccess' in user: previous_location_access = user['prevLocationAccess'] if 'userId' in user: user_id = user['userId'] role = None if 'role' in user: role = user['role'] controller.user_role_updated(botengine, location_id, user_id, role, category, location_access, previous_category, previous_location_access) if call_center is not None: # Location call center changed status botengine.get_logger().info("Emergency Call Center Updated") if 'status' in call_center: status = call_center['status'] if 'userId' in call_center: user_id = call_center['userId'] controller.call_center_updated(botengine, location_id, user_id, status) # DATA REQUEST if trigger_type & botengine.TRIGGER_DATA_REQUEST != 0: # Response to botengine.request_data() botengine.get_logger().info("Data request received") data = botengine.get_data_block() events = {} imported = False import importlib try: import 
lz4.block imported = True except ImportError: botengine.get_logger().error("Attempted to import 'lz4' to uncompress the data request response, but lz4 is not available. Please add 'lz4' to 'pip_install_remotely' in your structure.json.") pass if imported: for d in data: reference = None if 'key' in d: reference = d['key'] if reference not in events: events[reference] = {} botengine.get_logger().info("Downloading {} ({} bytes)...".format(d['deviceId'], d['dataLength'])) r = botengine._requests.get(d['url'], timeout=60, stream=True) events[reference][controller.get_device(d['deviceId'])] = lz4.block.decompress(r.content, uncompressed_size=d['dataLength']) for reference in events: controller.data_request_ready(botengine, reference, events[reference]) # DO NOT SAVE CORE VARIABLES HERE. return # Always save your variables! botengine.save_variable("controller", controller, required_for_each_execution=True) botengine.get_logger().info("<< bot") def load_controller(botengine): """ Load the Controller object :param botengine: Execution environment """ logger = botengine.get_logger() try: controller = botengine.load_variable("controller") logger.info("Loaded the controller") except: controller = None logger.info("Unable to load the controller") if controller == None: botengine.get_logger().info("Bot : Creating a new Controller object. Hello.") controller = Controller() botengine.save_variable("controller", controller, required_for_each_execution=True) controller.track_new_and_deleted_devices(botengine) return controller #=============================================================================== # Location Intelligence Timers #=============================================================================== def _location_intelligence_fired(botengine, argument_tuple): """ Entry point into this bot Location intelligence timer or alarm fired :param botengine: BotEngine Environment :param argument_tuple: (intelligence_id, argument) """ botengine.get_logger().info("\n\nTRIGGER : _location_intelligence_fired()") controller = load_controller(botengine) try: controller.run_location_intelligence(botengine, argument_tuple[0], argument_tuple[1]) except Exception as e: import traceback botengine.get_logger().error("{}; {}".format(str(e), traceback.format_exc())) botengine.save_variable("controller", controller, required_for_each_execution=True) botengine.get_logger().info("<< bot (location timer)") def start_location_intelligence_timer(botengine, seconds, intelligence_id, argument, reference): """ Start a relative location intelligence timer :param botengine: BotEngine environment :param seconds: Seconds from the start of the current execution to make this timer fire :param intelligence_id: ID of the intelligence module to trigger when this timer fires :param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires :param reference: Unique reference name that lets us later cancel this timer if needed """ botengine.get_logger().info(">start_location_intelligence_timer({}, {})".format(seconds, reference)) if reference is not None and reference != "": botengine.cancel_timers(reference) botengine.start_timer_s(int(seconds), _location_intelligence_fired, (intelligence_id, argument), reference) def start_location_intelligence_timer_ms(botengine, milliseconds, intelligence_id, argument, reference): """ Start a relative location intelligence timer :param botengine: BotEngine environment :param milliseconds: Milliseconds from the start of the current execution to 
make this timer fire :param intelligence_id: ID of the intelligence module to trigger when this timer fires :param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires :param reference: Unique reference name that lets us later cancel this timer if needed """ botengine.get_logger().info(">start_location_intelligence_timer_ms({}, {})".format(milliseconds, reference)) if reference is not None and reference != "": botengine.cancel_timers(reference) botengine.start_timer_ms(int(milliseconds), _location_intelligence_fired, (intelligence_id, argument), reference) def set_location_intelligence_alarm(botengine, timestamp_ms, intelligence_id, argument, reference): """ Set an absolute location intelligence alarm :param botengine: BotEngine environment :param timestamp: Absolute timestamp in milliseconds at which to trigger this alarm :param intelligence_id: ID of the intelligence module to trigger when this alarm fires :param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires :param reference: Unique reference name that lets us later cancel this timer if needed """ botengine.get_logger().info(">set_location_intelligence_alarm({})".format(timestamp_ms)) if reference is not None and reference != "": botengine.cancel_timers(reference) botengine.set_alarm(int(timestamp_ms), _location_intelligence_fired, (intelligence_id, argument), reference) def cancel_location_intelligence_timers(botengine, reference): """ Cancel all location intelligence timers and alarms with the given reference :param botengine: BotEngine environment :param reference: Unique reference name for which to cancel all timers and alarms """ botengine.cancel_timers(reference) def is_location_timer_running(botengine, reference): """ Determine if the timer with the given reference is running :param botengine: BotEngine environment :param reference: Unique reference name for the timer :return: True if the timer is running """ return botengine.is_timer_running(reference) #=============================================================================== # Device Intelligence Timers #=============================================================================== def _device_intelligence_fired(botengine, argument_tuple): """ Entry point into this bot Device intelligence timer or alarm fired :param botengine: BotEngine Environment :param argument_tuple: (intelligence_id, argument) """ botengine.get_logger().info("\n\nTRIGGER : _device_intelligence_fired()") controller = load_controller(botengine) try: controller.run_device_intelligence(botengine, argument_tuple[0], argument_tuple[1]) except Exception as e: import traceback botengine.get_logger().error("{}; {}".format(str(e), traceback.format_exc())) if botengine.playback: import time time.sleep(2) botengine.save_variable("controller", controller, required_for_each_execution=True) botengine.get_logger().info("<< bot (device timer)") def start_device_intelligence_timer(botengine, seconds, intelligence_id, argument, reference): """ Start a relative device intelligence timer :param botengine: BotEngine environment :param seconds: Seconds from the start of the current execution to make this timer fire :param intelligence_id: ID of the intelligence module to trigger when this timer fires :param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires :param reference: Unique reference name that lets us later cancel this timer if needed 
""" botengine.get_logger().info(">start_device_intelligence_timer({}, {})".format(seconds, reference)) if reference is not None and reference != "": botengine.cancel_timers(reference) botengine.start_timer_s(int(seconds), _device_intelligence_fired, (intelligence_id, argument), reference) def start_device_intelligence_timer_ms(botengine, milliseconds, intelligence_id, argument, reference): """ Start a relative device intelligence timer :param botengine: BotEngine environment :param milliseconds: Milliseconds from the start of the current execution to make this timer fire :param intelligence_id: ID of the intelligence module to trigger when this timer fires :param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires :param reference: Unique reference name that lets us later cancel this timer if needed """ botengine.get_logger().info(">start_device_intelligence_timer_ms({}, {})".format(milliseconds, reference)) if reference is not None and reference != "": botengine.cancel_timers(reference) botengine.start_timer_ms(int(milliseconds), _device_intelligence_fired, (intelligence_id, argument), reference) def set_device_intelligence_alarm(botengine, timestamp_ms, intelligence_id, argument, reference): """ Set an absolute device intelligence alarm :param botengine: BotEngine environment :param timestamp: Absolute timestamp in milliseconds at which to trigger this alarm :param intelligence_id: ID of the intelligence module to trigger when this alarm fires :param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires :param reference: Unique reference name that lets us later cancel this timer if needed """ botengine.get_logger().info(">set_device_intelligence_alarm({})".format(timestamp_ms)) if reference is not None and reference != "": botengine.cancel_timers(reference) botengine.set_alarm(int(timestamp_ms), _device_intelligence_fired, (intelligence_id, argument), reference) def cancel_device_intelligence_timers(botengine, reference): """ Cancel all device intelligence timers and alarms with the given reference :param botengine: BotEngine environment :param reference: Unique reference name for which to cancel all timers and alarms """ botengine.cancel_timers(reference) def is_device_timer_running(botengine, reference): """ Determine if the timer with the given reference is running :param botengine: BotEngine environment :param reference: Unique reference name for the timer :return: True if the timer is running """ return botengine.is_timer_running(reference)
#!/usr/bin/env python3 # Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Helpful routines for regression testing.""" from base64 import b64encode from decimal import Decimal, ROUND_DOWN from subprocess import CalledProcessError import hashlib import inspect import json import logging import os import re import time import unittest from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException from typing import Callable, Optional logger = logging.getLogger("TestFramework.utils") # Assert functions ################## def assert_approx(v, vexp, vspan=0.00001): """Assert that `v` is within `vspan` of `vexp`""" if v < vexp - vspan: raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan))) if v > vexp + vspan: raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan))) def assert_fee_amount(fee, tx_size, feerate_BTC_kvB): """Assert the fee is in range.""" assert isinstance(tx_size, int) target_fee = get_fee(tx_size, feerate_BTC_kvB) if fee < target_fee: raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee))) # allow the wallet's estimation to be at most 2 bytes off high_fee = get_fee(tx_size + 2, feerate_BTC_kvB) if fee > high_fee: raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee))) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: raise AssertionError("%s < %s" % (str(thing1), str(thing2))) def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except JSONRPCException: raise AssertionError("Use assert_raises_rpc_error() to test RPC failures") except exc as e: if message is not None and message not in e.error['message']: raise AssertionError( "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format( message, e.error['message'])) except Exception as e: raise AssertionError("Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_raises_process_error(returncode: int, output: str, fun: Callable, *args, **kwds): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. Args: returncode: the process return code. output: [a substring of] the process output. fun: the function to call. This should execute a process. args*: positional arguments for the function. kwds**: named arguments for the function. 
""" try: fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: raise AssertionError("Unexpected returncode %i" % e.returncode) if output not in e.output: raise AssertionError("Expected substring not found:" + e.output) else: raise AssertionError("No exception raised") def assert_raises_rpc_error(code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. Throws AssertionError if no JSONRPCException was raised or if the error code/message are not as expected. Args: code: the error code returned by the RPC call (defined in src/rpc/protocol.h). Set to None if checking the error code is not required. message: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required. fun: the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ assert try_rpc(code, message, fun, *args, **kwds), "No exception raised" def try_rpc(code, message, fun, *args, **kwds): """Tries to run an rpc command. Test against error code and message if the rpc fails. Returns whether a JSONRPCException was raised.""" try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message values are correct. if (code is not None) and (code != e.error["code"]): raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"]) if (message is not None) and (message not in e.error['message']): raise AssertionError( "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format( message, e.error['message'])) return True except Exception as e: raise AssertionError("Unexpected exception raised: " + type(e).__name__) else: return False def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)) def assert_is_hash_string(string, length=64): if not isinstance(string, str): raise AssertionError("Expected a string, got type %r" % type(string)) elif length and len(string) != length: raise AssertionError("String of length %d expected; got %d" % (length, len(string))) elif not re.match('[abcdef0-9]+$', string): raise AssertionError("String %r contains invalid characters for a hash." % string) def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. 
If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value))) num_matched = num_matched + 1 if num_matched == 0 and not should_not_find: raise AssertionError("No objects matched %s" % (str(to_match))) if num_matched > 0 and should_not_find: raise AssertionError("Objects were found %s" % (str(to_match))) # Utility functions ################### def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def EncodeDecimal(o): if isinstance(o, Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') def ceildiv(a, b): """ Divide 2 ints and round up to next int rather than round down Implementation requires python integers, which have a // operator that does floor division. Other types like decimal.Decimal whose // operator truncates towards 0 will not work. """ assert isinstance(a, int) assert isinstance(b, int) return -(-a // b) def get_fee(tx_size, feerate_btc_kvb): """Calculate the fee in BTC given a feerate is BTC/kvB. Reflects CFeeRate::GetFee""" feerate_sat_kvb = int(feerate_btc_kvb * Decimal(1e8)) # Fee in sat/kvb as an int to avoid float precision errors target_fee_sat = ceildiv(feerate_sat_kvb * tx_size, 1000) # Round calculated fee up to nearest sat return target_fee_sat / Decimal(1e8) # Return result in BTC def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0): """Sleep until the predicate resolves to be True. Warning: Note that this method is not recommended to be used in tests as it is not aware of the context of the test framework. Using the `wait_until()` members from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in `p2p.py` has a preset lock. """ if attempts == float('inf') and timeout == float('inf'): timeout = 60 timeout = timeout * timeout_factor attempt = 0 time_end = time.time() + timeout while attempt < attempts and time.time() < time_end: if lock: with lock: if predicate(): return else: if predicate(): return attempt += 1 time.sleep(0.05) # Print the cause of the timeout predicate_source = "''''\n" + inspect.getsource(predicate) + "'''" logger.error("wait_until() failed. 
Predicate: {}".format(predicate_source)) if attempt >= attempts: raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts)) elif time.time() >= time_end: raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) raise RuntimeError('Unreachable') def sha256sum_file(filename): h = hashlib.sha256() with open(filename, 'rb') as f: d = f.read(4096) while len(d) > 0: h.update(d) d = f.read(4096) return h.digest() # RPC/P2P connection constants and functions ############################################ # The maximum number of nodes a single test can spawn MAX_NODES = 12 # Don't assign rpc or p2p ports lower than this PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000)) # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 class PortSeed: # Must be initialized with a unique integer for each process n = None def get_rpc_proxy(url: str, node_number: int, *, timeout: int=None, coveragedir: str=None) -> coverage.AuthServiceProxyWrapper: """ Args: url: URL of the RPC server to call node_number: the node number (or id) that this calls to Kwargs: timeout: HTTP timeout in seconds coveragedir: Directory Returns: AuthServiceProxy. convenience object for making RPC calls. """ proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = int(timeout) proxy = AuthServiceProxy(url, **proxy_kwargs) coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None return coverage.AuthServiceProxyWrapper(proxy, url, coverage_logfile) def p2p_port(n): assert n <= MAX_NODES return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_url(datadir, i, chain, rpchost): rpc_u, rpc_p = get_auth_cookie(datadir, chain) host = '127.0.0.1' port = rpc_port(i) if rpchost: parts = rpchost.split(':') if len(parts) == 2: host, port = parts else: host = rpchost return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) # Node functions ################ def initialize_datadir(dirname, n, chain, disable_autoconnect=True): datadir = get_datadir_path(dirname, n) if not os.path.isdir(datadir): os.makedirs(datadir) write_config(os.path.join(datadir, "particl.conf"), n=n, chain=chain, disable_autoconnect=disable_autoconnect) os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) return datadir def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=True): # Translate chain subdirectory name to config name if chain == 'testnet3': chain_name_conf_arg = 'testnet' chain_name_conf_section = 'test' else: chain_name_conf_arg = chain chain_name_conf_section = chain with open(config_path, 'w', encoding='utf8') as f: if chain_name_conf_arg: f.write("{}=1\n".format(chain_name_conf_arg)) if chain_name_conf_section: f.write("[{}]\n".format(chain_name_conf_section)) f.write("port=" + str(p2p_port(n)) + "\n") f.write("rpcport=" + str(rpc_port(n)) + "\n") f.write("fallbackfee=0.0002\n") f.write("server=1\n") f.write("keypool=1\n") f.write("discover=0\n") f.write("dnsseed=0\n") f.write("fixedseeds=0\n") f.write("listenonion=0\n") # Increase peertimeout to avoid disconnects while using mocktime. # peertimeout is measured in mock time, so setting it large enough to # cover any duration in mock time is sufficient. It can be overridden # in tests. 
f.write("peertimeout=999999999\n") f.write("printtoconsole=0\n") f.write("upnp=0\n") f.write("natpmp=0\n") f.write("shrinkdebugfile=0\n") # To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync f.write("unsafesqlitesync=1\n") if disable_autoconnect: f.write("connect=0\n") f.write(extra_config) def get_datadir_path(dirname, n): return os.path.join(dirname, "node" + str(n)) def append_config(datadir, options): with open(os.path.join(datadir, "particl.conf"), 'a', encoding='utf8') as f: for option in options: f.write(option + "\n") def get_auth_cookie(datadir, chain): user = None password = None if os.path.isfile(os.path.join(datadir, "particl.conf")): with open(os.path.join(datadir, "particl.conf"), 'r', encoding='utf8') as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): assert password is None # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") try: with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f: userpass = f.read() split_userpass = userpass.split(':') user = split_userpass[0] password = split_userpass[1] except OSError: pass if user is None or password is None: raise ValueError("No RPC credentials") return user, password # If a cookie file exists in the given datadir, delete it. def delete_cookie_file(datadir, chain): if os.path.isfile(os.path.join(datadir, chain, ".cookie")): logger.debug("Deleting leftover cookie file") os.remove(os.path.join(datadir, chain, ".cookie")) def softfork_active(node, key): """Return whether a softfork is active.""" return node.getdeploymentinfo()['deployments'][key]['active'] def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def check_node_connections(*, node, num_in, num_out): info = node.getnetworkinfo() assert_equal(info["connections_in"], num_in) assert_equal(info["connections_out"], num_out) # Transaction/Block functions ############################# def find_output(node, txid, amount, *, blockhash=None): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1, blockhash) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount))) # Helper to create at least "count" utxos # Pass in a fee that is sufficient for relay and mining new transactions. 
def create_confirmed_utxos(test_framework, fee, node, count, **kwargs): to_generate = int(0.5 * count) + 101 while to_generate > 0: test_framework.generate(node, min(25, to_generate), **kwargs) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for _ in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} send_value = t['amount'] - fee outputs[addr1] = satoshi_round(send_value / 2) outputs[addr2] = satoshi_round(send_value / 2) raw_tx = node.createrawtransaction(inputs, outputs) signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"] node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): test_framework.generate(node, 1, **kwargs) utxos = node.listunspent() assert len(utxos) >= count return utxos def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs): """Build and send a transaction that spends the given inputs (specified by lists of parent_txid:vout each), with the desired total value and fee, equally divided up to the desired number of outputs. Returns a tuple with the txid and the amount sent per output. """ send_value = satoshi_round((value - fee)/num_outputs) inputs = [] for (txid, vout) in zip(parent_txids, vouts): inputs.append({'txid' : txid, 'vout' : vout}) outputs = {} for _ in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs, 0, True) signedtx = node.signrawtransactionwithwallet(rawtx) txid = node.sendrawtransaction(signedtx['hex']) fulltx = node.getrawtransaction(txid, 1) assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output return (txid, send_value) # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). def gen_return_txouts(): # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes for _ in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change txouts = [] from .messages import CTxOut txout = CTxOut() txout.nValue = 0 txout.scriptPubKey = bytes.fromhex(script_pubkey) for _ in range(128): txouts.append(txout) return txouts # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above. 
def create_lots_of_big_transactions(node, txouts, utxos, num, fee): addr = node.getnewaddress() txids = [] from .messages import tx_from_hex for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] outputs = {} change = t['amount'] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) tx = tx_from_hex(rawtx) for txout in txouts: tx.vout.append(txout) newtx = tx.serialize().hex() signresult = node.signrawtransactionwithwallet(newtx, None, "NONE") txid = node.sendrawtransaction(signresult["hex"], 0) txids.append(txid) return txids def mine_large_block(test_framework, node, utxos=None): # generate a 66k transaction, # and 14 of them is close to the 1MB block limit num = 14 txouts = gen_return_txouts() utxos = utxos if utxos is not None else [] if len(utxos) < num: utxos.clear() utxos.extend(node.listunspent()) fee = 100 * node.getnetworkinfo()["relayfee"] create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee) test_framework.generate(node, 1) def find_vout_for_address(node, txid, addr): """ Locate the vout index of the given transaction sending to the given address. Raises runtime error exception if not found. """ tx = node.getrawtransaction(txid, True) for i in range(len(tx["vout"])): if addr == tx["vout"][i]["scriptPubKey"]["address"]: return i raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr)) def modinv(a, n): """Compute the modular inverse of a modulo n using the extended Euclidean Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers. """ # TODO: Change to pow(a, -1, n) available in Python 3.8 t1, t2 = 0, 1 r1, r2 = n, a while r2 != 0: q = r1 // r2 t1, t2 = t2, t1 - q * t2 r1, r2 = r2, r1 - q * r2 if r1 > 1: return None if t1 < 0: t1 += n return t1 class TestFrameworkUtil(unittest.TestCase): def test_modinv(self): test_vectors = [ [7, 11], [11, 29], [90, 13], [1891, 3797], [6003722857, 77695236973], ] for a, n in test_vectors: self.assertEqual(modinv(a, n), pow(a, n-2, n))
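# --- Hypothetical usage sketch, not part of the original framework file ---
# modinv() above implements the extended Euclidean algorithm; as its TODO
# notes, Python 3.8+ provides the same operation as pow(a, -1, n). A minimal,
# self-contained cross-check of the two approaches (assumes Python 3.8+):
def _modinv_sketch():
    def ext_euclid_modinv(a, n):
        # Same iteration as modinv() above, but normalized into [0, n).
        t1, t2, r1, r2 = 0, 1, n, a
        while r2 != 0:
            q = r1 // r2
            t1, t2 = t2, t1 - q * t2
            r1, r2 = r2, r1 - q * r2
        if r1 > 1:
            return None  # a has no inverse modulo n
        return t1 % n

    for a, n in [(7, 11), (1891, 3797)]:
        inv = ext_euclid_modinv(a, n)
        assert inv == pow(a, -1, n)
        assert (a * inv) % n == 1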
import os import re import sys import tempfile from remoteconfig import RemoteConfig class CastError(Exception): pass class Cast(object): """ Represents all cast options/messages. """ ALERT_SECTION = 'Alert' MESSAGES_SECTION = 'Messages' ALERT_MSG_KEY = 'message' ALERT_EXIT_KEY = 'exit' MESSAGE_NEXT_KEY = '_next_key' MESSAGE_LIMIT_KEY = '_limit' class CastMessage(object): """ Represents a single message in a cast. """ def __init__(self, key, message): """ :param key: Message key :param str message: The actual message """ self.key = str(key) self.message = message def __cmp__(a, b): try: return cmp(int(a.key), int(b.key)) except Exception: return cmp(a.key, b.key) def __init__(self, alert=None, alert_exit=False, messages=None, next_msg_key=None, msg_limit=None): """ :param str alert: Alert message :param bool alert_exit: Should client CLI exit. Ignored unless alert message is set. :param list(tuple) messages: List of tuple of (key, message) :param int next_msg_key: Next message key to use :param int msg_limit: Limit total number of messages by deleting oldest one. """ self.alert = alert self.alert_exit = alert and alert_exit self.messages = messages and sorted([self.CastMessage(*m) for m in messages]) or [] self._next_msg_key = next_msg_key self.msg_limit = msg_limit # Always set this so that it can be used in :meth:`self.save` if self.messages and not self._next_msg_key: self._next_msg_key = self.next_msg_key(reserve_next=False) def add_msg(self, msg, alert=False, alert_exit=False): """ :param str msg: The message to add or set :param bool alert: Indicates this is the alert message to set. :param bool alert_exit: Indicates this is the alert should request client to exit. """ if alert or alert_exit: self.alert = msg self.alert_exit = alert_exit else: self.messages.append(self.CastMessage(self.next_msg_key(), msg)) self.set_msg_limit(self.msg_limit) def del_msg(self, count=1, alert=False): """ Delete message :param int count: Number of messages to delete :param bool alert: Delete the alert message. Param `count` is ignored. """ if alert: self.alert = None self.alert_exit = None return 1 else: before_count = len(self.messages) self.messages = self.messages[count:] return before_count - len(self.messages) def next_msg_key(self, reserve_next=True): """ :param bool reserve_next: Indicates the key after next should be reserved. Default behavior. :return: Next message key :rtype: int """ if not self._next_msg_key: keys = [] for m in self.messages: try: keys.append(int(m.key)) except Exception: pass if keys: self._next_msg_key = keys[-1] + 1 else: self._next_msg_key = 1 next_key = self._next_msg_key if reserve_next: self._next_msg_key += 1 return next_key def set_msg_limit(self, limit=None): """ Limit total number of messages by deleting oldest message as new message is added when limit has been reached. :param int limit: Number of message to limit. Defaults to infinite. """ self.msg_limit = limit if limit: over_limit = len(self.messages) - limit if over_limit > 0: self.del_msg(over_limit) @classmethod def from_content(cls, context, msg_filter=None, cache_duration=None): """ Create a :class:`Cast` from the given content :param str context: Cast content string, file, or URL :param callable msg_filter: Filter messages with callable that accepts message string and alert boolean (True for alert message). It should return the original or an updated message, or None if the message should be ignored. :param int cache_duration: For cast URL only. 
Optionally cache the URL content for the given duration (seconds) to avoid downloading too often. :rtype: :class:`Cast` """ config = RemoteConfig(context, cache_duration=cache_duration, compact_form=True) alert_msg = None alert_exit = None if cls.ALERT_SECTION in config: for key, value in config.items(cls.ALERT_SECTION): if cls.ALERT_MSG_KEY == key: if msg_filter: value = msg_filter(value, True) alert_msg = value elif cls.ALERT_EXIT_KEY == key: alert_exit = value else: raise CastError('Invalid key "%s" in %s section', key, cls.ALERT_SECTION) messages = [] next_msg_key = None msg_limit = None if cls.MESSAGES_SECTION in config: for key, value in config.items(cls.MESSAGES_SECTION): if key == cls.MESSAGE_NEXT_KEY: next_msg_key = value elif key == cls.MESSAGE_LIMIT_KEY: msg_limit = value elif key.startswith('_'): pass # Ignore future private keys else: if msg_filter: value = msg_filter(value) if value: messages.append((key, value)) return cls(alert_msg, alert_exit, messages, next_msg_key, msg_limit) def __repr__(self): parser = RemoteConfig(kv_sep=': ', compact_form=True) if self.alert: parser.add_section(self.ALERT_SECTION) parser.set(self.ALERT_SECTION, self.ALERT_MSG_KEY, self.alert) if self.alert_exit: parser.set(self.ALERT_SECTION, self.ALERT_EXIT_KEY, True) if self.messages: parser.add_section(self.MESSAGES_SECTION) for msg in self.messages: parser.set(self.MESSAGES_SECTION, msg.key, msg.message) elif self._next_msg_key: parser.add_section(self.MESSAGES_SECTION) parser.set(self.MESSAGES_SECTION, self.MESSAGE_NEXT_KEY, self._next_msg_key) if self.msg_limit: parser.set(self.MESSAGES_SECTION, self.MESSAGE_LIMIT_KEY, self.msg_limit) parser._indent_spaces = len(str(self._next_msg_key)) + 2 if self._next_msg_key else 3 return str(parser).strip() def save(self, cast_file): """ Save the cast data to the given file. """ with open(cast_file, 'w') as fp: fp.write(str(self) + '\n') class CastReader(object): """ Reads a :class:`Cast` and keep track of read messages """ READ_MSG_FILE = os.path.join(tempfile.gettempdir(), '%s.read_messages' % os.path.basename(sys.argv[0])) @classmethod def reset(cls): """ Resets read messages, so all messages will be displayed again. """ if os.path.exists(cls.READ_MSG_FILE): os.unlink(cls.READ_MSG_FILE) def __init__(self, cast): """ :param Cast cast: A :class:`Cast` instance """ self.cast = cast def show_messages(self, logger=None, header=None, footer=None): """ Show new messages :param logging.Logger logger: Use logger to print the new messages instead of stdout :param str header: Header to show before messages :param str footer: Footer to show before messages """ msgs = self.new_messages() if msgs: if logger: if header: logger.info(header) for msg in msgs: for line in msg.split('\n'): logger.info(line) if footer: logger.info(footer) else: if header: print header print '\n\n'.join(msgs) if footer: print footer def new_messages(self, mark_as_read=True): """ :param bool mark_as_read: Mark new messages as read :return: List of new messages with alert being the first if any. :rtype: list(str) """ read_keys = self._read_msg_keys() new_messages = [m for m in self.cast.messages if m.key not in read_keys] if new_messages and mark_as_read: self._mark_as_read(new_messages) msgs = [m.message for m in new_messages] if self.cast.alert: msgs.insert(0, self.cast.alert) return msgs def _read_msg_keys(self): """ Set of read messages. 
""" try: with open(self.READ_MSG_FILE) as fp: read_keys = fp.read() return set(read_keys.split()) except Exception: return set() def _mark_as_read(self, messages): """ Mark the given list of :class:`CastMessage` as read. """ keys = self._read_msg_keys() keys.update(m.key for m in messages) with open(self.READ_MSG_FILE, 'w') as fp: fp.write(' '.join(keys)) def _re_sub_multiline(pattern, repl, string): """ Simple hack to get multiline working in Python 2.6 and higher """ try: content = re.sub(pattern, repl, string, flags=re.MULTILINE) except Exception: content = [] for line in string.split('\n'): content.append(re.sub(pattern, repl, line)) content = '\n'.join(content) return content
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class AzureBareMetalInstancesOperations(object): """AzureBareMetalInstancesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~bare_metal_infrastructure_client.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_by_subscription( self, **kwargs # type: Any ): # type: (...) -> Iterable["_models.AzureBareMetalInstancesListResult"] """Gets a list of Azure BareMetal instances in the specified subscription. Gets a list of AzureBareMetal instances in the specified subscription. The operations returns various properties of each Azure BareMetal instance. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AzureBareMetalInstancesListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~bare_metal_infrastructure_client.models.AzureBareMetalInstancesListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureBareMetalInstancesListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-08-09" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_subscription.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('AzureBareMetalInstancesListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.BareMetalInfrastructure/bareMetalInstances'} # type: ignore def list_by_resource_group( self, resource_group_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.AzureBareMetalInstancesListResult"] """Gets a list of Azure BareMetal instances in the specified subscription and resource group. Gets a list of AzureBareMetal instances in the specified subscription and resource group. The operations returns various properties of each Azure BareMetal instance. :param resource_group_name: The name of the resource group. The name is case insensitive. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AzureBareMetalInstancesListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~bare_metal_infrastructure_client.models.AzureBareMetalInstancesListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureBareMetalInstancesListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-08-09" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('AzureBareMetalInstancesListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BareMetalInfrastructure/bareMetalInstances'} # type: ignore def get( self, resource_group_name, # type: str azure_bare_metal_instance_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.AzureBareMetalInstance" """Gets an Azure BareMetal instance. Gets an Azure BareMetal instance for the specified subscription, resource group, and instance name. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param azure_bare_metal_instance_name: Name of the Azure BareMetal on Azure instance. 
:type azure_bare_metal_instance_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AzureBareMetalInstance, or the result of cls(response) :rtype: ~bare_metal_infrastructure_client.models.AzureBareMetalInstance :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureBareMetalInstance"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-08-09" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1), 'azureBareMetalInstanceName': self._serialize.url("azure_bare_metal_instance_name", azure_bare_metal_instance_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AzureBareMetalInstance', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BareMetalInfrastructure/bareMetalInstances/{azureBareMetalInstanceName}'} # type: ignore def update( self, resource_group_name, # type: str azure_bare_metal_instance_name, # type: str tags_parameter, # type: "_models.Tags" **kwargs # type: Any ): # type: (...) -> "_models.AzureBareMetalInstance" """Patches the Tags field of a Azure BareMetal instance. Patches the Tags field of a Azure BareMetal instance for the specified subscription, resource group, and instance name. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param azure_bare_metal_instance_name: Name of the Azure BareMetal on Azure instance. :type azure_bare_metal_instance_name: str :param tags_parameter: Request body that only contains the new Tags field. 
:type tags_parameter: ~bare_metal_infrastructure_client.models.Tags :keyword callable cls: A custom type or function that will be passed the direct response :return: AzureBareMetalInstance, or the result of cls(response) :rtype: ~bare_metal_infrastructure_client.models.AzureBareMetalInstance :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureBareMetalInstance"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-08-09" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1), 'azureBareMetalInstanceName': self._serialize.url("azure_bare_metal_instance_name", azure_bare_metal_instance_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(tags_parameter, 'Tags') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AzureBareMetalInstance', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BareMetalInfrastructure/bareMetalInstances/{azureBareMetalInstanceName}'} # type: ignore
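# --- Hypothetical usage sketch, not part of the generated module ---
# The list_* operations above return azure.core.paging.ItemPaged, which
# follows next_link transparently, while get()/update() return deserialized
# AzureBareMetalInstance models. `client` is an assumed, already-authenticated
# service client that exposes this operation group as an attribute (per the
# class docstring); the attribute name, resource group and instance name below
# are example assumptions, not values taken from this file.
def _bare_metal_usage_sketch(client):
    for instance in client.azure_bare_metal_instances.list_by_subscription():
        print(instance.name)

    single = client.azure_bare_metal_instances.get(
        resource_group_name="example-rg",
        azure_bare_metal_instance_name="example-instance",
    )
    updated = client.azure_bare_metal_instances.update(
        resource_group_name="example-rg",
        azure_bare_metal_instance_name="example-instance",
        tags_parameter=_models.Tags(tags={"env": "test"}),
    )
    return single, updated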
"""Support for OpenTherm Gateway devices.""" import logging from datetime import datetime, date import voluptuous as vol from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR from homeassistant.components.sensor import DOMAIN as COMP_SENSOR from homeassistant.const import ( ATTR_DATE, ATTR_ID, ATTR_TEMPERATURE, ATTR_TIME, CONF_DEVICE, CONF_MONITORED_VARIABLES, CONF_NAME, EVENT_HOMEASSISTANT_STOP, PRECISION_HALVES, PRECISION_TENTHS, PRECISION_WHOLE) from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import async_dispatcher_send import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pyotgw==0.4b3'] _LOGGER = logging.getLogger(__name__) DOMAIN = 'opentherm_gw' ATTR_MODE = 'mode' ATTR_LEVEL = 'level' CONF_CLIMATE = 'climate' CONF_FLOOR_TEMP = 'floor_temperature' CONF_PRECISION = 'precision' DATA_DEVICE = 'device' DATA_GW_VARS = 'gw_vars' DATA_LATEST_STATUS = 'latest_status' DATA_OPENTHERM_GW = 'opentherm_gw' SIGNAL_OPENTHERM_GW_UPDATE = 'opentherm_gw_update' SERVICE_RESET_GATEWAY = 'reset_gateway' SERVICE_SET_CLOCK = 'set_clock' SERVICE_SET_CLOCK_SCHEMA = vol.Schema({ vol.Optional(ATTR_DATE, default=date.today()): cv.date, vol.Optional(ATTR_TIME, default=datetime.now().time()): cv.time, }) SERVICE_SET_CONTROL_SETPOINT = 'set_control_setpoint' SERVICE_SET_CONTROL_SETPOINT_SCHEMA = vol.Schema({ vol.Required(ATTR_TEMPERATURE): vol.All(vol.Coerce(float), vol.Range(min=0, max=90)), }) SERVICE_SET_GPIO_MODE = 'set_gpio_mode' SERVICE_SET_GPIO_MODE_SCHEMA = vol.Schema(vol.Any( vol.Schema({ vol.Required(ATTR_ID): vol.Equal('A'), vol.Required(ATTR_MODE): vol.All(vol.Coerce(int), vol.Range(min=0, max=6)), }), vol.Schema({ vol.Required(ATTR_ID): vol.Equal('B'), vol.Required(ATTR_MODE): vol.All(vol.Coerce(int), vol.Range(min=0, max=7)), }), )) SERVICE_SET_LED_MODE = 'set_led_mode' SERVICE_SET_LED_MODE_SCHEMA = vol.Schema({ vol.Required(ATTR_ID): vol.In('ABCDEF'), vol.Required(ATTR_MODE): vol.In('RXTBOFHWCEMP'), }) SERVICE_SET_MAX_MOD = 'set_max_modulation' SERVICE_SET_MAX_MOD_SCHEMA = vol.Schema({ vol.Required(ATTR_LEVEL): vol.All(vol.Coerce(int), vol.Range(min=-1, max=100)) }) SERVICE_SET_OAT = 'set_outside_temperature' SERVICE_SET_OAT_SCHEMA = vol.Schema({ vol.Required(ATTR_TEMPERATURE): vol.All(vol.Coerce(float), vol.Range(min=-40, max=99)), }) SERVICE_SET_SB_TEMP = 'set_setback_temperature' SERVICE_SET_SB_TEMP_SCHEMA = vol.Schema({ vol.Required(ATTR_TEMPERATURE): vol.All(vol.Coerce(float), vol.Range(min=0, max=30)), }) CLIMATE_SCHEMA = vol.Schema({ vol.Optional(CONF_NAME, default="OpenTherm Gateway"): cv.string, vol.Optional(CONF_PRECISION): vol.In([PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]), vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean, }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_DEVICE): cv.string, vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA, vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All( cv.ensure_list, [cv.string]), }), }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up the OpenTherm Gateway component.""" import pyotgw conf = config[DOMAIN] gateway = pyotgw.pyotgw() monitored_vars = conf.get(CONF_MONITORED_VARIABLES) hass.data[DATA_OPENTHERM_GW] = { DATA_DEVICE: gateway, DATA_GW_VARS: pyotgw.vars, DATA_LATEST_STATUS: {} } hass.async_create_task(register_services(hass, gateway)) hass.async_create_task(async_load_platform( hass, 'climate', DOMAIN, conf.get(CONF_CLIMATE), config)) if monitored_vars: 
hass.async_create_task(setup_monitored_vars( hass, config, monitored_vars)) # Schedule directly on the loop to avoid blocking HA startup. hass.loop.create_task( connect_and_subscribe(hass, conf[CONF_DEVICE], gateway)) return True async def connect_and_subscribe(hass, device_path, gateway): """Connect to serial device and subscribe report handler.""" await gateway.connect(hass.loop, device_path) _LOGGER.debug("Connected to OpenTherm Gateway at %s", device_path) async def cleanup(event): """Reset overrides on the gateway.""" await gateway.set_control_setpoint(0) await gateway.set_max_relative_mod('-') hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, cleanup) async def handle_report(status): """Handle reports from the OpenTherm Gateway.""" _LOGGER.debug("Received report: %s", status) hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] = status async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) gateway.subscribe(handle_report) async def register_services(hass, gateway): """Register services for the component.""" gw_vars = hass.data[DATA_OPENTHERM_GW][DATA_GW_VARS] async def reset_gateway(call): """Reset the OpenTherm Gateway.""" mode_rst = gw_vars.OTGW_MODE_RESET status = await gateway.set_mode(mode_rst) hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] = status async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway) async def set_control_setpoint(call): """Set the control setpoint on the OpenTherm Gateway.""" gw_var = gw_vars.DATA_CONTROL_SETPOINT value = await gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE]) status = hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] status.update({gw_var: value}) async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_SET_CONTROL_SETPOINT, set_control_setpoint, SERVICE_SET_CONTROL_SETPOINT_SCHEMA) async def set_device_clock(call): """Set the clock on the OpenTherm Gateway.""" attr_date = call.data[ATTR_DATE] attr_time = call.data[ATTR_TIME] await gateway.set_clock(datetime.combine(attr_date, attr_time)) hass.services.async_register(DOMAIN, SERVICE_SET_CLOCK, set_device_clock, SERVICE_SET_CLOCK_SCHEMA) async def set_gpio_mode(call): """Set the OpenTherm Gateway GPIO modes.""" gpio_id = call.data[ATTR_ID] gpio_mode = call.data[ATTR_MODE] mode = await gateway.set_gpio_mode(gpio_id, gpio_mode) gpio_var = getattr(gw_vars, 'OTGW_GPIO_{}'.format(gpio_id)) status = hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] status.update({gpio_var: mode}) async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, SERVICE_SET_GPIO_MODE_SCHEMA) async def set_led_mode(call): """Set the OpenTherm Gateway LED modes.""" led_id = call.data[ATTR_ID] led_mode = call.data[ATTR_MODE] mode = await gateway.set_led_mode(led_id, led_mode) led_var = getattr(gw_vars, 'OTGW_LED_{}'.format(led_id)) status = hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] status.update({led_var: mode}) async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_SET_LED_MODE, set_led_mode, SERVICE_SET_LED_MODE_SCHEMA) async def set_max_mod(call): """Set the max modulation level.""" gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD level = call.data[ATTR_LEVEL] if level == -1: # Backend only clears setting on non-numeric values. 
level = '-' value = await gateway.set_max_relative_mod(level) status = hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] status.update({gw_var: value}) async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, SERVICE_SET_MAX_MOD_SCHEMA) async def set_outside_temp(call): """Provide the outside temperature to the OpenTherm Gateway.""" gw_var = gw_vars.DATA_OUTSIDE_TEMP value = await gateway.set_outside_temp(call.data[ATTR_TEMPERATURE]) status = hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] status.update({gw_var: value}) async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_SET_OAT, set_outside_temp, SERVICE_SET_OAT_SCHEMA) async def set_setback_temp(call): """Set the OpenTherm Gateway SetBack temperature.""" gw_var = gw_vars.OTGW_SB_TEMP value = await gateway.set_setback_temp(call.data[ATTR_TEMPERATURE]) status = hass.data[DATA_OPENTHERM_GW][DATA_LATEST_STATUS] status.update({gw_var: value}) async_dispatcher_send(hass, SIGNAL_OPENTHERM_GW_UPDATE, status) hass.services.async_register(DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, SERVICE_SET_SB_TEMP_SCHEMA) async def setup_monitored_vars(hass, config, monitored_vars): """Set up requested sensors.""" gw_vars = hass.data[DATA_OPENTHERM_GW][DATA_GW_VARS] sensor_type_map = { COMP_BINARY_SENSOR: [ gw_vars.DATA_MASTER_CH_ENABLED, gw_vars.DATA_MASTER_DHW_ENABLED, gw_vars.DATA_MASTER_COOLING_ENABLED, gw_vars.DATA_MASTER_OTC_ENABLED, gw_vars.DATA_MASTER_CH2_ENABLED, gw_vars.DATA_SLAVE_FAULT_IND, gw_vars.DATA_SLAVE_CH_ACTIVE, gw_vars.DATA_SLAVE_DHW_ACTIVE, gw_vars.DATA_SLAVE_FLAME_ON, gw_vars.DATA_SLAVE_COOLING_ACTIVE, gw_vars.DATA_SLAVE_CH2_ACTIVE, gw_vars.DATA_SLAVE_DIAG_IND, gw_vars.DATA_SLAVE_DHW_PRESENT, gw_vars.DATA_SLAVE_CONTROL_TYPE, gw_vars.DATA_SLAVE_COOLING_SUPPORTED, gw_vars.DATA_SLAVE_DHW_CONFIG, gw_vars.DATA_SLAVE_MASTER_LOW_OFF_PUMP, gw_vars.DATA_SLAVE_CH2_PRESENT, gw_vars.DATA_SLAVE_SERVICE_REQ, gw_vars.DATA_SLAVE_REMOTE_RESET, gw_vars.DATA_SLAVE_LOW_WATER_PRESS, gw_vars.DATA_SLAVE_GAS_FAULT, gw_vars.DATA_SLAVE_AIR_PRESS_FAULT, gw_vars.DATA_SLAVE_WATER_OVERTEMP, gw_vars.DATA_REMOTE_TRANSFER_DHW, gw_vars.DATA_REMOTE_TRANSFER_MAX_CH, gw_vars.DATA_REMOTE_RW_DHW, gw_vars.DATA_REMOTE_RW_MAX_CH, gw_vars.DATA_ROVRD_MAN_PRIO, gw_vars.DATA_ROVRD_AUTO_PRIO, gw_vars.OTGW_GPIO_A_STATE, gw_vars.OTGW_GPIO_B_STATE, gw_vars.OTGW_IGNORE_TRANSITIONS, gw_vars.OTGW_OVRD_HB, ], COMP_SENSOR: [ gw_vars.DATA_CONTROL_SETPOINT, gw_vars.DATA_MASTER_MEMBERID, gw_vars.DATA_SLAVE_MEMBERID, gw_vars.DATA_SLAVE_OEM_FAULT, gw_vars.DATA_COOLING_CONTROL, gw_vars.DATA_CONTROL_SETPOINT_2, gw_vars.DATA_ROOM_SETPOINT_OVRD, gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD, gw_vars.DATA_SLAVE_MAX_CAPACITY, gw_vars.DATA_SLAVE_MIN_MOD_LEVEL, gw_vars.DATA_ROOM_SETPOINT, gw_vars.DATA_REL_MOD_LEVEL, gw_vars.DATA_CH_WATER_PRESS, gw_vars.DATA_DHW_FLOW_RATE, gw_vars.DATA_ROOM_SETPOINT_2, gw_vars.DATA_ROOM_TEMP, gw_vars.DATA_CH_WATER_TEMP, gw_vars.DATA_DHW_TEMP, gw_vars.DATA_OUTSIDE_TEMP, gw_vars.DATA_RETURN_WATER_TEMP, gw_vars.DATA_SOLAR_STORAGE_TEMP, gw_vars.DATA_SOLAR_COLL_TEMP, gw_vars.DATA_CH_WATER_TEMP_2, gw_vars.DATA_DHW_TEMP_2, gw_vars.DATA_EXHAUST_TEMP, gw_vars.DATA_SLAVE_DHW_MAX_SETP, gw_vars.DATA_SLAVE_DHW_MIN_SETP, gw_vars.DATA_SLAVE_CH_MAX_SETP, gw_vars.DATA_SLAVE_CH_MIN_SETP, gw_vars.DATA_DHW_SETPOINT, gw_vars.DATA_MAX_CH_SETPOINT, gw_vars.DATA_OEM_DIAG, gw_vars.DATA_TOTAL_BURNER_STARTS, gw_vars.DATA_CH_PUMP_STARTS, gw_vars.DATA_DHW_PUMP_STARTS, 
gw_vars.DATA_DHW_BURNER_STARTS, gw_vars.DATA_TOTAL_BURNER_HOURS, gw_vars.DATA_CH_PUMP_HOURS, gw_vars.DATA_DHW_PUMP_HOURS, gw_vars.DATA_DHW_BURNER_HOURS, gw_vars.DATA_MASTER_OT_VERSION, gw_vars.DATA_SLAVE_OT_VERSION, gw_vars.DATA_MASTER_PRODUCT_TYPE, gw_vars.DATA_MASTER_PRODUCT_VERSION, gw_vars.DATA_SLAVE_PRODUCT_TYPE, gw_vars.DATA_SLAVE_PRODUCT_VERSION, gw_vars.OTGW_MODE, gw_vars.OTGW_DHW_OVRD, gw_vars.OTGW_ABOUT, gw_vars.OTGW_BUILD, gw_vars.OTGW_CLOCKMHZ, gw_vars.OTGW_LED_A, gw_vars.OTGW_LED_B, gw_vars.OTGW_LED_C, gw_vars.OTGW_LED_D, gw_vars.OTGW_LED_E, gw_vars.OTGW_LED_F, gw_vars.OTGW_GPIO_A, gw_vars.OTGW_GPIO_B, gw_vars.OTGW_SB_TEMP, gw_vars.OTGW_SETP_OVRD_MODE, gw_vars.OTGW_SMART_PWR, gw_vars.OTGW_THRM_DETECT, gw_vars.OTGW_VREF, ] } binary_sensors = [] sensors = [] for var in monitored_vars: if var in sensor_type_map[COMP_SENSOR]: sensors.append(var) elif var in sensor_type_map[COMP_BINARY_SENSOR]: binary_sensors.append(var) else: _LOGGER.error("Monitored variable not supported: %s", var) if binary_sensors: hass.async_create_task(async_load_platform( hass, COMP_BINARY_SENSOR, DOMAIN, binary_sensors, config)) if sensors: hass.async_create_task(async_load_platform( hass, COMP_SENSOR, DOMAIN, sensors, config))
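# --- Hypothetical configuration sketch, not part of the original component ---
# CONFIG_SCHEMA above requires a serial device and optionally accepts climate
# settings plus a list of monitored variables. A minimal dict that would pass
# validation (the device path and variable names are example values only;
# unsupported variable names are reported by setup_monitored_vars at runtime):
_EXAMPLE_CONFIG = {
    DOMAIN: {
        CONF_DEVICE: '/dev/ttyUSB0',
        CONF_CLIMATE: {
            CONF_NAME: 'Living Room Gateway',
            CONF_FLOOR_TEMP: True,
        },
        CONF_MONITORED_VARIABLES: ['room_temp', 'ch_water_temp'],
    },
}
# CONFIG_SCHEMA(_EXAMPLE_CONFIG) returns the validated config with defaults
# applied and raises vol.Invalid for unknown keys inside the DOMAIN section.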
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import datetime import json import uuid import mock import netaddr from oslotest import base as test_base import simplejson import six import six.moves.xmlrpc_client as xmlrpclib from oslo.i18n import fixture from oslo.serialization import jsonutils class JSONUtilsTestMixin(object): json_impl = None def setUp(self): super(JSONUtilsTestMixin, self).setUp() self.json_patcher = mock.patch.multiple( jsonutils, json=self.json_impl, is_simplejson=self.json_impl is simplejson) self.json_impl_mock = self.json_patcher.start() def tearDown(self): self.json_patcher.stop() super(JSONUtilsTestMixin, self).tearDown() def test_dumps(self): self.assertEqual('{"a": "b"}', jsonutils.dumps({'a': 'b'})) def test_dumps_namedtuple(self): n = collections.namedtuple("foo", "bar baz")(1, 2) self.assertEqual('[1, 2]', jsonutils.dumps(n)) def test_dump(self): expected = '{"a": "b"}' json_dict = {'a': 'b'} fp = six.StringIO() jsonutils.dump(json_dict, fp) self.assertEqual(expected, fp.getvalue()) def test_dump_namedtuple(self): expected = '[1, 2]' json_dict = collections.namedtuple("foo", "bar baz")(1, 2) fp = six.StringIO() jsonutils.dump(json_dict, fp) self.assertEqual(expected, fp.getvalue()) def test_dumps_uuid(self): self.assertEqual('"87edfaf4-9bff-11e4-82bd-b7b4e88d3780"', jsonutils.dumps( uuid.UUID("87edfaf49bff11e482bdb7b4e88d3780"))) def test_dump_set(self): # Only test with one entry because the order is random :] self.assertEqual("[1]", jsonutils.dumps(set([1]))) def test_loads(self): self.assertEqual({'a': 'b'}, jsonutils.loads('{"a": "b"}')) def test_loads_unicode(self): self.assertIsInstance(jsonutils.loads(b'"foo"'), six.text_type) self.assertIsInstance(jsonutils.loads(u'"foo"'), six.text_type) # 'test' in Ukrainian i18n_str_unicode = u'"\u0442\u0435\u0441\u0442"' self.assertIsInstance(jsonutils.loads(i18n_str_unicode), six.text_type) i18n_str = i18n_str_unicode.encode('utf-8') self.assertIsInstance(jsonutils.loads(i18n_str), six.text_type) def test_loads_with_kwargs(self): jsontext = u'{"foo": 3}' result = jsonutils.loads(jsontext, parse_int=lambda x: 5) self.assertEqual(5, result['foo']) def test_load(self): jsontext = u'{"a": "\u0442\u044d\u0441\u0442"}' expected = {u'a': u'\u0442\u044d\u0441\u0442'} for encoding in ('utf-8', 'cp1251'): fp = six.BytesIO(jsontext.encode(encoding)) result = jsonutils.load(fp, encoding=encoding) self.assertEqual(expected, result) for key, val in result.items(): self.assertIsInstance(key, six.text_type) self.assertIsInstance(val, six.text_type) class JSONUtilsTestJson(JSONUtilsTestMixin, test_base.BaseTestCase): json_impl = json class JSONUtilsTestSimpleJson(JSONUtilsTestMixin, test_base.BaseTestCase): json_impl = simplejson class ToPrimitiveTestCase(test_base.BaseTestCase): def setUp(self): super(ToPrimitiveTestCase, self).setUp() self.trans_fixture = self.useFixture(fixture.Translation()) def test_list(self): self.assertEqual(jsonutils.to_primitive([1, 2, 
3]), [1, 2, 3]) def test_set(self): self.assertEqual(jsonutils.to_primitive(set([1, 2, 3])), [1, 2, 3]) def test_empty_list(self): self.assertEqual(jsonutils.to_primitive([]), []) def test_tuple(self): self.assertEqual(jsonutils.to_primitive((1, 2, 3)), [1, 2, 3]) def test_dict(self): self.assertEqual(jsonutils.to_primitive(dict(a=1, b=2, c=3)), dict(a=1, b=2, c=3)) def test_empty_dict(self): self.assertEqual(jsonutils.to_primitive({}), {}) def test_datetime(self): x = datetime.datetime(1920, 2, 3, 4, 5, 6, 7) self.assertEqual(jsonutils.to_primitive(x), '1920-02-03T04:05:06.000007') def test_uuid(self): x = uuid.uuid4() self.assertEqual(jsonutils.to_primitive(x), six.text_type(x)) def test_datetime_preserve(self): x = datetime.datetime(1920, 2, 3, 4, 5, 6, 7) self.assertEqual(jsonutils.to_primitive(x, convert_datetime=False), x) def test_DateTime(self): x = xmlrpclib.DateTime() x.decode("19710203T04:05:06") self.assertEqual(jsonutils.to_primitive(x), '1971-02-03T04:05:06.000000') def test_iter(self): class IterClass(object): def __init__(self): self.data = [1, 2, 3, 4, 5] self.index = 0 def __iter__(self): return self def next(self): if self.index == len(self.data): raise StopIteration self.index = self.index + 1 return self.data[self.index - 1] __next__ = next x = IterClass() self.assertEqual(jsonutils.to_primitive(x), [1, 2, 3, 4, 5]) def test_iteritems(self): class IterItemsClass(object): def __init__(self): self.data = dict(a=1, b=2, c=3).items() self.index = 0 def iteritems(self): return self.data x = IterItemsClass() p = jsonutils.to_primitive(x) self.assertEqual(p, {'a': 1, 'b': 2, 'c': 3}) def test_iteritems_with_cycle(self): class IterItemsClass(object): def __init__(self): self.data = dict(a=1, b=2, c=3) self.index = 0 def iteritems(self): return self.data.items() x = IterItemsClass() x2 = IterItemsClass() x.data['other'] = x2 x2.data['other'] = x # If the cycle isn't caught, to_primitive() will eventually result in # an exception due to excessive recursion depth. 
jsonutils.to_primitive(x) def test_instance(self): class MysteryClass(object): a = 10 def __init__(self): self.b = 1 x = MysteryClass() self.assertEqual(jsonutils.to_primitive(x, convert_instances=True), dict(b=1)) self.assertEqual(jsonutils.to_primitive(x), x) def test_typeerror(self): x = bytearray # Class, not instance if six.PY3: self.assertEqual(jsonutils.to_primitive(x), u"<class 'bytearray'>") else: self.assertEqual(jsonutils.to_primitive(x), u"<type 'bytearray'>") def test_nasties(self): def foo(): pass x = [datetime, foo, dir] ret = jsonutils.to_primitive(x) self.assertEqual(len(ret), 3) self.assertTrue(ret[0].startswith(u"<module 'datetime' from ") or ret[0].startswith(u"<module 'datetime' (built-in)")) if six.PY3: self.assertTrue(ret[1].startswith( '<function ToPrimitiveTestCase.test_nasties.<locals>.foo at 0x' )) else: self.assertTrue(ret[1].startswith('<function foo at 0x')) self.assertEqual(ret[2], '<built-in function dir>') def test_depth(self): class LevelsGenerator(object): def __init__(self, levels): self._levels = levels def iteritems(self): if self._levels == 0: return iter([]) else: return iter([(0, LevelsGenerator(self._levels - 1))]) l4_obj = LevelsGenerator(4) json_l2 = {0: {0: '?'}} json_l3 = {0: {0: {0: '?'}}} json_l4 = {0: {0: {0: {0: '?'}}}} ret = jsonutils.to_primitive(l4_obj, max_depth=2) self.assertEqual(ret, json_l2) ret = jsonutils.to_primitive(l4_obj, max_depth=3) self.assertEqual(ret, json_l3) ret = jsonutils.to_primitive(l4_obj, max_depth=4) self.assertEqual(ret, json_l4) def test_ipaddr(self): thing = {'ip_addr': netaddr.IPAddress('1.2.3.4')} ret = jsonutils.to_primitive(thing) self.assertEqual({'ip_addr': '1.2.3.4'}, ret) def test_dumps_ipaddr(self): thing = {'ip_addr': netaddr.IPAddress('1.2.3.4')} ret = jsonutils.dumps(thing) self.assertEqual('{"ip_addr": "1.2.3.4"}', ret) def test_dump_ipaddr(self): thing = {'ip_addr': netaddr.IPAddress('1.2.3.4')} fp = six.StringIO() jsonutils.dump(thing, fp) self.assertEqual('{"ip_addr": "1.2.3.4"}', fp.getvalue()) def test_message_with_param(self): msg = self.trans_fixture.lazy('A message with param: %s') msg = msg % 'test_domain' ret = jsonutils.to_primitive(msg) self.assertEqual(msg, ret) def test_message_with_named_param(self): msg = self.trans_fixture.lazy('A message with params: %(param)s') msg = msg % {'param': 'hello'} ret = jsonutils.to_primitive(msg) self.assertEqual(msg, ret)
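# --- Hypothetical illustration, not part of the original test module ---
# A condensed view of the behaviour exercised by the tests above:
# jsonutils.to_primitive() converts non-JSON-native values (datetime, UUID,
# sets) into JSON-friendly primitives before jsonutils.dumps() encodes them.
def _to_primitive_sketch():
    sample = {
        'when': datetime.datetime(1920, 2, 3, 4, 5, 6, 7),
        'id': uuid.UUID('87edfaf4-9bff-11e4-82bd-b7b4e88d3780'),
        'tags': set([1]),
    }
    primitive = jsonutils.to_primitive(sample)
    assert primitive['when'] == '1920-02-03T04:05:06.000007'
    assert primitive['id'] == '87edfaf4-9bff-11e4-82bd-b7b4e88d3780'
    assert primitive['tags'] == [1]
    return jsonutils.dumps(primitive)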
from pyVulkan import * import sdl2 import ctypes import sys def string(char_ptr): if sys.version_info < (3, 0): return ffi.string(char_ptr) else: return ffi.string(char_ptr).decode('ascii') class Demo: def __init__(self): app_name = 'Demo' self.width = 300 self.height = 300 self.setup_cmd = VK_NULL_HANDLE self.old_swapchain = VK_NULL_HANDLE depth_stencil = 1.0 #initialize app_info = VkApplicationInfo(pApplicationName=app_name, applicationVersion=0, pEngineName=app_name, engineVersion=0, apiVersion=VK_MAKE_VERSION(1, 0, 0)) def _getInstanceLayers(): instance_validation_layers_alts = [["VK_LAYER_LUNARG_standard_validation"], ["VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_parameter_validation", "VK_LAYER_LUNARG_device_limits", "VK_LAYER_LUNARG_object_tracker", "VK_LAYER_LUNARG_image", "VK_LAYER_LUNARG_core_validation", "VK_LAYER_LUNARG_swapchain", "VK_LAYER_GOOGLE_unique_objects"]] instance_layer_names = [string(i.layerName) for _, i in enumerate(vkEnumerateInstanceLayerProperties())] return next((i for i in instance_validation_layers_alts if set(i).issubset(instance_layer_names)), []) # instance_layers = [] instance_layers = _getInstanceLayers() extensions = [string(i.extensionName) for i in vkEnumerateInstanceExtensionProperties(None)] @vkDebugReportCallbackEXT def dbgFunc(*args): print (string(args[6])) return True debug_info = VkDebugReportCallbackCreateInfoEXT(pfnCallback=dbgFunc, flags=VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT) instance_info = VkInstanceCreateInfo(pApplicationInfo=app_info, enabledLayerCount=len(instance_layers), ppEnabledLayerNames=instance_layers, enabledExtensionCount=len(extensions), ppEnabledExtensionNames=extensions, pNext=debug_info) ptrs = set() @vkAllocationFunction def allocFunc(*args): temp = ffi.new("char[]", args[1]) ptrs.add(temp) return temp @vkFreeFunction def freeFunc(*args): if args[1] != ffi.NULL: ptrs.remove(args[1]) @vkReallocationFunction def reallocFunc(*args): raise NotImplementedError() @vkInternalAllocationNotification def internalAllocNotify(*args): raise NotImplementedError() @vkInternalFreeNotification def internalFreeNotify(*args): raise NotImplementedError() allocation_callbacks = VkAllocationCallbacks(pUserData=None, pfnAllocation=allocFunc, pfnReallocation=reallocFunc, pfnFree=freeFunc, pfnInternalAllocation=internalAllocNotify, pfnInternalFree=internalFreeNotify) # inst = vkCreateInstance(instance_info, allocation_callbacks) self.inst = vkCreateInstance(instance_info, None) self.vkDestroySurfaceKHR = vkGetInstanceProcAddr(self.inst, 'vkDestroySurfaceKHR') vkGetPhysicalDeviceSurfaceSupportKHR = vkGetInstanceProcAddr(self.inst, 'vkGetPhysicalDeviceSurfaceSupportKHR') vkGetPhysicalDeviceSurfaceFormatsKHR = vkGetInstanceProcAddr(self.inst, 'vkGetPhysicalDeviceSurfaceFormatsKHR') vkGetPhysicalDeviceSurfaceCapabilitiesKHR = vkGetInstanceProcAddr(self.inst, 'vkGetPhysicalDeviceSurfaceCapabilitiesKHR') vkGetPhysicalDeviceSurfacePresentModesKHR = vkGetInstanceProcAddr(self.inst, 'vkGetPhysicalDeviceSurfacePresentModesKHR') vkCreateDebugReportCallbackEXT = vkGetInstanceProcAddr(self.inst, 'vkCreateDebugReportCallbackEXT') self.vkDestroyDebugReportCallbackEXT = vkGetInstanceProcAddr(self.inst, 'vkDestroyDebugReportCallbackEXT') self.debug_callback = vkCreateDebugReportCallbackEXT(self.inst, debug_info, None) gpu = vkEnumeratePhysicalDevices(self.inst)[0] gpu_props = vkGetPhysicalDeviceProperties(gpu) queue_props = vkGetPhysicalDeviceQueueFamilyProperties(gpu) features = vkGetPhysicalDeviceFeatures(gpu) ##init sdl if 
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0: print(sdl2.SDL_GetError()) self.window = sdl2.SDL_CreateWindow(app_name.encode('ascii'), sdl2.SDL_WINDOWPOS_UNDEFINED, sdl2.SDL_WINDOWPOS_UNDEFINED, self.width, self.height, 0) if not self.window: print(sdl2.SDL_GetError()) wm_info = sdl2.SDL_SysWMinfo() sdl2.SDL_VERSION(wm_info.version) sdl2.SDL_GetWindowWMInfo(self.window, ctypes.byref(wm_info)) if wm_info.subsystem == sdl2.SDL_SYSWM_X11: vkCreateXlibSurfaceKHR = vkGetInstanceProcAddr(self.inst, 'vkCreateXlibSurfaceKHR') self.surface = vkCreateXlibSurfaceKHR(self.inst, VkXlibSurfaceCreateInfoKHR(dpy=wm_info.info.x11.display, window=wm_info.info.x11.window), None) elif wm_info.subsystem == sdl2.SDL_SYSWM_WINDOWS: vkCreateWin32SurfaceKHR = vkGetInstanceProcAddr(self.inst, 'vkCreateWin32SurfaceKHR') import win32misc hinstance = win32misc.getInstance(wm_info.info.win.window) self.surface = vkCreateWin32SurfaceKHR(self.inst, VkWin32SurfaceCreateInfoKHR(hinstance=hinstance, hwnd=wm_info.info.win.window), None) else: assert False support_presents = [vkGetPhysicalDeviceSurfaceSupportKHR(gpu, i, self.surface) for i, _ in enumerate(queue_props)] graphics_queue_node_index = None present_queue_node_index = None for i, v in enumerate(queue_props): if v.queueFlags & VK_QUEUE_GRAPHICS_BIT: if not graphics_queue_node_index: graphics_queue_node_index = i if support_presents[i] == VK_TRUE: graphics_queue_node_index = i present_queue_node_index = i break if not present_queue_node_index: for i, v in enumerate(support_presents): if v == VK_TRUE: present_queue_node_index = i assert (graphics_queue_node_index is not None) and (present_queue_node_index is not None) assert graphics_queue_node_index == present_queue_node_index queue_info = VkDeviceQueueCreateInfo(queueFamilyIndex=graphics_queue_node_index, queueCount=1, pQueuePriorities=[0.0]) extensions = [string(i.extensionName) for i in vkEnumerateDeviceExtensionProperties(gpu, None)] device_info = VkDeviceCreateInfo(queueCreateInfoCount=1, pQueueCreateInfos=queue_info, pEnabledFeatures=VkPhysicalDeviceFeatures(), ppEnabledLayerNames=[], ppEnabledExtensionNames=extensions) self.device = vkCreateDevice(gpu, device_info, None) vkCreateSwapchainKHR = vkGetDeviceProcAddr(self.device, 'vkCreateSwapchainKHR') vkGetSwapchainImagesKHR = vkGetDeviceProcAddr(self.device, 'vkGetSwapchainImagesKHR') self.vkAcquireNextImageKHR = vkGetDeviceProcAddr(self.device, 'vkAcquireNextImageKHR') self.vkQueuePresentKHR = vkGetDeviceProcAddr(self.device, 'vkQueuePresentKHR') self.vkDestroySwapchainKHR = vkGetDeviceProcAddr(self.device, 'vkDestroySwapchainKHR') self.queue = vkGetDeviceQueue(self.device, graphics_queue_node_index, 0) surface_formats = vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, self.surface) if len(surface_formats) == 1 and surface_formats[0].format == VK_FORMAT_UNDEFINED: format_ = VK_FORMAT_B8G8R8A8_UNORM else: format_ = surface_formats[0].format color_space = surface_formats[0].colorSpace self.memory_properties = vkGetPhysicalDeviceMemoryProperties(gpu) self.cmd_pool_info = VkCommandPoolCreateInfo(queueFamilyIndex=graphics_queue_node_index, flags=VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT) self.cmd_pool = vkCreateCommandPool(self.device, self.cmd_pool_info, None) cmd_buffer_info = VkCommandBufferAllocateInfo(commandPool=self.cmd_pool, level=VK_COMMAND_BUFFER_LEVEL_PRIMARY, commandBufferCount=1) self.draw_cmd = vkAllocateCommandBuffers(self.device, cmd_buffer_info)[0] surface_capabilities = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, self.surface) present_modes = 
vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, self.surface) if surface_capabilities.currentExtent.width == ffi.cast('uint32_t', -1): swapchain_extent = VkExtent2D(width=self.width, height=height) else: swapchain_extent = surface_capabilities.currentExtent width = surface_capabilities.currentExtent.width height = surface_capabilities.currentExtent.height swapchain_present_mode = VK_PRESENT_MODE_MAILBOX_KHR desiredNumberOfSwapchainImages = surface_capabilities.minImageCount + 1 if (surface_capabilities.maxImageCount > 0) and (desiredNumberOfSwapchainImages > surface_capabilities.maxImageCount): desiredNumberOfSwapchainImages = surface_capabilities.maxImageCount pre_transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR if surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR else surface_capabilities.currentTransform swapchain_info = VkSwapchainCreateInfoKHR(surface=self.surface, minImageCount=desiredNumberOfSwapchainImages, imageFormat=format_, imageColorSpace=color_space, imageExtent=swapchain_extent, imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, preTransform=pre_transform, compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, imageArrayLayers=1, imageSharingMode=VK_SHARING_MODE_EXCLUSIVE, presentMode=swapchain_present_mode, oldSwapchain=self.old_swapchain, clipped=True) self.swapchain = vkCreateSwapchainKHR(self.device, swapchain_info, None) self.swapchain_images = vkGetSwapchainImagesKHR(self.device, self.swapchain) def _getView(image): self.set_image_layout(image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, 0, VK_ACCESS_MEMORY_READ_BIT) return vkCreateImageView(self.device, VkImageViewCreateInfo(format=format_, components={'r': VK_COMPONENT_SWIZZLE_R, 'g': VK_COMPONENT_SWIZZLE_G, 'b': VK_COMPONENT_SWIZZLE_B, 'a': VK_COMPONENT_SWIZZLE_A}, subresourceRange={'aspectMask': VK_IMAGE_ASPECT_COLOR_BIT, 'baseMipLevel': 0, 'levelCount': 1, 'baseArrayLayer': 0, 'layerCount': 1}, viewType=VK_IMAGE_VIEW_TYPE_2D, flags=0, image=image), None) self.views = [_getView(i) for i in self.swapchain_images] current_buffer = 0 depth_format = VK_FORMAT_D16_UNORM image = VkImageCreateInfo(imageType=VK_IMAGE_TYPE_2D, format=depth_format, extent=[self.width, self.height, 1], mipLevels=1, arrayLayers=1, samples=VK_SAMPLE_COUNT_1_BIT, tiling=VK_IMAGE_TILING_OPTIMAL, usage=VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) mem_alloc = VkMemoryAllocateInfo() view = VkImageViewCreateInfo(format=depth_format, subresourceRange=VkImageSubresourceRange(aspectMask=VK_IMAGE_ASPECT_DEPTH_BIT, baseMipLevel=0, levelCount=1, baseArrayLayer=0, layerCount=1), viewType=VK_IMAGE_VIEW_TYPE_2D) self.depth_image = vkCreateImage(self.device, image, None) mem_reqs = vkGetImageMemoryRequirements(self.device, self.depth_image) mem_alloc.allocationSize = mem_reqs.size mem_alloc.memoryTypeIndex = self.memory_type_from_properties(mem_reqs.memoryTypeBits, 0) self.depth_mem = vkAllocateMemory(self.device, mem_alloc, None) vkBindImageMemory(self.device, self.depth_image, self.depth_mem, 0) self.set_image_layout(self.depth_image, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, 0) view.image = self.depth_image self.depth_view = vkCreateImageView(self.device, view, None) attachments = [VkAttachmentDescription(format=format_, samples=VK_SAMPLE_COUNT_1_BIT, loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR, storeOp=VK_ATTACHMENT_STORE_OP_STORE, stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE, stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE, 
initialLayout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, finalLayout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL), VkAttachmentDescription(format=depth_format, samples=VK_SAMPLE_COUNT_1_BIT, loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR, storeOp=VK_ATTACHMENT_STORE_OP_DONT_CARE, stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE, stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE, initialLayout=VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, finalLayout=VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)] color_reference = VkAttachmentReference(attachment=0, layout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) depth_reference = VkAttachmentReference(attachment=1, layout=VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) subpass = VkSubpassDescription(pipelineBindPoint=VK_PIPELINE_BIND_POINT_GRAPHICS, colorAttachmentCount=1, pColorAttachments=[color_reference], pDepthStencilAttachment=depth_reference) rp_info = VkRenderPassCreateInfo(attachmentCount=len(attachments), pAttachments=attachments, subpassCount=1, pSubpasses=[subpass]) self.render_pass = vkCreateRenderPass(self.device, rp_info, None) self.framebuffers = [vkCreateFramebuffer(self.device, VkFramebufferCreateInfo(renderPass=self.render_pass, attachmentCount=2, pAttachments=[v, self.depth_view], width=self.width, height=self.height, layers=1), None) for i, v in enumerate(self.views)] def flush_init_cmd(self): if self.setup_cmd: vkEndCommandBuffer(self.setup_cmd) submit_info = VkSubmitInfo(commandBufferCount=1, pCommandBuffers=[self.setup_cmd]) vkQueueSubmit(self.queue, 1, submit_info, VK_NULL_HANDLE) vkQueueWaitIdle(self.queue) vkFreeCommandBuffers(self.device, self.cmd_pool, 1, [self.setup_cmd]) self.setup_cmd = VK_NULL_HANDLE cmd_buf_hinfo = VkCommandBufferInheritanceInfo(occlusionQueryEnable=VK_FALSE) cmd_buf_info = VkCommandBufferBeginInfo(pInheritanceInfo=cmd_buf_hinfo) vkBeginCommandBuffer(self.draw_cmd, cmd_buf_info) vkEndCommandBuffer(self.draw_cmd) def draw(self): vkDeviceWaitIdle(self.device) present_complete_semaphore = vkCreateSemaphore(self.device, VkSemaphoreCreateInfo(), None) current_buffer = self.vkAcquireNextImageKHR(self.device, self.swapchain, ffi.cast('uint64_t', -1), present_complete_semaphore, 0) self.set_image_layout(self.swapchain_images[current_buffer], VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_ACCESS_MEMORY_READ_BIT) self.flush_init_cmd() cmd_buf_hinfo = VkCommandBufferInheritanceInfo(occlusionQueryEnable=VK_FALSE) cmd_buf_info = VkCommandBufferBeginInfo(pInheritanceInfo=cmd_buf_hinfo) clear_values = [VkClearValue(color={'float32': [0.2, 0.2, 0.2, 0.2]}), VkClearValue(depthStencil={'depth': 1.0, 'stencil': 0})] rp_begin = VkRenderPassBeginInfo(renderPass=self.render_pass, framebuffer=self.framebuffers[current_buffer], renderArea={'offset': {'x': 0, 'y': 0}, 'extent': {'width': self.width, 'height': self.height}}, clearValueCount=len(clear_values), pClearValues=clear_values) vkBeginCommandBuffer(self.draw_cmd, cmd_buf_info) vkCmdBeginRenderPass(self.draw_cmd, rp_begin, VK_SUBPASS_CONTENTS_INLINE) vkCmdEndRenderPass(self.draw_cmd) pre_present_barrier = VkImageMemoryBarrier(srcAccessMask=VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, dstAccessMask=VK_ACCESS_MEMORY_READ_BIT, oldLayout=VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, newLayout=VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, srcQueueFamilyIndex=ffi.cast('uint32_t', VK_QUEUE_FAMILY_IGNORED), dstQueueFamilyIndex=ffi.cast('uint32_t', VK_QUEUE_FAMILY_IGNORED), subresourceRange=[VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1], 
image=self.swapchain_images[current_buffer]) vkCmdPipelineBarrier(self.draw_cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, None, 0, None, 1, pre_present_barrier) vkEndCommandBuffer(self.draw_cmd) submit_info = VkSubmitInfo(waitSemaphoreCount=1, pWaitSemaphores=[present_complete_semaphore], pWaitDstStageMask=[VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT], commandBufferCount=1, pCommandBuffers=[self.draw_cmd]) vkQueueSubmit(self.queue, 1, [submit_info], VK_NULL_HANDLE) vkQueueWaitIdle(self.queue) present = VkPresentInfoKHR(swapchainCount=1, pSwapchains=[self.swapchain], pImageIndices=[current_buffer]) self.vkQueuePresentKHR(self.queue, present) vkDestroySemaphore(self.device, present_complete_semaphore, None) def run(self): #main loop running = True event = sdl2.SDL_Event() last_ticks = 0 while running: while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0: if event.type == sdl2.SDL_QUIT: running = False new_ticks = sdl2.SDL_GetTicks() if new_ticks - last_ticks > 1000 / 30: self.draw() last_ticks = new_ticks def release(self): #cleanup vkFreeMemory(self.device, self.depth_mem, None) for i in self.framebuffers: vkDestroyFramebuffer(self.device, i, None) if self.setup_cmd: vkFreeCommandBuffers(self.device, self.cmd_pool, 1, [self.setup_cmd]) vkFreeCommandBuffers(self.device, self.cmd_pool, 1, [self.draw_cmd]) vkDestroyCommandPool(self.device, self.cmd_pool, None) vkDestroyRenderPass(self.device, self.render_pass, None) for i in self.views: vkDestroyImageView(self.device, i, None) vkDestroyImageView(self.device, self.depth_view, None) vkDestroyImage(self.device, self.depth_image, None) self.vkDestroySwapchainKHR(self.device, self.swapchain, None) vkDestroyDevice(self.device, None) self.vkDestroyDebugReportCallbackEXT(self.inst, self.debug_callback, None) self.vkDestroySurfaceKHR(self.inst, self.surface, None) vkDestroyInstance(self.inst, None) sdl2.SDL_DestroyWindow(self.window) sdl2.SDL_Quit() def memory_type_from_properties(self, typeBits, requirements_mask): for i, v in enumerate(self.memory_properties.memoryTypes): if (typeBits & 1) == 1: if (v.propertyFlags & requirements_mask) == requirements_mask: return i typeBits >>= 1 assert False def set_image_layout(self, image, aspect_mask, old_image_layout, new_image_layout, src_access_mask=0, dst_access_mask=0): if self.setup_cmd == VK_NULL_HANDLE: cmd = VkCommandBufferAllocateInfo(commandPool=self.cmd_pool, level=VK_COMMAND_BUFFER_LEVEL_PRIMARY, commandBufferCount=1) self.setup_cmd = vkAllocateCommandBuffers(self.device, cmd)[0] cmd_buf_hinfo = VkCommandBufferInheritanceInfo() cmd_buf_info = VkCommandBufferBeginInfo(pInheritanceInfo=cmd_buf_hinfo) vkBeginCommandBuffer(self.setup_cmd, cmd_buf_info) image_memory_barrier = VkImageMemoryBarrier(srcAccessMask=src_access_mask, dstAccessMask=dst_access_mask, oldLayout=old_image_layout, newLayout=new_image_layout, image=image, subresourceRange=[aspect_mask, 0, 1, 0, 1]) dst_stage_masks = {VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT} if new_image_layout in dst_stage_masks: image_memory_barrier.dstAccessMask = dst_stage_masks[new_image_layout] src_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT dest_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT vkCmdPipelineBarrier(self.setup_cmd, 
src_stages, dest_stages, 0, 0, None, 0, None, 1, [image_memory_barrier])

demo = Demo()
demo.run()
demo.release()
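# The memory-type lookup used by Demo.memory_type_from_properties() above is
# worth seeing on its own.  Below is a minimal, self-contained sketch of the
# same scan that runs without Vulkan or SDL2: MemoryType stands in for
# VkMemoryType, and the flag values are illustrative assumptions, not the
# real VK_* constants.

from collections import namedtuple

MemoryType = namedtuple('MemoryType', 'propertyFlags')


def pick_memory_type(memory_types, type_bits, required_flags):
    """Return the first memory type index allowed by type_bits whose
    propertyFlags contain all bits of required_flags, or None."""
    for i, mem_type in enumerate(memory_types):
        allowed = (type_bits >> i) & 1
        if allowed and (mem_type.propertyFlags & required_flags) == required_flags:
            return i
    return None


# Usage: type_bits 0b110 means only memory types 1 and 2 may back the resource.
HOST_VISIBLE = 0x2  # illustrative flag value only
types = [MemoryType(0x1), MemoryType(0x2 | 0x4), MemoryType(0x2)]
assert pick_memory_type(types, 0b110, HOST_VISIBLE) == 1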
from test import support import random import unittest from functools import cmp_to_key verbose = support.verbose nerrors = 0 def check(tag, expected, raw, compare=None): global nerrors if verbose: print(" checking", tag) orig = raw[:] # save input in case of error if compare: raw.sort(key=cmp_to_key(compare)) else: raw.sort() if len(expected) != len(raw): print("error in", tag) print("length mismatch;", len(expected), len(raw)) print(expected) print(orig) print(raw) nerrors += 1 return for i, good in enumerate(expected): maybe = raw[i] if good is not maybe: print("error in", tag) print("out of order at index", i, good, maybe) print(expected) print(orig) print(raw) nerrors += 1 return class TestBase(unittest.TestCase): def testStressfully(self): # Try a variety of sizes at and around powers of 2, and at powers of 10. sizes = [0] for power in range(1, 10): n = 2 ** power sizes.extend(range(n-1, n+2)) sizes.extend([10, 100, 1000]) class Complains(object): maybe_complain = True def __init__(self, i): self.i = i def __lt__(self, other): if Complains.maybe_complain and random.random() < 0.001: if verbose: print(" complaining at", self, other) raise RuntimeError return self.i < other.i def __repr__(self): return "Complains(%d)" % self.i class Stable(object): def __init__(self, key, i): self.key = key self.index = i def __lt__(self, other): return self.key < other.key def __repr__(self): return "Stable(%d, %d)" % (self.key, self.index) for n in sizes: x = list(range(n)) if verbose: print("Testing size", n) s = x[:] check("identity", x, s) s = x[:] s.reverse() check("reversed", x, s) s = x[:] random.shuffle(s) check("random permutation", x, s) y = x[:] y.reverse() s = x[:] check("reversed via function", y, s, lambda a, b: (b>a)-(b<a)) if verbose: print(" Checking against an insane comparison function.") print(" If the implementation isn't careful, this may segfault.") s = x[:] s.sort(key=cmp_to_key(lambda a, b: int(random.random() * 3) - 1)) check("an insane function left some permutation", x, s) if len(x) >= 2: def bad_key(x): raise RuntimeError s = x[:] self.assertRaises(RuntimeError, s.sort, key=bad_key) x = [Complains(i) for i in x] s = x[:] random.shuffle(s) Complains.maybe_complain = True it_complained = False try: s.sort() except RuntimeError: it_complained = True if it_complained: Complains.maybe_complain = False check("exception during sort left some permutation", x, s) s = [Stable(random.randrange(10), i) for i in range(n)] augmented = [(e, e.index) for e in s] augmented.sort() # forced stable because ties broken by index x = [e for e, i in augmented] # a stable sort of s check("stability", x, s) #============================================================================== class TestBugs(unittest.TestCase): def test_bug453523(self): # bug 453523 -- list.sort() crasher. # If this fails, the most likely outcome is a core dump. # Mutations during a list sort should raise a ValueError. class C: def __lt__(self, other): if L and random.random() < 0.75: L.pop() else: L.append(3) return random.random() < 0.5 L = [C() for i in range(50)] try: L.sort() except ValueError: pass @support.impl_detail(pypy=False) def test_undetected_mutation(self): # Python 2.4a1 did not always detect mutation # So does pypy... 
memorywaster = [] for i in range(20): def mutating_cmp(x, y): L.append(3) L.pop() return (x > y) - (x < y) L = [1,2] self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp)) def mutating_cmp(x, y): L.append(3) del L[:] return (x > y) - (x < y) self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp)) memorywaster = [memorywaster] #============================================================================== class TestDecorateSortUndecorate(unittest.TestCase): def test_decorated(self): data = 'The quick Brown fox Jumped over The lazy Dog'.split() copy = data[:] random.shuffle(data) data.sort(key=str.lower) def my_cmp(x, y): xlower, ylower = x.lower(), y.lower() return (xlower > ylower) - (xlower < ylower) copy.sort(key=cmp_to_key(my_cmp)) def test_baddecorator(self): data = 'The quick Brown fox Jumped over The lazy Dog'.split() self.assertRaises(TypeError, data.sort, key=lambda x,y: 0) def test_stability(self): data = [(random.randrange(100), i) for i in range(200)] copy = data[:] data.sort(key=lambda t: t[0]) # sort on the random first field copy.sort() # sort using both fields self.assertEqual(data, copy) # should get the same result def test_key_with_exception(self): # Verify that the wrapper has been removed data = list(range(-2, 2)) dup = data[:] self.assertRaises(ZeroDivisionError, data.sort, key=lambda x: 1/x) self.assertEqual(data, dup) def test_key_with_mutation(self): data = list(range(10)) def k(x): del data[:] data[:] = range(20) return x self.assertRaises(ValueError, data.sort, key=k) def test_key_with_mutating_del(self): data = list(range(10)) class SortKiller(object): def __init__(self, x): pass def __del__(self): del data[:] data[:] = range(20) def __lt__(self, other): return id(self) < id(other) try: data.sort(key=SortKiller) except ValueError: pass def test_key_with_mutating_del_and_exception(self): data = list(range(10)) ## dup = data[:] class SortKiller(object): def __init__(self, x): if x > 2: raise RuntimeError def __del__(self): del data[:] data[:] = list(range(20)) self.assertRaises(RuntimeError, data.sort, key=SortKiller) ## major honking subtlety: we *can't* do: ## ## self.assertEqual(data, dup) ## ## because there is a reference to a SortKiller in the ## traceback and by the time it dies we're outside the call to ## .sort() and so the list protection gimmicks are out of ## date (this cost some brain cells to figure out...). def test_reverse(self): data = list(range(100)) random.shuffle(data) data.sort(reverse=True) self.assertEqual(data, list(range(99,-1,-1))) def test_reverse_stability(self): data = [(random.randrange(100), i) for i in range(200)] copy1 = data[:] copy2 = data[:] def my_cmp(x, y): x0, y0 = x[0], y[0] return (x0 > y0) - (x0 < y0) def my_cmp_reversed(x, y): x0, y0 = x[0], y[0] return (y0 > x0) - (y0 < x0) data.sort(key=cmp_to_key(my_cmp), reverse=True) copy1.sort(key=cmp_to_key(my_cmp_reversed)) self.assertEqual(data, copy1) copy2.sort(key=lambda x: x[0], reverse=True) self.assertEqual(data, copy2) #============================================================================== if __name__ == "__main__": unittest.main()
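# A short, self-contained illustration of the two ideas these tests lean on
# most: adapting an old-style comparison function with functools.cmp_to_key,
# and relying on sort stability for equal keys.  The sample records are
# invented for the example.

from functools import cmp_to_key


def reverse_cmp(a, b):
    """Old-style comparison function that sorts in descending order."""
    return (b > a) - (b < a)


records = [('b', 1), ('a', 2), ('b', 0), ('a', 1)]

# cmp_to_key turns the comparison function into a key= callable.
by_reverse_name = sorted(records, key=cmp_to_key(lambda x, y: reverse_cmp(x[0], y[0])))
assert [r[0] for r in by_reverse_name] == ['b', 'b', 'a', 'a']

# Stability: sorting on the first field only keeps records with equal keys in
# their original relative order, which is what test_stability checks.
by_name = sorted(records, key=lambda r: r[0])
assert by_name == [('a', 2), ('a', 1), ('b', 1), ('b', 0)]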
""" Support Code ## Standard Headers """ from __future__ import division,print_function import sys,random,math sys.dont_write_bytecode = True from settings import * import Technix.sdivUtil as sdivUtil """ ## Simple, low-level stuff ### Maths Stuff """ def gt(x,y): return x > y def lt(x,y): return x < y def medianIQR(lst, ordered=False): if not ordered: lst = sorted(lst) n = len(lst) q = n//4 iqr = lst[q*3] - lst[q] if n % 2: return lst[q*2],iqr else: p = max(0,q-1) return (lst[p] + lst[q]) * 0.5,iqr def median(lst,ordered=False): return medianIQR(lst,ordered)[0] """ An accumulator for reporting on numbers. """ class N(): "Add/delete counts of numbers." def __init__(i,inits=[]): i.zero() map(i.__iadd__,inits) def zero(i): i.n = i.mu = i.m2 = 0 i.cache= Cache() def sd(i) : if i.n < 2: return 0 else: return (max(0,i.m2)/(i.n - 1))**0.5 def __iadd__(i,x): i.cache += x i.n += 1 delta = x - i.mu i.mu += delta/(1.0*i.n) i.m2 += delta*(x - i.mu) return i def __isub__(i,x): i.cache = Cache() if i.n < 2: return i.zero() i.n -= 1 delta = x - i.mu i.mu -= delta/(1.0*i.n) i.m2 -= delta*(x - i.mu) return i class Cache: "Keep a random sample of stuff seen so far." def __init__(i,inits=[]): i.all,i.n,i._has = [],0,None map(i.__iadd__,inits) def __iadd__(i,x): i.n += 1 if len(i.all) < The.cache.size: # if not full i._has = None i.all += [x] # then add else: # otherwise, maybe replace an old item if random.random() <= The.cache.size/i.n: i._has=None i.all[int(random.random()*The.cache.size)] = x return i def has(i): if i._has == None: lst = sorted(i.all) med,iqr = medianIQR(lst,ordered=True) i._has = o( median = med, iqr = iqr, lo = i.all[0], hi = i.all[-1]) return i._has """ ### Random stuff. """ by = lambda x: random.uniform(0,x) rseed = random.seed any = random.choice rand = random.random def seed(r=None): global The if The is None: The=defaults() if r is None: r = The.seed rseed(r) """ ### List Handling Tricks """ def first(lst): return lst[0] def second(lst): return lst[1] def third(lst): return lst[2] """ ### Printing Stuff Print without newline: """ def say(*lst): print(*lst,end="") """ Print a list of numbers without an excess of decimal places: """ def gs(lst) : return [g(x) for x in lst] def g(x) : txt = '%g' % x return int(txt) if int(x) == x else float(txt) """ Pretty print a dictionary: """ def showd(d): def one(k,v): if isinstance(v,list): v = gs(v) if isinstance(v,float): return ":%s %g" % (k,v) return ":%s %s" % (k,v) return ' '.join([one(k,v) for k,v in sorted(d.items()) if not "_" in k]) """ ## Decorator to run code at Start-up """ def go(f): "A decorator that runs code at load time." print("\n# ---|", f.__name__,"|-----------------") if f.__doc__: print("#", f.__doc__) f() """ ## Handling command line options. Convert command line to a function call. e.g. if the file lib.py ends with if __name__ == '__main__':eval(todo()) then python lib.py myfun :a 1 :b fred results in a call to _myfun(a=1,b='fred')_. """ def todo(com="print(The._logo,'WHERE (2.0) you at?')"): import sys if len(sys.argv) < 2: return com def strp(x): return isinstance(x,basestring) def wrap(x): return "'%s'"%x if strp(x) else str(x) def oneTwo(lst): while lst: yield lst.pop(0), lst.pop(0) def value(x): try: return eval(x) except: return x def two(x,y): return x[1:] +"="+wrap(value(y)) twos = [two(x,y) for x,y in oneTwo(sys.argv[2:])] return sys.argv[1]+'(**dict('+ ','.join(twos)+'))' """ ## More interesting, low-level stuff """ def timing(f,repeats=10): "How long does 'f' take to run?" 
import time time1 = time.clock() for _ in range(repeats): f() return (time.clock() - time1)*1.0/repeats """ ## Data Completion Tool Fills in some details on a table of data. For example, def nasa93(): vl=1;l=2;n=3;h=4;vh=5;xh=6 return data(indep= [ 'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse', 'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex', 'ltex', 'tool', 'site', 'sced', 'kloc'], less = ['effort', 'defects', 'months'], _rows=[ [h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3], [h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0], [h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1], ... Adds in information on _cols_, _decisions_, _hi,lo_, etc: { :cols [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 22, 23, 24] :decisions [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] :eval <function <lambda> at 0x7f3f825bea28> :hi {0: 4, 1: 4, 2: 4, 3: 5, 4: 4, 5: 5, 6: 5, 7: 6, 8: 3, 9: 3, 10: 6, 11: 6, 12: 4, 13: 5, 14: 5, 15: 3, 16: 5, 17: 4, 18: 4, 19: 4, 20: 3, 21: 3, 22: 980, 23: 8211, 24: 50961} :lo {0: 4, 1: 4, 2: 4, 3: 5, 4: 2, 5: 2, 6: 2, 7: 2, 8: 3, 9: 3, 10: 3, 11: 3, 12: 2, 13: 3, 14: 3, 15: 3, 16: 2, 17: 1, 18: 1, 19: 3, 20: 3, 21: 2, 22: 0.9, 23: 8.4, 24: 28} :names ['Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse', 'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex', 'ltex', 'tool', 'site', 'sced', 'kloc', 'effort', 'defects', 'months'] :objectives [22, 23, 24] :w {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 1, 22: -1, 23: -1, 24: -1} } Code: """ def data(indep=[], less=[], more=[], _rows=[], _tunings=[],weighFeature=True, _split="variance",_isCocomo = True): nindep= len(indep) ndep = len(less) + len(more) m= o(lo={}, hi={}, w={}, eval = lambda m,it : True, _rows = [o(cells=r,score=0,scored=False, x0=None,y0=None) for r in _rows], indep = indep, less = less, more = more, names = indep+less+more, _tunings = _tunings, _split = _split, _isCocomo = _isCocomo) m.decisions = [x for x in range(nindep)] m.objectives = [nindep+ x for x in range(ndep)] m.cols = m.decisions + m.objectives for x in m.decisions : m.w[x]= 1 for y,_ in enumerate(less) : m.w[len(m.decisions)+y] = -1 for z,_ in enumerate(more) : m.w[len(m.decisions)+len(less)+z] = 1 for x in m.cols: all = sorted(row.cells[x] for row in m._rows) m.lo[x] = all[0] m.hi[x] = all[-1] if weighFeature : sdivUtil.fss(m) return m """ Add tuning coeffecients to LOC """ def tuneLOC(m): for row in m._rows: if m._klocWt : b = m._klocWt row.cells[22] = row.cells[22]**(b) else : sfs, b = getTuningFactors(m, row.cells) row.cells[22] = row.cells[22]**(b+0.01*sfs) """ Compute tuning factors for LOC based on """ def getTuningFactors(m, cells): tunings = m._tunings sfs = 0 b = 0.91 for i in range(5): sfs += tunings[i][cells[i]] return sfs,b """ Effort is the third column in nasa93 dataset """ def effort(m, row): return row.cells[len(m.indep)] """ ## Start-up Actions """ if __name__ == '__main__': eval(todo())
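# The N accumulator above updates its mean and m2 incrementally (Welford's
# online algorithm).  Here is a stand-alone sketch of the same update rule
# without the settings/Cache machinery; the function and variable names are
# my own, not part of this library.

def online_mean_sd(xs):
    """One pass over xs, returning (mean, sample standard deviation)."""
    n = mu = m2 = 0
    for x in xs:
        n += 1
        delta = x - mu
        mu += delta / n
        m2 += delta * (x - mu)
    sd = (max(0, m2) / (n - 1)) ** 0.5 if n > 1 else 0
    return mu, sd


mean, sd = online_mean_sd([2, 4, 4, 4, 5, 5, 7, 9])
assert abs(mean - 5.0) < 1e-9
assert abs(sd - (32 / 7) ** 0.5) < 1e-9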
from typing import Union, Dict, Pattern, Callable, Any, List import re import logging log = logging.getLogger('foo') class ValidationError(Exception): def __init__(self, reason): super().__init__() self.reason = reason class RequiredKey: def __init__(self, key: str): self.key = key Key = Union[str, RequiredKey] class TypedValidator: def __init__(self, t): self.t = t def validate(self, name, obj): if not isinstance(obj, self.t): raise ValidationError(f'{name} is not {self.t}') class DictValidator(TypedValidator): def __init__(self, vchecker: 'Validator'): super().__init__(dict) self.vchecker = vchecker def validate(self, name: str, obj): super().validate(name, obj) for k, v in obj.items(): if not isinstance(k, str): raise ValidationError(f'{name} has non-str key') self.vchecker.validate(f"{name}[{k}]", v) class KeyedValidator(TypedValidator): def __init__(self, keyed_checkers: Dict[Key, 'Validator']): super().__init__(dict) self.checkers = {} for k, v in keyed_checkers.items(): if isinstance(k, RequiredKey): self.checkers[k.key] = (v, True) else: self.checkers[k] = (v, False) def __getitem__(self, key: str): return self.checkers[key][0] def validate(self, name: str, obj): super().validate(name, obj) unknown_keys = set(obj.keys()) - set(self.checkers.keys()) if len(unknown_keys) != 0: raise ValidationError(f'unknown keys in {name}: {unknown_keys}') for k, (checker, required) in self.checkers.items(): if required and k not in obj: raise ValidationError(f'{name} missing required key {k}.') if k in obj: checker.validate(f"{name}.{k}", obj[k]) class ListValidator(TypedValidator): def __init__(self, checker: 'Validator'): super().__init__(list) self.checker = checker def validate(self, name: str, obj): super().validate(name, obj) for i, elt in enumerate(obj): self.checker.validate(f"{name}[{i}]", elt) class SetValidator: def __init__(self, valid): self.valid = valid def validate(self, name: str, obj): if obj not in self.valid: raise ValidationError(f'{name} must be one of: {self.valid}') class RegexValidator(TypedValidator): def __init__(self, pattern: str, re_obj: Pattern, maxlen: int): super().__init__(str) self.pattern = pattern self.re_obj = re_obj if re_obj is not None else re.compile(pattern) self.maxlen = maxlen def validate(self, name: str, obj): super().validate(name, obj) if self.maxlen is not None and len(obj) > self.maxlen: raise ValidationError(f'length of {name} must be <= {self.maxlen}') if not self.re_obj.fullmatch(obj): raise ValidationError(f'{name} must match regex: {self.pattern}') class NumericValidator: def __init__(self, conditions: Dict[str, Callable[[Any], Any]]): self.conditions = conditions def validate(self, name: str, obj): if not isinstance(obj, int) and not isinstance(obj, float): raise ValidationError(f'{name} is not numeric') for text, condition in self.conditions.items(): if not condition(obj): raise ValidationError(f'{name} does not satisfy the condition {text}') class SwitchValidator(TypedValidator): def __init__(self, key: str, checkers: Dict[str, Dict[Key, 'Validator']]): super().__init__(dict) self.key = key self.valid_key = oneof(*checkers.keys()) self.checkers = {k: keyed({required(key): self.valid_key, **fields}) for k, fields in checkers.items()} def __getitem__(self, key): return self.checkers[key] def validate(self, name: str, obj): super().validate(name, obj) key = obj[self.key] self.valid_key.validate(f"{name}.{key}", key) self.checkers[key].validate(f"{name}", obj) class NullableValidator: def __init__(self, wrapped: 'Validator'): self.checker = 
wrapped def validate(self, name: str, obj): if obj is not None: self.checker.validate(name, obj) class TruthyValidator: def validate(self, name: str, obj): # pylint: disable=no-self-use if not obj: raise ValidationError(f'{name} cannot be {obj}') class MultipleValidator: def __init__(self, checkers: List['Validator']): self.checkers = checkers def validate(self, name: str, obj): excs = [] for checker in self.checkers: try: checker.validate(name, obj) return except ValidationError as e: excs.append(e) if excs: reasons = ' or '.join([e.reason for e in excs]) log.info(reasons) raise ValidationError(f'{name} does not satisfy any conditions: {reasons}') def required(key: str) -> RequiredKey: return RequiredKey(key) str_type = TypedValidator(str) non_empty_str_type = MultipleValidator([str_type, TruthyValidator()]) bool_type = TypedValidator(bool) int_type = TypedValidator(int) Validator = Union[TypedValidator, NumericValidator, NullableValidator, TruthyValidator, SetValidator, MultipleValidator] def dictof(vchecker: Validator): return DictValidator(vchecker) def keyed(checkers: Dict[Key, Validator]): return KeyedValidator(checkers) def listof(checker: Validator): return ListValidator(checker) def oneof(*items): return SetValidator(set(items)) def regex(pattern, re_obj=None, maxlen=None): return RegexValidator(pattern, re_obj, maxlen) def nullable(wrapped: Validator): return NullableValidator(wrapped) def numeric(**conditions: Callable[[Any], Any]): return NumericValidator(conditions) def switch(key: str, checkers: Dict[str, Dict[Key, Validator]]): return SwitchValidator(key, checkers) def anyof(*checkers: Validator): return MultipleValidator(list(checkers))
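# A minimal usage sketch for the validator combinators defined above.  It
# relies only on names from this module; the schema, field names, and values
# are invented for illustration.

job_schema = keyed({
    required('name'): regex(r'[a-z][a-z0-9_-]*', maxlen=40),
    required('cpu'): numeric(positive=lambda x: x > 0),
    'env': dictof(str_type),
    'ports': listof(int_type),
    'state': oneof('pending', 'running', 'done'),
})

# Passes: required keys present, optional keys may be omitted.
job_schema.validate('job', {'name': 'demo-job', 'cpu': 4, 'ports': [80, 443]})

# Fails: cpu does not satisfy the 'positive' condition.
try:
    job_schema.validate('job', {'name': 'demo-job', 'cpu': 0})
except ValidationError as e:
    print(e.reason)  # job.cpu does not satisfy the condition positive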
"""Loads plugins from ``porcupine.plugins``.""" # many things are wrapped in try/except here to allow writing Porcupine # plugins using Porcupine, so Porcupine must run if the plugins are # broken from __future__ import annotations import argparse import dataclasses import enum import importlib.machinery import logging import pkgutil import random import time import traceback from typing import Any, Iterable, List, Sequence import toposort from porcupine import get_main_window, settings from porcupine.plugins import __path__ as plugin_paths log = logging.getLogger(__name__) class Status(enum.Enum): """ This :mod:`enum` represents the status of the plugin in the currently running Porcupine process. .. data:: LOADING The plugin hasn't been set up successfully yet, but no errors preventing the setup have occurred. .. data:: ACTIVE The plugin was imported and its ``setup()`` function was called successfully. .. data:: DISABLED_BY_SETTINGS The plugin wasn't loaded because it's in the ``disabled_plugins`` setting. See :mod:`porcupine.settings`. .. data:: DISABLED_ON_COMMAND_LINE The plugin wasn't loaded because it was listed in a ``--without-plugins`` argument given to Porcupine. .. data:: IMPORT_FAILED Importing the plugin raised an error. .. data:: SETUP_FAILED The plugin was imported successfully, but its ``setup()`` function raised an exception or logged an error. In a plugin named ``foo``, any message logged with severity ``ERROR`` or ``CRITICAL`` to the logger named ``porcupine.plugins.foo`` counts as logging an error. Therefore you can do this:: import logging log = logging.getLogger(__name__) # __name__ == "porcupine.plugins.foo" def setup() -> None: if bar_is_installed: ... else: log.error("bar is not installed") When bar is not installed, this plugin will show a one-line error message in the plugin manager and the terminal. If an exception is raised, the full traceback is shown instead. .. data:: CIRCULAR_DEPENDENCY_ERROR Plugins with this status were imported, but their ``setup_before`` and ``setup_after`` lists make it impossible to determine the correct order for calling their ``setup()`` function. For example, if plugin *A* should be set up before *B*, *B* should be set up before *C* and *C* should be set up before *A*, then *A*, *B* and *C* will all fail with ``CIRCULAR_DEPENDENCY_ERROR``. """ LOADING = enum.auto() ACTIVE = enum.auto() DISABLED_BY_SETTINGS = enum.auto() DISABLED_ON_COMMAND_LINE = enum.auto() IMPORT_FAILED = enum.auto() SETUP_FAILED = enum.auto() CIRCULAR_DEPENDENCY_ERROR = enum.auto() @dataclasses.dataclass(eq=False) class PluginInfo: """ This :mod:`dataclass <dataclasses>` represents a plugin. It's usually better to use ``info.setup_before`` instead of accessing ``info.module.setup_before`` directly. Not all plugins define a ``setup_before`` variable, and if it's not present, then ``info.setup_before`` is an empty set. This also applies to ``setup_after``. The value of *error* depends on *status*: * If *status* is ``LOADING``, ``ACTIVE``, ``DISABLED_BY_SETTINGS`` or ``DISABLED_ON_COMMAND_LINE``, then *error* is ``None``. * If *status* is ``IMPORT_FAILED`` or ``SETUP_FAILED``, then *error* is a Python error message, starting with ``Traceback (most recent call last):``. * If *status* is ``CIRCULAR_DEPENDENCY_ERROR``, then *error* is a user-readable one-line message. 
""" name: str came_with_porcupine: bool status: Status module: Any | None # you have to check for None, otherwise mypy won't complain error: str | None _mutable_plugin_infos: list[PluginInfo] = [] plugin_infos: Sequence[PluginInfo] = _mutable_plugin_infos # changing content is mypy error _dependencies: dict[PluginInfo, set[PluginInfo]] = {} def _run_setup_argument_parser_function(info: PluginInfo, parser: argparse.ArgumentParser) -> None: assert info.status == Status.LOADING assert info.module is not None if hasattr(info.module, "setup_argument_parser"): start = time.perf_counter() try: info.module.setup_argument_parser(parser) except Exception: log.exception(f"{info.name}.setup_argument_parser() doesn't work") info.status = Status.SETUP_FAILED info.error = traceback.format_exc() duration = time.perf_counter() - start log.debug("ran %s.setup_argument_parser() in %.3f milliseconds", info.name, duration * 1000) def _import_plugin(info: PluginInfo) -> None: assert info.status == Status.LOADING assert info.module is None log.debug(f"trying to import porcupine.plugins.{info.name}") start = time.perf_counter() try: info.module = importlib.import_module(f"porcupine.plugins.{info.name}") setup_before = set(getattr(info.module, "setup_before", [])) setup_after = set(getattr(info.module, "setup_after", [])) except Exception: log.exception(f"can't import porcupine.plugins.{info.name}") info.status = Status.IMPORT_FAILED info.error = traceback.format_exc() return for dep_info in plugin_infos: if dep_info.name in setup_after: _dependencies[info].add(dep_info) if dep_info.name in setup_before: _dependencies[dep_info].add(info) duration = time.perf_counter() - start log.debug("imported porcupine.plugins.%s in %.3f milliseconds", info.name, duration * 1000) # Remember to generate <<PluginsLoaded>> when this succeeds def _run_setup_and_set_status(info: PluginInfo) -> None: assert info.status == Status.LOADING assert info.module is not None error_log: list[logging.LogRecord] = [] logger = logging.getLogger(f"porcupine.plugins.{info.name}") handler = logging.Handler() handler.setLevel(logging.ERROR) handler.emit = error_log.append # type: ignore logger.addHandler(handler) if hasattr(info.module, "setup"): start = time.perf_counter() try: log.debug(f"calling porcupine.plugins.{info.name}.setup()") info.module.setup() except Exception: log.exception(f"{info.name}.setup() doesn't work") info.status = Status.SETUP_FAILED info.error = traceback.format_exc() else: if error_log: info.status = Status.SETUP_FAILED info.error = "".join( f"{record.levelname}: {record.message}\n" for record in error_log ) else: info.status = Status.ACTIVE duration = time.perf_counter() - start logger.debug("ran %s.setup() in %.3f milliseconds", info.name, duration * 1000) else: info.status = Status.SETUP_FAILED info.error = ( "There is no setup() function. 
Make sure to include a setup function into your" " plugin.\nTo learn more about Porcupine's plugin API, visit" " https://akuli.github.io/porcupine/plugin-intro.html" ) log.warning(f"Calling {info.name!r} plugin's setup() function failed.\n{info.error}") logger.removeHandler(handler) def _did_plugin_come_with_porcupine(finder: object) -> bool: return isinstance(finder, importlib.machinery.FileFinder) and finder.path == plugin_paths[-1] # undocumented on purpose, don't use in plugins def import_plugins(disabled_on_command_line: list[str]) -> None: assert not _mutable_plugin_infos and not _dependencies _mutable_plugin_infos.extend( PluginInfo( name=name, came_with_porcupine=_did_plugin_come_with_porcupine(finder), status=Status.LOADING, module=None, error=None, ) for finder, name, is_pkg in pkgutil.iter_modules(plugin_paths) if not name.startswith("_") ) _dependencies.update({info: set() for info in plugin_infos}) for info in _mutable_plugin_infos: # If it's disabled in settings and on command line, then status is set # to DISABLED_BY_SETTINGS. This makes more sense for the user of the # plugin manager dialog. if info.name in settings.get("disabled_plugins", List[str]): info.status = Status.DISABLED_BY_SETTINGS continue if info.name in disabled_on_command_line: info.status = Status.DISABLED_ON_COMMAND_LINE continue _import_plugin(info) # undocumented on purpose, don't use in plugins # TODO: document what setup_argument_parser() function in a plugin does def run_setup_argument_parser_functions(parser: argparse.ArgumentParser) -> None: for info in plugin_infos: if info.status == Status.LOADING: _run_setup_argument_parser_function(info, parser) # undocumented on purpose, don't use in plugins def run_setup_functions(shuffle: bool) -> None: imported_infos = [info for info in plugin_infos if info.status == Status.LOADING] # the toposort will partially work even if there's a circular # dependency, the CircularDependencyError is raised after doing # everything possible (see source code) loading_order = [] try: toposort_result: Iterable[Iterable[PluginInfo]] = toposort.toposort(_dependencies) for infos in toposort_result: load_list = [info for info in infos if info.status == Status.LOADING] if shuffle: # for plugin developers wanting to make sure that the # dependencies specified in setup_before and setup_after # are correct random.shuffle(load_list) else: # for consistency in UI (e.g. always same order of menu items) load_list.sort(key=(lambda info: info.name)) loading_order.extend(load_list) except toposort.CircularDependencyError as e: log.exception("circular dependency") for info in set(imported_infos) - set(loading_order): info.status = Status.CIRCULAR_DEPENDENCY_ERROR parts = ", ".join(f"{a} depends on {b}" for a, b in e.data.items()) info.error = f"Circular dependency error: {parts}" for info in loading_order: assert info.status == Status.LOADING _run_setup_and_set_status(info) get_main_window().event_generate("<<PluginsLoaded>>") def can_setup_while_running(info: PluginInfo) -> bool: """ Returns whether the plugin can be set up now, without having to restart Porcupine. 
""" if info.status not in {Status.DISABLED_BY_SETTINGS, Status.DISABLED_ON_COMMAND_LINE}: return False if info.module is None: # Importing may give more information about dependencies, needed below old_status = info.status info.status = Status.LOADING _import_plugin(info) if info.status != Status.LOADING: # error return False info.status = old_status # If a plugin defines setup_argument_parser, it likely wants it to run on # startup, and now it's too late. if hasattr(info.module, "setup_argument_parser"): return False # Check whether no other active plugin depends on loading after this plugin setup_preventors = [ other.name for other, other_must_setup_after_these in _dependencies.items() if other.status == Status.ACTIVE and info in other_must_setup_after_these ] if setup_preventors: log.info( f"can't setup {info.name} now because it must be done before setting up the following" " plugins, which are already active: " + "\n".join(setup_preventors) ) return False return True def setup_while_running(info: PluginInfo) -> None: """Run the ``setup_argument_parser()`` and ``setup()`` functions now. Before calling this function, make sure that :func:`can_setup_while_running` returns ``True``. """ info.status = Status.LOADING dummy_parser = argparse.ArgumentParser() _run_setup_argument_parser_function(info, dummy_parser) if info.status != Status.LOADING: # error return _run_setup_and_set_status(info) assert info.status != Status.LOADING if info.status == Status.ACTIVE: get_main_window().event_generate("<<PluginsLoaded>>")
# Copyright 2011 Grid Dynamics # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr import webob from webob import exc from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import exception from nova.i18n import _ from nova import network from nova.objects import base as base_obj from nova.objects import fields as obj_fields authorize = extensions.extension_authorizer('compute', 'networks') authorize_view = extensions.extension_authorizer('compute', 'networks:view') extended_fields = ('mtu', 'dhcp_server', 'enable_dhcp', 'share_address') def network_dict(context, network, extended): fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2', 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6') admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted', 'injected', 'bridge', 'vlan', 'vpn_public_address', 'vpn_public_port', 'vpn_private_address', 'dhcp_start', 'project_id', 'host', 'bridge_interface', 'multi_host', 'priority', 'rxtx_base') if network: # NOTE(mnaser): We display a limited set of fields so users can know # what networks are available, extra system-only fields # are only visible if they are an admin. if context.is_admin: fields += admin_fields if extended: fields += extended_fields # TODO(mriedem): Remove the NovaObject type check once the # neutronv2 API is returning Network objects from get/get_all. is_obj = isinstance(network, base_obj.NovaObject) result = {} for field in fields: # NOTE(mriedem): If network is an object, IPAddress fields need to # be cast to a string so they look the same in the response as # before the objects conversion. if is_obj and isinstance(network.fields[field].AUTO_TYPE, obj_fields.IPAddress): val = network.get(field) if val is not None: result[field] = str(network.get(field)) else: result[field] = val else: # It's either not an object or it's not an IPAddress field. 
result[field] = network.get(field) uuid = network.get('uuid') if uuid: result['id'] = uuid return result else: return {} class NetworkController(wsgi.Controller): def __init__(self, network_api=None, ext_mgr=None): self.network_api = network_api or network.API() if ext_mgr: self.extended = ext_mgr.is_loaded('os-extended-networks') else: self.extended = False def index(self, req): context = req.environ['nova.context'] authorize_view(context) networks = self.network_api.get_all(context) result = [network_dict(context, net_ref, self.extended) for net_ref in networks] return {'networks': result} @wsgi.action("disassociate") def _disassociate_host_and_project(self, req, id, body): context = req.environ['nova.context'] authorize(context) try: self.network_api.associate(context, id, host=None, project=None) except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) except NotImplementedError: msg = _('Disassociate network is not implemented by the ' 'configured Network API') raise exc.HTTPNotImplemented(explanation=msg) return webob.Response(status_int=202) def show(self, req, id): context = req.environ['nova.context'] authorize_view(context) try: network = self.network_api.get(context, id) except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) return {'network': network_dict(context, network, self.extended)} def delete(self, req, id): context = req.environ['nova.context'] authorize(context) try: self.network_api.delete(context, id) except exception.NetworkInUse as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) return webob.Response(status_int=202) def create(self, req, body): context = req.environ['nova.context'] authorize(context) def bad(e): return exc.HTTPBadRequest(explanation=e) if not (body and body.get("network")): raise bad(_("Missing network in body")) params = body["network"] if not params.get("label"): raise bad(_("Network label is required")) cidr = params.get("cidr") or params.get("cidr_v6") if not cidr: raise bad(_("Network cidr or cidr_v6 is required")) if params.get("project_id") == "": params["project_id"] = None try: params["num_networks"] = 1 try: params["network_size"] = netaddr.IPNetwork(cidr).size except netaddr.AddrFormatError: raise exception.InvalidCidr(cidr=cidr) if not self.extended: create_params = ('allowed_start', 'allowed_end') for field in extended_fields + create_params: if field in params: del params[field] network = self.network_api.create(context, **params)[0] except exception.NovaException as ex: if ex.code == 400: raise bad(ex.format_message()) elif ex.code == 409: raise exc.HTTPConflict(explanation=ex.format_message()) raise return {"network": network_dict(context, network, self.extended)} def add(self, req, body): context = req.environ['nova.context'] authorize(context) if not body: raise exc.HTTPUnprocessableEntity() network_id = body.get('id', None) project_id = context.project_id try: self.network_api.add_network_to_project( context, project_id, network_id) except NotImplementedError: msg = (_("VLAN support must be enabled")) raise exc.HTTPNotImplemented(explanation=msg) except Exception as ex: msg = (_("Cannot associate network %(network)s" " with project %(project)s: %(message)s") % {"network": network_id or "", "project": project_id, "message": getattr(ex, "value", ex)}) raise exc.HTTPBadRequest(explanation=msg) return 
webob.Response(status_int=202)


class Os_networks(extensions.ExtensionDescriptor):
    """Admin-only Network Management Extension."""

    name = "Networks"
    alias = "os-networks"
    namespace = ("http://docs.openstack.org/compute/"
                 "ext/os-networks/api/v1.1")
    updated = "2011-12-23T00:00:00Z"

    def get_resources(self):
        member_actions = {'action': 'POST'}
        collection_actions = {'add': 'POST'}
        res = extensions.ResourceExtension(
            'os-networks', NetworkController(ext_mgr=self.ext_mgr),
            member_actions=member_actions,
            collection_actions=collection_actions)
        return [res]
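# A self-contained sketch of the field-filtering idea behind network_dict()
# above: every caller sees the basic fields, admins additionally see the
# system-only fields, and the extended set is only added when the
# os-extended-networks extension is loaded.  The field names below are a
# small invented subset, not the controller's full lists.

BASIC_FIELDS = ('id', 'cidr', 'label')
ADMIN_FIELDS = ('host', 'project_id', 'vlan')
EXTENDED_FIELDS = ('mtu', 'share_address')


def visible_fields(is_admin, extended):
    fields = BASIC_FIELDS
    if is_admin:
        fields += ADMIN_FIELDS
    if extended:
        fields += EXTENDED_FIELDS
    return fields


def filtered_network(network, is_admin, extended):
    return {f: network.get(f) for f in visible_fields(is_admin, extended)}


net = {'id': '42', 'cidr': '10.0.0.0/24', 'label': 'private',
       'host': 'compute1', 'project_id': 'demo', 'vlan': 100,
       'mtu': 1500, 'share_address': False}
assert 'host' not in filtered_network(net, is_admin=False, extended=True)
assert filtered_network(net, is_admin=True, extended=False)['vlan'] == 100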
#!/usr/bin/env python # # Copyright (c) 2001 - 2016 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "test/VariantDir/SConscript-variant_dir.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" """ Verify that specifying a variant_dir argument to SConscript works properly. """ import TestSCons test = TestSCons.TestSCons() all1 = test.workpath('test', 'build', 'var1', 'all') all2 = test.workpath('test', 'build', 'var2', 'all') all3 = test.workpath('test', 'build', 'var3', 'all') all4 = test.workpath('test', 'build', 'var4', 'all') all5 = test.workpath('build', 'var5', 'all') all6 = test.workpath('build', 'var6', 'all') all7 = test.workpath('build', 'var7', 'all') all8 = test.workpath('build', 'var8', 'all') all9 = test.workpath('test', 'build', 'var9', 'src', 'all') test.subdir('test') test.write(['test', 'SConstruct'], """ src = Dir('src') alt = Dir('alt') var1 = Dir('build/var1') var2 = Dir('build/var2') var3 = Dir('build/var3') var4 = Dir('build/var4') var5 = Dir('../build/var5') var6 = Dir('../build/var6') var7 = Dir('../build/var7') var8 = Dir('../build/var8') var9 = Dir('../build/var9') def cat(env, source, target): target = str(target[0]) f = open(target, "wb") for src in source: f.write(open(str(src), "rb").read()) f.close() env = Environment(BUILDERS={'Cat':Builder(action=cat)}, BUILD='build') Export("env") SConscript('src/SConscript', variant_dir=var1) SConscript('src/SConscript', variant_dir='build/var2', src_dir=src) SConscript('src/SConscript', variant_dir='build/var3', duplicate=0) #XXX We can't support var4 and var5 yet, because our VariantDir linkage #XXX is to an entire source directory. We haven't yet generalized our #XXX infrastructure to be able to take the SConscript file from one source #XXX directory, but the rest of the files from a different one. #XXX SConscript('src/SConscript', variant_dir=var4, src_dir=alt, duplicate=0) #XXX SConscript('src/SConscript', variant_dir='../build/var5', src_dir='alt') SConscript('src/SConscript', variant_dir=var6) SConscript('src/SConscript', variant_dir=var7, src_dir=src, duplicate=0) env.SConscript('src/SConscript', variant_dir='../$BUILD/var8', duplicate=0) # This tests the fact that if you specify a src_dir that is above # the dir a SConscript is in, that we do the intuitive thing, i.e., # we set the path of the SConscript accordingly. 
The below is # equivalent to saying: # # VariantDir('build/var9', '.') # SConscript('build/var9/src/SConscript') SConscript('src/SConscript', variant_dir='build/var9', src_dir='.') """) test.subdir(['test', 'src'], ['test', 'alt']) test.write(['test', 'src', 'SConscript'], """ Import("env") env.Cat('aaa.out', 'aaa.in') env.Cat('bbb.out', 'bbb.in') env.Cat('ccc.out', 'ccc.in') env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out']) """) test.write('test/src/aaa.in', "test/src/aaa.in\n") test.write('test/src/bbb.in', "test/src/bbb.in\n") test.write('test/src/ccc.in', "test/src/ccc.in\n") test.write('test/alt/aaa.in', "test/alt/aaa.in\n") test.write('test/alt/bbb.in', "test/alt/bbb.in\n") test.write('test/alt/ccc.in', "test/alt/ccc.in\n") test.run(chdir='test', arguments = '. ../build') all_src = "test/src/aaa.in\ntest/src/bbb.in\ntest/src/ccc.in\n" all_alt = "test/alt/aaa.in\ntest/alt/bbb.in\ntest/alt/ccc.in\n" test.must_match(all1, all_src) test.must_match(all2, all_src) test.must_match(all3, all_src) #XXX We can't support var4 and var5 yet, because our VariantDir linkage #XXX is to an entire source directory. We haven't yet generalized our #XXX infrastructure to be able to take the SConscript file from one source #XXX directory, but the rest of the files from a different one. #XXX test.must_match(all4, all_alt) #XXX test.must_match(all5, all_alt) test.must_match(all6, all_src) test.must_match(all7, all_src) test.must_match(all8, all_src) test.must_match(all9, all_src) import os import stat def equal_stats(x,y): x = os.stat(x) y = os.stat(y) return (stat.S_IMODE(x[stat.ST_MODE]) == stat.S_IMODE(y[stat.ST_MODE]) and x[stat.ST_MTIME] == y[stat.ST_MTIME]) # Make sure we did duplicate the source files in build/var1, # and that their stats are the same: for file in ['aaa.in', 'bbb.in', 'ccc.in']: test.must_exist(test.workpath('test', 'build', 'var1', file)) test.fail_test(not equal_stats(test.workpath('test', 'build', 'var1', file), test.workpath('test', 'src', file))) # Make sure we did duplicate the source files in build/var2, # and that their stats are the same: for file in ['aaa.in', 'bbb.in', 'ccc.in']: test.must_exist(test.workpath('test', 'build', 'var2', file)) test.fail_test(not equal_stats(test.workpath('test', 'build', 'var2', file), test.workpath('test', 'src', file))) # Make sure we didn't duplicate the source files in build/var3. test.must_not_exist(test.workpath('test', 'build', 'var3', 'aaa.in')) test.must_not_exist(test.workpath('test', 'build', 'var3', 'bbb.in')) test.must_not_exist(test.workpath('test', 'build', 'var3', 'ccc.in')) #XXX We can't support var4 and var5 yet, because our VariantDir linkage #XXX is to an entire source directory. We haven't yet generalized our #XXX infrastructure to be able to take the SConscript file from one source #XXX directory, but the rest of the files from a different one. #XXX Make sure we didn't duplicate the source files in build/var4. #XXXtest.must_not_exist(test.workpath('test', 'build', 'var4', 'aaa.in')) #XXXtest.must_not_exist(test.workpath('test', 'build', 'var4', 'bbb.in')) #XXXtest.must_not_exist(test.workpath('test', 'build', 'var4', 'ccc.in')) #XXX We can't support var4 and var5 yet, because our VariantDir linkage #XXX is to an entire source directory. We haven't yet generalized our #XXX infrastructure to be able to take the SConscript file from one source #XXX directory, but the rest of the files from a different one. 
#XXX Make sure we did duplicate the source files in build/var5, #XXX and that their stats are the same: #XXXfor file in ['aaa.in', 'bbb.in', 'ccc.in']: #XXX test.must_exist(test.workpath('build', 'var5', file)) #XXX test.fail_test(not equal_stats(test.workpath('build', 'var5', file), #XXX test.workpath('test', 'src', file))) # Make sure we did duplicate the source files in build/var6, # and that their stats are the same: for file in ['aaa.in', 'bbb.in', 'ccc.in']: test.must_exist(test.workpath('build', 'var6', file)) test.fail_test(not equal_stats(test.workpath('build', 'var6', file), test.workpath('test', 'src', file))) # Make sure we didn't duplicate the source files in build/var7. test.must_not_exist(test.workpath('build', 'var7', 'aaa.in')) test.must_not_exist(test.workpath('build', 'var7', 'bbb.in')) test.must_not_exist(test.workpath('build', 'var7', 'ccc.in')) # Make sure we didn't duplicate the source files in build/var8. test.must_not_exist(test.workpath('build', 'var8', 'aaa.in')) test.must_not_exist(test.workpath('build', 'var8', 'bbb.in')) test.must_not_exist(test.workpath('build', 'var8', 'ccc.in')) ################### test.subdir('test2') test.write(['test2', 'SConstruct'], """\ SConscript('SConscript', variant_dir='Build', src_dir='.', duplicate=0) """) test.write(['test2', 'SConscript'], """\ env = Environment() foo_obj = env.Object('foo.c') env.Program('foo', [foo_obj, 'bar.c']) """) test.write(['test2', 'bar.c'], r""" #include <stdio.h> #include <stdlib.h> void bar(void) { printf("bar.c\n"); } """) test.write(['test2', 'foo.c'], r""" #include <stdio.h> #include <stdlib.h> extern void bar(void); int main(int argc, char *argv[]) { bar(); printf("foo.c\n"); } """) test.run(chdir="test2") _obj = TestSCons._obj test.must_not_exist(test.workpath('test2', 'foo' + _obj)) test.must_not_exist(test.workpath('test2', 'bar' + _obj)) test.must_exist(test.workpath('test2', 'Build', 'foo' + _obj)) test.must_exist(test.workpath('test2', 'Build', 'bar' + _obj)) ################### # Make sure that directories for subsidiary SConscript() calls # in a variant_dir get created if they don't already exist. test.subdir('test3') test.subdir(['test3', 'src'], ['test3', 'src', '_glscry']) test.write(['test3', 'SConstruct'], """\ SConscript(dirs=['src'], variant_dir='build', duplicate=0) """) test.write(['test3', 'src', 'SConscript'], """\ SConscript(dirs=['_glscry']) """) test.write(['test3', 'src', '_glscry', 'SConscript'], """\ """) test.write(['test3', 'src', 'file.in'], "file.in\n") test.write(['test3', 'src', '_glscry', 'file.in'], "file.in\n") test.run(chdir='test3') test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
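# For quick reference, the smallest variant_dir setup distilled from the
# cases above; paths and target names are illustrative, not part of this
# test.
#
# SConstruct:
#     # Build products from src/ land in build/; duplicate=0 keeps the
#     # sources from being copied into the variant dir.
#     SConscript('src/SConscript', variant_dir='build', duplicate=0)
#
# src/SConscript:
#     env = Environment()
#     env.Program('hello', ['hello.c'])
#
# Running scons then leaves hello.o (or hello.obj) and the hello program
# under build/ rather than next to the sources in src/.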
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_batch_gather_ops.batch_gather.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_batch_gather_ops from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedBatchGatherOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ #========================================================================= # Docstring Example #========================================================================= dict( descr='Docstring example', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d'], [], ['e']]), indices=ragged_factory_ops.constant_value([[1, 2, 0], [], [], [0, 0]]), expected=ragged_factory_ops.constant_value([[b'b', b'c', b'a'], [], [], [b'e', b'e']])), #========================================================================= # 0 Batch Dimensions #========================================================================= dict( descr='params: [P1], indices: [I], result: [I]', params=['a', 'b', 'c', 'd'], indices=[3, 2], expected=[b'd', b'c']), dict( descr='params: [P1, (P2)], indices: [I], result: [I, (P2)]', params=ragged_factory_ops.constant_value([['a', 'b'], [], ['c'], ['d', 'e']]), indices=[3, 2], expected=ragged_factory_ops.constant_value([[b'd', b'e'], [b'c']])), #========================================================================= # 1 Batch Dimension #========================================================================= dict( descr='params: [B1, P1], indices: [B1, I], result: [B1, I]', params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']], indices=[[2, 0], [0, 1], [1, 0]], expected=[[b'c', b'a'], [b'd', b'e'], [b'h', b'g']]), dict( descr='params: [B1, (P1)], indices: [B1, I], result: [B1, I]', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e'], ['g']]), indices=[[2, 0], [0, 1], [0, 0]], expected=[[b'c', b'a'], [b'd', b'e'], [b'g', b'g']]), dict( descr='params: [B1, P1], indices: [B1, (I)], result: [B1, (I)]', params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']], indices=ragged_factory_ops.constant_value([[2, 0, 2], [0], [1]]), expected=ragged_factory_ops.constant_value([[b'c', b'a', b'c'], [b'd'], [b'h']])), dict( descr=('params: [B1, (P1), (P2), P3], 
indices: [B1, I], ' 'result: [B1, I, (P2), P3]'), params=ragged_factory_ops.constant_value( [[[['a']], [['b'], ['c']]], [[['d'], ['e']], [['f']]], [[['g']]]], ragged_rank=2), indices=[[1, 0], [0, 1], [0, 0]], expected=ragged_factory_ops.constant_value( [[[[b'b'], [b'c']], [[b'a']]], [[[b'd'], [b'e']], [[b'f']]], [[[b'g']], [[b'g']]]], ragged_rank=2)), #========================================================================= # 2 Batch Dimensions #========================================================================= dict( descr=('params: [B1, B2, P1], indices: [B1, B2, I], ' 'result: [B1, B2, I]'), params=[[['a', 'b', 'c']], [['d', 'e', 'f']], [['g', 'h', 'i']]], indices=[[[2, 0]], [[0, 1]], [[1, 0]]], expected=[[[b'c', b'a']], [[b'd', b'e']], [[b'h', b'g']]]), dict( descr=('params: [B1, (B2), P1], indices: [B1, (B2), I], ' 'result: [B1, (B2), I]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]], ragged_rank=1), indices=ragged_factory_ops.constant_value( [[[2, 0], [0, 1]], [[1, 0]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[b'c', b'a'], [b'd', b'e']], [[b'h', b'g']]], ragged_rank=1)), dict( descr=('params: [B1, (B2), (P1)], indices: [B1, (B2), I], ' 'result: [B1, (B2), I]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]], ragged_rank=2), indices=ragged_factory_ops.constant_value( [[[2, 0], [0, 0]], [[1, 0]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]], ragged_rank=1)), dict( descr=('params: [B1, (B2), P1], indices: [B1, (B2), (I)], ' 'result: [B1, (B2), (I)]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]], ragged_rank=1), indices=ragged_factory_ops.constant_value( [[[2, 1, 0], [0]], [[1, 1]]], ragged_rank=2), expected=ragged_factory_ops.constant_value( [[[b'c', b'b', b'a'], [b'd']], [[b'h', b'h']]], ragged_rank=2)), #========================================================================= # 3 Batch Dimensions #========================================================================= dict( descr=( 'params: [B1, (B2), (B3), (P1)], indices: [B1, (B2), (B3), I], ' 'result: [B1, (B2), (B3), I]'), params=ragged_factory_ops.constant_value( [[[['a', 'b', 'c'], ['d']], [['e', 'f']]]], ragged_rank=3), indices=ragged_factory_ops.constant_value( [[[[2, 0], [0, 0]], [[1, 0]]]], ragged_rank=2), expected=ragged_factory_ops.constant_value( [[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]]], ragged_rank=2)), ]) def testRaggedBatchGather(self, descr, params, indices, expected): result = ragged_batch_gather_ops.batch_gather(params, indices) self.assertAllEqual(result, expected) @parameterized.parameters([ # Docstring example: dict( descr='Docstring example', params=[['a', 'b', 'c'], ['d'], [], ['e']], indices=[[1, 2, -1], [], [], [0, 10]], expected=[['b', 'c', 'FOO'], [], [], ['e', 'FOO']], default_value='FOO', ), # Dimensions: # indices: [4] # params: [2, (d1), (d2)] dict( descr='params: [2, (d1), (d2), indices: [4]', indices=[1, 100, 0, -1], params=[[['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']], [["It's", 'always', 'darkest', 'before', 'the', 'dawn']]], expected=[[["It's", 'always', 'darkest', 'before', 'the', 'dawn']], [['$NONE^']], [['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 
'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']], [['$NONE^']]], ), # Dimensions: # params: [1, (d1)] # indices: [3] dict( descr='params: rank 2, indices: rank 1', params=[ ['Bruce', 'Wayne'], ], indices=[-1, 0, 1000], expected=[['$NONE^'], ['Bruce', 'Wayne'], ['$NONE^']] ), # Dimensions: # params: [1, (d1)] # indices: [1, (d2)] dict( descr='Test underbound indices of shape [1, (d2)]', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ], indices=[[8, -1]], expected=[['!', '$NONE^']], ), dict( descr='Test underbound indices of shape [2, (d2)]', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Who', 'let', 'the', 'dogs', 'out', '?'], ], indices=[[8, -1], [1, 100]], expected=[['!', '$NONE^'], ['let', '$NONE^']], ), # Dimensions: # params: [2, (d1)] # indices: [2, (d2)] dict( descr='Test underbound indices of rank 2', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the', 'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand', 'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He', 'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes', 'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',', 'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against', 'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out', 'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of', 'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they', 'had', 'previously', 'chanted', 'in', 'Hebrew', '.']], indices=[[8, -1], [3, 23, 35, 45, 75, 83, -121]], expected=[['!', '$NONE^'], ['.', '.', '.', '.', '!', '.', '$NONE^']], ), dict( descr='Test overbound indices of rank 2', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the', 'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand', 'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He', 'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes', 'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',', 'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against', 'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out', 'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of', 'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they', 'had', 'previously', 'chanted', 'in', 'Hebrew', '.']], indices=[[8, 8823], [3, 23, 35, 45, 75, 83, 1234]], expected=[['!', '$NONE^'], ['.', '.', 
'.', '.', '!', '.', '$NONE^']], ), # Dimensions: # params: [2, (d1), 2] # indices: [2, (d2)] dict( descr='params: rank 3, indices: rank 2', params=[ [['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']], [['Who', 'let'], ['the', 'dogs'], ['out', '?']], ], ragged_rank=1, indices=[[1, -1, 2, 30], [1, 100]], indices_ragged_rank=1, expected=[[['takeover', 'offer'], ['$NONE^', '$NONE^'], ['from', 'Microsoft'], ['$NONE^', '$NONE^']], [['the', 'dogs'], ['$NONE^', '$NONE^']]], expected_ragged_rank=1, default_value=['$NONE^', '$NONE^'], ), # Dimensions: # params: [2, (d1), (d2)] # indices: [2, (d3)] dict( descr='params: [2, (d1), (d2)], indices: [2, (d3)]', params=[ [['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'], ], [['It\'s', 'always', 'darkest', 'before', 'the', 'dawn']] ], indices=[[1, 100], [0, -1]], expected=[[['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'], ['$NONE^']], [["It's", 'always', 'darkest', 'before', 'the', 'dawn'], ['$NONE^']]] ), # Dimensions: # params: [2, (d1), (d2)] # indices: [2, (d1), (d3)] dict( descr='Test overbound indices of rank 3', params=[ [['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Foo', 'bar', 'mar']], [['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the', 'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand', 'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He', 'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes', 'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',', 'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against', 'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out', 'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of', 'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they', 'had', 'previously', 'chanted', 'in', 'Hebrew', '.'], ['I', 'too', 'was', 'hustled', 'scammed', 'bamboozled', 'hood', 'winked', 'lead', 'astray']] ], indices=[[[8, 8823], [0, 100]], [[3, 23, 35, 45, 75, 83, 1234], [5]]], expected=[[['!', '$NONE^'], ['Foo', '$NONE^']], [['.', '.', '.', '.', '!', '.', '$NONE^'], ['bamboozled']]], ), # params.shape = [2, (d1), 8] # indices.shape = [2, (d1), 3] dict( descr='params = [2, (2, 1), 8], indices = [2, (2, 1), 3]', params=[[['h'] * 8, ['w'] * 8], [['b'] * 8]], ragged_rank=1, indices=[[[0, 100, 1], [0, 1, 0]], [[1, 0, 0]]], indices_ragged_rank=1, expected=[[['h', '$NONE^', 'h'], ['w', 'w', 'w']], [['b', 'b', 'b']]], expected_ragged_rank=1, ), ]) def testRaggedBatchGatherWithDefault( self, descr, params, indices, expected, indices_ragged_rank=None, expected_ragged_rank=None, ragged_rank=None, default_value='$NONE^'): params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank) indices = ragged_factory_ops.constant( indices, ragged_rank=indices_ragged_rank or ragged_rank) expected = ragged_factory_ops.constant( expected, ragged_rank=expected_ragged_rank or ragged_rank) result = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) self.assertAllEqual(result, expected) 
@parameterized.parameters([ # Dimensions: # params: dims [2, 5], indices: [2, 2] dict( descr='params: dims [2, 5], indices: [2, 2]', params=[ ['The', 'deal', 'came', 'about', '18'], ['He', 'left', 'us', '.', 'Little']], indices=[[0, -1], [3, 121]], expected=[['The', '$NONE^'], ['.', '$NONE^']], default_value='$NONE^', ), # Dimensions: # params: dims [2, 2, 5], indices: [2, 2] dict( descr='params: dims [2, 2, 5], indices: [2, 2]', params=[ [['The', 'deal', 'came', 'about', '18'], ['The', 'deal', 'came', 'about', '19'], ], [['He', 'left', 'us', '.', 'Little'], ['The', 'deal', 'came', 'about', '20'], ] ], indices=[[0, -1], [0, 121]], expected=[[['The', 'deal', 'came', 'about', '18'], ['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']], [['He', 'left', 'us', '.', 'Little'], ['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']]], default_value='$NONE^', ), # Test default_value with shape [5] dict( descr='params: dims [2, 2, 5], indices: [2, 2]', params=[ [['The', 'deal', 'came', 'about', '18'], ['The', 'deal', 'came', 'about', '19'], ], [['He', 'left', 'us', '.', 'Little'], ['The', 'deal', 'came', 'about', '20'], ] ], indices=[[0, -1], [0, 121]], expected=[[['The', 'deal', 'came', 'about', '18'], [':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']], [['He', 'left', 'us', '.', 'Little'], [':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']]], default_value=[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:'], ), ]) def testRaggedBatchGatherWithDefaultOnTensors( self, descr, params, indices, expected, default_value): params = constant_op.constant(params) indices = constant_op.constant(indices) expected = constant_op.constant(expected) result = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) self.assertAllEqual(expected, result) @parameterized.parameters([ dict( params=[['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.']], indices=[[[8, -1]]], # Exception here because different errors are thrown in eager vs # graph mode. 
error=Exception, default_value='$NONE^', ), ]) def testRankMismatch( self, params, indices, default_value, error): params = ragged_factory_ops.constant(params) indices = ragged_factory_ops.constant(indices) with self.assertRaises(error): _ = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) @parameterized.parameters([ # Dimensions: # params: [2, (d1), 2] # indices: [2, (d2)] # default_value: [] dict( descr='params: rank 3, indices: rank 2, default: rank = [], but' ' should be [2]', params=[ [['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']], [['Who', 'let'], ['the', 'dogs'], ['out', '?']], ], ragged_rank=1, indices=[[1, -1, 2, 30], [1, 100]], indices_ragged_rank=1, default_value='$NONE^', error=Exception, ) ]) def testInvalidDefaultValueRank( self, descr, params, indices, default_value, error, ragged_rank=None, indices_ragged_rank=None): params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank) indices = ragged_factory_ops.constant( indices, ragged_rank=indices_ragged_rank) with self.assertRaises(error): _ = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) def testRaggedBatchGatherUnknownRankError(self): if context.executing_eagerly(): return params = [['a', 'b'], ['c', 'd']] indices = array_ops.placeholder(dtypes.int32, shape=None) ragged_indices = ragged_tensor.RaggedTensor.from_row_splits( indices, [0, 2, 4]) with self.assertRaisesRegexp( ValueError, 'batch_gather does not allow indices with unknown shape.'): ragged_batch_gather_ops.batch_gather(params, indices) with self.assertRaisesRegexp( ValueError, 'batch_gather does not allow indices with unknown shape.'): ragged_batch_gather_ops.batch_gather(params, ragged_indices) @parameterized.parameters( [ dict( params=ragged_factory_ops.constant_value([['a'], ['b'], ['c']]), indices=ragged_factory_ops.constant_value([[0], [0]]), message='Dimensions 3 and 2 are not compatible'), dict( params=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], indices=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]], [[0]]]), message='batch shape from indices does not match params shape'), dict( # rank mismatch params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]], [[0]]]), indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]], [[0]]]), error=(ValueError, errors.InvalidArgumentError)), dict( params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]], [[0]], [[0]]]), indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]], [[0]]]), error=errors.InvalidArgumentError, message='.*Condition x == y did not hold.*'), dict( params=ragged_factory_ops.constant_value(['a', 'b', 'c']), indices=ragged_factory_ops.constant_value([[0], [0]]), message='batch shape from indices does not match params shape'), dict( params=ragged_factory_ops.constant_value([['a']]), indices=0, message='indices.rank must be at least 1.'), dict( params=ragged_factory_ops.constant_value([['a']]), indices=[[[0]]], message='batch shape from indices does not match params shape'), ]) def testRaggedBatchGatherStaticError(self, params, indices, message=None, error=ValueError): with self.assertRaisesRegexp(error, message): ragged_batch_gather_ops.batch_gather(params, indices) if __name__ == '__main__': googletest.main()
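# ---------------------------------------------------------------------------
# A minimal pure-Python sketch (an illustration added here, not part of the
# TensorFlow test suite above) of the batch_gather_with_default semantics the
# parameterized cases exercise: for each batch row, any index outside
# [0, len(row)) yields default_value instead of raising.
def _batch_gather_with_default_sketch(params, indices, default_value):
    return [[row[i] if 0 <= i < len(row) else default_value for i in idx]
            for row, idx in zip(params, indices)]

# Mirrors the 'Docstring example' case above.
assert _batch_gather_with_default_sketch(
    [['a', 'b', 'c'], ['d'], [], ['e']],
    [[1, 2, -1], [], [], [0, 10]],
    'FOO') == [['b', 'c', 'FOO'], [], [], ['e', 'FOO']]
# ---------------------------------------------------------------------------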
# Written by Bram Cohen # see LICENSE.txt for license information from cStringIO import StringIO from binascii import b2a_hex from socket import error as socketerror from urllib import quote from traceback import print_exc from BitTornado.BTcrypto import Crypto try: True except: True = 1 False = 0 bool = lambda x: not not x DEBUG = False MAX_INCOMPLETE = 8 protocol_name = 'BitTorrent protocol' option_pattern = chr(0)*8 def toint(s): return long(b2a_hex(s), 16) def tobinary16(i): return chr((i >> 8) & 0xFF) + chr(i & 0xFF) hexchars = '0123456789ABCDEF' hexmap = [] for i in xrange(256): hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F]) def tohex(s): r = [] for c in s: r.append(hexmap[ord(c)]) return ''.join(r) def make_readable(s): if not s: return '' if quote(s).find('%') >= 0: return tohex(s) return '"'+s+'"' class IncompleteCounter: def __init__(self): self.c = 0 def increment(self): self.c += 1 def decrement(self): self.c -= 1 def toomany(self): return self.c >= MAX_INCOMPLETE incompletecounter = IncompleteCounter() # header, options, download id, my id, [length, message] class Connection: def __init__(self, Encoder, connection, id, ext_handshake=False, encrypted = None, options = None): self.Encoder = Encoder self.connection = connection self.connecter = Encoder.connecter self.id = id self.locally_initiated = (id != None) self.readable_id = make_readable(id) self.complete = False self.keepalive = lambda: None self.closed = False self.buffer = '' self.bufferlen = None self.log = None self.read = self._read self.write = self._write self.cryptmode = 0 self.encrypter = None if self.locally_initiated: incompletecounter.increment() if encrypted: self.encrypted = True self.encrypter = Crypto(True) self.write(self.encrypter.pubkey+self.encrypter.padding()) else: self.encrypted = False self.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id ) self.next_len, self.next_func = 1+len(protocol_name), self.read_header elif ext_handshake: self.Encoder.connecter.external_connection_made += 1 if encrypted: # passed an already running encrypter self.encrypter = encrypted self.encrypted = True self._start_crypto() self.next_len, self.next_func = 14, self.read_crypto_block3c else: self.encrypted = False self.options = options self.write(self.Encoder.my_id) self.next_len, self.next_func = 20, self.read_peer_id else: self.encrypted = None # don't know yet self.next_len, self.next_func = 1+len(protocol_name), self.read_header self.Encoder.raw_server.add_task(self._auto_close, 30) def _log_start(self): # only called with DEBUG = True self.log = open('peerlog.'+self.get_ip()+'.txt','a') self.log.write('connected - ') if self.locally_initiated: self.log.write('outgoing\n') else: self.log.write('incoming\n') self._logwritefunc = self.write self.write = self._log_write def _log_write(self, s): self.log.write('w:'+b2a_hex(s)+'\n') self._logwritefunc(s) def get_ip(self, real=False): return self.connection.get_ip(real) def get_id(self): return self.id def get_readable_id(self): return self.readable_id def is_locally_initiated(self): return self.locally_initiated def is_encrypted(self): return bool(self.encrypted) def is_flushed(self): return self.connection.is_flushed() def _read_header(self, s): if s == chr(len(protocol_name))+protocol_name: return 8, self.read_options return None def read_header(self, s): if self._read_header(s): if self.encrypted or self.Encoder.config['crypto_stealth']: return None return 8, self.read_options if self.locally_initiated and not 
self.encrypted: return None elif not self.Encoder.config['crypto_allowed']: return None if not self.encrypted: self.encrypted = True self.encrypter = Crypto(self.locally_initiated) self._write_buffer(s) return self.encrypter.keylength, self.read_crypto_header ################## ENCRYPTION SUPPORT ###################### def _start_crypto(self): self.encrypter.setrawaccess(self._read,self._write) self.write = self.encrypter.write self.read = self.encrypter.read if self.buffer: self.buffer = self.encrypter.decrypt(self.buffer) def _end_crypto(self): self.read = self._read self.write = self._write self.encrypter = None def read_crypto_header(self, s): self.encrypter.received_key(s) self.encrypter.set_skey(self.Encoder.download_id) if self.locally_initiated: if self.Encoder.config['crypto_only']: cryptmode = '\x00\x00\x00\x02' # full stream encryption else: cryptmode = '\x00\x00\x00\x03' # header or full stream padc = self.encrypter.padding() self.write( self.encrypter.block3a + self.encrypter.block3b + self.encrypter.encrypt( ('\x00'*8) # VC + cryptmode # acceptable crypto modes + tobinary16(len(padc)) + padc # PadC + '\x00\x00' ) ) # no initial payload data self._max_search = 520 return 1, self.read_crypto_block4a self.write(self.encrypter.pubkey+self.encrypter.padding()) self._max_search = 520 return 0, self.read_crypto_block3a def _search_for_pattern(self, s, pat): p = s.find(pat) if p < 0: if len(s) >= len(pat): self._max_search -= len(s)+1-len(pat) if self._max_search < 0: self.close() return False self._write_buffer(s[1-len(pat):]) return False self._write_buffer(s[p+len(pat):]) return True ### INCOMING CONNECTION ### def read_crypto_block3a(self, s): if not self._search_for_pattern(s,self.encrypter.block3a): return -1, self.read_crypto_block3a # wait for more data return len(self.encrypter.block3b), self.read_crypto_block3b def read_crypto_block3b(self, s): if s != self.encrypter.block3b: return None self.Encoder.connecter.external_connection_made += 1 self._start_crypto() return 14, self.read_crypto_block3c def read_crypto_block3c(self, s): if s[:8] != ('\x00'*8): # check VC return None self.cryptmode = toint(s[8:12]) % 4 if self.cryptmode == 0: return None # no encryption selected if ( self.cryptmode == 1 # only header encryption and self.Encoder.config['crypto_only'] ): return None padlen = (ord(s[12])<<8)+ord(s[13]) if padlen > 512: return None return padlen+2, self.read_crypto_pad3 def read_crypto_pad3(self, s): s = s[-2:] ialen = (ord(s[0])<<8)+ord(s[1]) if ialen > 65535: return None if self.cryptmode == 1: cryptmode = '\x00\x00\x00\x01' # header only encryption else: cryptmode = '\x00\x00\x00\x02' # full stream encryption padd = self.encrypter.padding() self.write( ('\x00'*8) # VC + cryptmode # encryption mode + tobinary16(len(padd)) + padd ) # PadD if ialen: return ialen, self.read_crypto_ia return self.read_crypto_block3done() def read_crypto_ia(self, s): if DEBUG: self._log_start() self.log.write('r:'+b2a_hex(s)+'(ia)\n') if self.buffer: self.log.write('r:'+b2a_hex(self.buffer)+'(buffer)\n') return self.read_crypto_block3done(s) def read_crypto_block3done(self, ia=''): if DEBUG: if not self.log: self._log_start() if self.cryptmode == 1: # only handshake encryption assert not self.buffer # oops; check for exceptions to this self._end_crypto() if ia: self._write_buffer(ia) return 1+len(protocol_name), self.read_encrypted_header ### OUTGOING CONNECTION ### def read_crypto_block4a(self, s): if not self._search_for_pattern(s,self.encrypter.VC_pattern()): return -1, 
self.read_crypto_block4a # wait for more data self._start_crypto() return 6, self.read_crypto_block4b def read_crypto_block4b(self, s): self.cryptmode = toint(s[:4]) % 4 if self.cryptmode == 1: # only header encryption if self.Encoder.config['crypto_only']: return None elif self.cryptmode != 2: return None # unknown encryption padlen = (ord(s[4])<<8)+ord(s[5]) if padlen > 512: return None if padlen: return padlen, self.read_crypto_pad4 return self.read_crypto_block4done() def read_crypto_pad4(self, s): # discard data return self.read_crypto_block4done() def read_crypto_block4done(self): if DEBUG: self._log_start() if self.cryptmode == 1: # only handshake encryption if not self.buffer: # oops; check for exceptions to this return None self._end_crypto() self.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id) return 1+len(protocol_name), self.read_encrypted_header ### START PROTOCOL OVER ENCRYPTED CONNECTION ### def read_encrypted_header(self, s): return self._read_header(s) ################################################ def read_options(self, s): self.options = s return 20, self.read_download_id def read_download_id(self, s): if ( s != self.Encoder.download_id or not self.Encoder.check_ip(ip=self.get_ip()) ): return None if not self.locally_initiated: if not self.encrypted: self.Encoder.connecter.external_connection_made += 1 self.write(chr(len(protocol_name)) + protocol_name + option_pattern + self.Encoder.download_id + self.Encoder.my_id) return 20, self.read_peer_id def read_peer_id(self, s): if not self.encrypted and self.Encoder.config['crypto_only']: return None # allows older trackers to ping, # but won't proceed w/ connections if not self.id: self.id = s self.readable_id = make_readable(s) else: if s != self.id: return None self.complete = self.Encoder.got_id(self) if not self.complete: return None if self.locally_initiated: self.write(self.Encoder.my_id) incompletecounter.decrement() self._switch_to_read2() c = self.Encoder.connecter.connection_made(self) self.keepalive = c.send_keepalive return 4, self.read_len def read_len(self, s): l = toint(s) if l > self.Encoder.max_len: return None return l, self.read_message def read_message(self, s): if s != '': self.connecter.got_message(self, s) return 4, self.read_len def read_dead(self, s): return None def _auto_close(self): if not self.complete: self.close() def close(self): if not self.closed: self.connection.close() self.sever() def sever(self): if self.log: self.log.write('closed\n') self.log.close() self.closed = True del self.Encoder.connections[self.connection] if self.complete: self.connecter.connection_lost(self) elif self.locally_initiated: incompletecounter.decrement() def send_message_raw(self, message): self.write(message) def _write(self, message): if not self.closed: self.connection.write(message) def data_came_in(self, connection, s): self.read(s) def _write_buffer(self, s): self.buffer = s+self.buffer def _read(self, s): if self.log: self.log.write('r:'+b2a_hex(s)+'\n') self.Encoder.measurefunc(len(s)) self.buffer += s while True: if self.closed: return # self.next_len = # of characters function expects # or 0 = all characters in the buffer # or -1 = wait for next read, then all characters in the buffer # not compatible w/ keepalives, switch out after all negotiation complete if self.next_len <= 0: m = self.buffer self.buffer = '' elif len(self.buffer) >= self.next_len: m = self.buffer[:self.next_len] self.buffer = self.buffer[self.next_len:] else: return try: x = 
self.next_func(m) except: self.next_len, self.next_func = 1, self.read_dead raise if x is None: self.close() return self.next_len, self.next_func = x if self.next_len < 0: # already checked buffer return # wait for additional data if self.bufferlen is not None: self._read2('') return def _switch_to_read2(self): self._write_buffer = None if self.encrypter: self.encrypter.setrawaccess(self._read2,self._write) else: self.read = self._read2 self.bufferlen = len(self.buffer) self.buffer = [self.buffer] def _read2(self, s): # more efficient, requires buffer['',''] & bufferlen if self.log: self.log.write('r:'+b2a_hex(s)+'\n') self.Encoder.measurefunc(len(s)) while True: if self.closed: return p = self.next_len-self.bufferlen if self.next_len == 0: m = '' elif s: if p > len(s): self.buffer.append(s) self.bufferlen += len(s) return self.bufferlen = len(s)-p self.buffer.append(s[:p]) m = ''.join(self.buffer) if p == len(s): self.buffer = [] else: self.buffer=[s[p:]] s = '' elif p <= 0: # assert len(self.buffer) == 1 s = self.buffer[0] self.bufferlen = len(s)-self.next_len m = s[:self.next_len] if p == 0: self.buffer = [] else: self.buffer = [s[self.next_len:]] s = '' else: return try: x = self.next_func(m) except: self.next_len, self.next_func = 1, self.read_dead raise if x is None: self.close() return self.next_len, self.next_func = x if self.next_len < 0: # already checked buffer return # wait for additional data def connection_flushed(self, connection): if self.complete: self.connecter.connection_flushed(self) def connection_lost(self, connection): if self.Encoder.connections.has_key(connection): self.sever() class _dummy_banlist: def includes(self, x): return False class Encoder: def __init__(self, connecter, raw_server, my_id, max_len, schedulefunc, keepalive_delay, download_id, measurefunc, config, bans=_dummy_banlist() ): self.raw_server = raw_server self.connecter = connecter self.my_id = my_id self.max_len = max_len self.schedulefunc = schedulefunc self.keepalive_delay = keepalive_delay self.download_id = download_id self.measurefunc = measurefunc self.config = config self.connections = {} self.banned = {} self.external_bans = bans self.to_connect = [] self.paused = False if self.config['max_connections'] == 0: self.max_connections = 2 ** 30 else: self.max_connections = self.config['max_connections'] schedulefunc(self.send_keepalives, keepalive_delay) def send_keepalives(self): self.schedulefunc(self.send_keepalives, self.keepalive_delay) if self.paused: return for c in self.connections.values(): c.keepalive() def start_connections(self, list): if not self.to_connect: self.raw_server.add_task(self._start_connection_from_queue) self.to_connect = list def _start_connection_from_queue(self): if self.connecter.external_connection_made: max_initiate = self.config['max_initiate'] else: max_initiate = int(self.config['max_initiate']*1.5) cons = len(self.connections) if cons >= self.max_connections or cons >= max_initiate: delay = 60 elif self.paused or incompletecounter.toomany(): delay = 1 else: delay = 0 dns, id, encrypted = self.to_connect.pop(0) self.start_connection(dns, id, encrypted) if self.to_connect: self.raw_server.add_task(self._start_connection_from_queue, delay) def start_connection(self, dns, id, encrypted = None): if ( self.paused or len(self.connections) >= self.max_connections or id == self.my_id or not self.check_ip(ip=dns[0]) ): return True if self.config['crypto_only']: if encrypted is None or encrypted: # fails on encrypted = 0 encrypted = True else: return True for v in 
self.connections.values(): if v is None: continue if id and v.id == id: return True ip = v.get_ip(True) if self.config['security'] and ip != 'unknown' and ip == dns[0]: return True try: c = self.raw_server.start_connection(dns) con = Connection(self, c, id, encrypted = encrypted) self.connections[c] = con c.set_handler(con) except socketerror: return False return True def _start_connection(self, dns, id, encrypted = None): def foo(self=self, dns=dns, id=id, encrypted=encrypted): self.start_connection(dns, id, encrypted) self.schedulefunc(foo, 0) def check_ip(self, connection=None, ip=None): if not ip: ip = connection.get_ip(True) if self.config['security'] and self.banned.has_key(ip): return False if self.external_bans.includes(ip): return False return True def got_id(self, connection): if connection.id == self.my_id: self.connecter.external_connection_made -= 1 return False ip = connection.get_ip(True) for v in self.connections.values(): if connection is not v: if connection.id == v.id: if ip == v.get_ip(True): v.close() else: return False if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True): v.close() return True def external_connection_made(self, connection): if self.paused or len(self.connections) >= self.max_connections: connection.close() return False con = Connection(self, connection, None) self.connections[connection] = con connection.set_handler(con) return True def externally_handshaked_connection_made(self, connection, options, already_read, encrypted = None): if ( self.paused or len(self.connections) >= self.max_connections or not self.check_ip(connection=connection) ): connection.close() return False con = Connection(self, connection, None, ext_handshake = True, encrypted = encrypted, options = options) self.connections[connection] = con connection.set_handler(con) if already_read: con.data_came_in(con, already_read) return True def close_all(self): for c in self.connections.values(): c.close() self.connections = {} def ban(self, ip): self.banned[ip] = 1 def pause(self, flag): self.paused = flag
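# ---------------------------------------------------------------------------
# A standalone sketch (not part of BitTornado) of the length-prefixed message
# framing that Connection.read_len/read_message above implement: each message
# is a 4-byte big-endian length followed by that many payload bytes, and a
# zero-length message is a keepalive.
import struct

def _frame(payload):
    return struct.pack('>I', len(payload)) + payload

def _parse(stream):
    messages, offset = [], 0
    while offset + 4 <= len(stream):
        (length,) = struct.unpack('>I', stream[offset:offset + 4])
        if offset + 4 + length > len(stream):
            break  # incomplete message -- wait for more data, as _read() does
        messages.append(stream[offset + 4:offset + 4 + length])
        offset += 4 + length
    return messages

assert _parse(_frame(b'\x00') + _frame(b'')) == [b'\x00', b'']
# ---------------------------------------------------------------------------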
#Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/styles.py __version__=''' $Id: styles.py 3959 2012-09-27 14:39:39Z robin $ ''' __doc__='''Classes for ParagraphStyle and similar things. A style is a collection of attributes, but with some extra features to allow 'inheritance' from a parent, and to ensure nobody makes changes after construction. ParagraphStyle shows all the attributes available for formatting paragraphs. getSampleStyleSheet() returns a stylesheet you can use for initial development, with a few basic heading and text styles. ''' __all__=( 'PropertySet', 'ParagraphStyle', 'LineStyle', 'ListStyle', 'StyleSheet1', 'getSampleStyleSheet', ) from reportlab.lib.colors import white, black from reportlab.lib.enums import TA_LEFT, TA_CENTER from reportlab.lib.fonts import tt2ps from reportlab.rl_config import canvas_basefontname as _baseFontName _baseFontNameB = tt2ps(_baseFontName,1,0) _baseFontNameI = tt2ps(_baseFontName,0,1) _baseFontNameBI = tt2ps(_baseFontName,1,1) ########################################################### # This class provides an 'instance inheritance' # mechanism for its descendants, simpler than acquisition # but not as far-reaching ########################################################### class PropertySet: defaults = {} def __init__(self, name, parent=None, **kw): """When initialized, it copies the class defaults; then takes a copy of the attributes of the parent if any. All the work is done in init - styles should cost little to use at runtime.""" # step one - validate the hell out of it assert 'name' not in self.defaults, "Class Defaults may not contain a 'name' attribute" assert 'parent' not in self.defaults, "Class Defaults may not contain a 'parent' attribute" if parent: assert parent.__class__ == self.__class__, "Parent style %s must have same class as new style %s" % (parent.__class__.__name__,self.__class__.__name__) #step two self.name = name self.parent = parent self.__dict__.update(self.defaults) #step two - copy from parent if any. Try to be # very strict that only keys in class defaults are # allowed, so they cannot inherit self.refresh() self._setKwds(**kw) def _setKwds(self,**kw): #step three - copy keywords if any for (key, value) in kw.items(): self.__dict__[key] = value def __repr__(self): return "<%s '%s'>" % (self.__class__.__name__, self.name) def refresh(self): """re-fetches attributes from the parent on demand; use if you have been hacking the styles. 
This is used by __init__""" if self.parent: for (key, value) in self.parent.__dict__.items(): if (key not in ['name','parent']): self.__dict__[key] = value def listAttrs(self, indent=''): print indent + 'name =', self.name print indent + 'parent =', self.parent keylist = self.__dict__.keys() keylist.sort() keylist.remove('name') keylist.remove('parent') for key in keylist: value = self.__dict__.get(key, None) print indent + '%s = %s' % (key, value) def clone(self, name, parent=None, **kwds): r = self.__class__(name,parent) r.__dict__ = self.__dict__.copy() r.parent = parent is None and self or parent r._setKwds(**kwds) return r class ParagraphStyle(PropertySet): defaults = { 'fontName':_baseFontName, 'fontSize':10, 'leading':12, 'leftIndent':0, 'rightIndent':0, 'firstLineIndent':0, 'alignment':TA_LEFT, 'spaceBefore':0, 'spaceAfter':0, 'bulletFontName':_baseFontName, 'bulletFontSize':10, 'bulletIndent':0, #'bulletColor':black, 'textColor': black, 'backColor':None, 'wordWrap':None, 'borderWidth': 0, 'borderPadding': 0, 'borderColor': None, 'borderRadius': None, 'allowWidows': 1, 'allowOrphans': 0, 'textTransform':None, #uppercase lowercase (captitalize not yet) or None or absent 'endDots':None, #dots on the last line of left/right justified paras #string or object with text and optional fontName, fontSize, textColor & backColor #dy } class LineStyle(PropertySet): defaults = { 'width':1, 'color': black } def prepareCanvas(self, canvas): """You can ask a LineStyle to set up the canvas for drawing the lines.""" canvas.setLineWidth(1) #etc. etc. class ListStyle(PropertySet): defaults = dict( leftIndent=18, rightIndent=0, bulletAlign='left', bulletType='1', bulletColor=black, bulletFontName='Helvetica', bulletFontSize=12, bulletOffsetY=0, bulletDedent='auto', bulletDir='ltr', bulletFormat=None, start=None, #starting value for a list ) _stylesheet1_undefined = object() class StyleSheet1: """ This may or may not be used. The idea is to: 1. slightly simplify construction of stylesheets; 2. enforce rules to validate styles when added (e.g. we may choose to disallow having both 'heading1' and 'Heading1' - actual rules are open to discussion); 3. allow aliases and alternate style lookup mechanisms 4. Have a place to hang style-manipulation methods (save, load, maybe support a GUI editor) Access is via getitem, so they can be compatible with plain old dictionaries. """ def __init__(self): self.byName = {} self.byAlias = {} def __getitem__(self, key): try: return self.byAlias[key] except KeyError: try: return self.byName[key] except KeyError: raise KeyError("Style '%s' not found in stylesheet" % key) def get(self,key,default=_stylesheet1_undefined): try: return self[key] except KeyError: if default!=_stylesheet1_undefined: return default raise def __contains__(self, key): return key in self.byAlias or key in self.byName def has_key(self,key): return key in self def add(self, style, alias=None): key = style.name if key in self.byName: raise KeyError("Style '%s' already defined in stylesheet" % key) if key in self.byAlias: raise KeyError("Style name '%s' is already an alias in stylesheet" % key) if alias: if alias in self.byName: raise KeyError("Style '%s' already defined in stylesheet" % alias) if alias in self.byAlias: raise KeyError("Alias name '%s' is already an alias in stylesheet" % alias) #passed all tests? 
OK, add it self.byName[key] = style if alias: self.byAlias[alias] = style def list(self): styles = self.byName.items() styles.sort() alii = {} for (alias, style) in self.byAlias.items(): alii[style] = alias for (name, style) in styles: alias = alii.get(style, None) print name, alias style.listAttrs(' ') print def testStyles(): pNormal = ParagraphStyle('Normal',None) pNormal.fontName = _baseFontName pNormal.fontSize = 12 pNormal.leading = 14.4 pNormal.listAttrs() print pPre = ParagraphStyle('Literal', pNormal) pPre.fontName = 'Courier' pPre.listAttrs() return pNormal, pPre def getSampleStyleSheet(): """Returns a stylesheet object""" stylesheet = StyleSheet1() stylesheet.add(ParagraphStyle(name='Normal', fontName=_baseFontName, fontSize=10, leading=12) ) stylesheet.add(ParagraphStyle(name='BodyText', parent=stylesheet['Normal'], spaceBefore=6) ) stylesheet.add(ParagraphStyle(name='Italic', parent=stylesheet['BodyText'], fontName = _baseFontNameI) ) stylesheet.add(ParagraphStyle(name='Heading1', parent=stylesheet['Normal'], fontName = _baseFontNameB, fontSize=18, leading=22, spaceAfter=6), alias='h1') stylesheet.add(ParagraphStyle(name='Title', parent=stylesheet['Normal'], fontName = _baseFontNameB, fontSize=18, leading=22, alignment=TA_CENTER, spaceAfter=6), alias='title') stylesheet.add(ParagraphStyle(name='Heading2', parent=stylesheet['Normal'], fontName = _baseFontNameB, fontSize=14, leading=18, spaceBefore=12, spaceAfter=6), alias='h2') stylesheet.add(ParagraphStyle(name='Heading3', parent=stylesheet['Normal'], fontName = _baseFontNameBI, fontSize=12, leading=14, spaceBefore=12, spaceAfter=6), alias='h3') stylesheet.add(ParagraphStyle(name='Heading4', parent=stylesheet['Normal'], fontName = _baseFontNameBI, fontSize=10, leading=12, spaceBefore=10, spaceAfter=4), alias='h4') stylesheet.add(ParagraphStyle(name='Heading5', parent=stylesheet['Normal'], fontName = _baseFontNameB, fontSize=9, leading=10.8, spaceBefore=8, spaceAfter=4), alias='h5') stylesheet.add(ParagraphStyle(name='Heading6', parent=stylesheet['Normal'], fontName = _baseFontNameB, fontSize=7, leading=8.4, spaceBefore=6, spaceAfter=2), alias='h6') stylesheet.add(ParagraphStyle(name='Bullet', parent=stylesheet['Normal'], firstLineIndent=0, spaceBefore=3), alias='bu') stylesheet.add(ParagraphStyle(name='Definition', parent=stylesheet['Normal'], firstLineIndent=0, leftIndent=36, bulletIndent=0, spaceBefore=6, bulletFontName=_baseFontNameBI), alias='df') stylesheet.add(ParagraphStyle(name='Code', parent=stylesheet['Normal'], fontName='Courier', fontSize=8, leading=8.8, firstLineIndent=0, leftIndent=36)) return stylesheet
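# ---------------------------------------------------------------------------
# A short usage sketch (illustrative only, not part of the original module):
# styles are looked up by name or by alias, and a new style inherits from the
# parent style passed to its constructor, as described for PropertySet above.
if __name__ == '__main__':
    _styles = getSampleStyleSheet()
    _body = _styles['BodyText']                   # lookup by name
    _h1 = _styles['h1']                           # lookup by alias
    assert _h1 is _styles['Heading1']             # alias and name resolve to the same style
    _quote = ParagraphStyle('Quote', _body, leftIndent=36)
    _quote.listAttrs()
# ---------------------------------------------------------------------------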
import time import os from io import BytesIO from jinja2 import Environment, PackageLoader from nbt import nbt from blockmodel.writers.stl_writer import Binary_STL_Writer from blockmodel.mapper import MinecraftBlockMapper from blockmodel.readers import * from blockmodel.constants import * from blockmodel.writers.file_writers import write_stl, write_x3d, write_collada, write_obj, write_csv THIS_DIR = os.path.dirname(__file__) RESOURCES_ROOT = os.path.join(os.path.dirname(__file__), "resources") jinja_env = Environment(loader=PackageLoader('blockmodel.model', 'templates')) class BlockModel(object): def __init__(self, reader): self.vertices = {} self.faces = [] self.texUvMappingsArray = [] self.volume = 0 self.surface = 0 lines = [] for y in range(33): for x in range(33): lines.append("vt %.5f %.5f" % (x/32.0, y/32.0)) self.texUvMappingsArray.append((x/32.0, y/32.0)) self.uv_mappings = "\n".join(lines) self.reader = reader self.timestamp = time.strftime("%Y-%m-%dT%H:%M:%S.000000", time.gmtime()) self.width = self.reader.width self.height = self.reader.height self.depth = self.reader.depth self.max_x = None self.min_x = None self.max_y = None self.min_y = None self.max_z = None self.min_z = None self.scale = 2 self.stl_scale = 2.0 self.xoffset = -self.width/2.0 self.yoffset = 0 self.zoffset = -self.depth/2.0 self.faces = [] self.stl_faces = [] self.block_mapper = MinecraftBlockMapper() self._process() self._make_stl() @classmethod def from_json(cls, as_json, max_size=None): return cls(JsonModelReader(as_json, max_size)) @classmethod def from_png(cls, as_png, max_size=None): return cls(PngModelReader(as_png, max_size)) @classmethod def from_sparse_json(cls, as_json, max_size=None): return cls(SparseJsonModelReader(as_json, max_size)) @classmethod def from_schematic_file(cls, schematic, max_size=None): return cls(SchematicModelReader(schematic, max_size)) def _is_blank_quadrant(self, x, y, z, xd, yd, zd, side): q = 0.5 x1 = x y1 = y z1 = z xd1 = xd yd1 = yd zd1 = zd if side == SIDE_TOP: if y + yd == self.height - q: return True if yd == 0.0: yd1 = 0.5 if yd == 0.5: yd1 = 0.0 y1 = y + 1 if side == SIDE_BOTTOM: if y + yd == 0: return True if yd == 0.0: yd1 = 0.5 y1 = y - 1 if yd == 0.5: yd1 = 0.0 if side == SIDE_RIGHT: if x + xd == self.width - q: return True if xd == 0.0: xd1 = 0.5 if xd == 0.5: xd1 = 0.0 x1 = x + 1 if side == SIDE_LEFT: if x + xd == 0: return True if xd == 0.0: x1 = x - 1 xd1 = 0.5 if xd == 0.5: xd1 = 0.0 if side == SIDE_FRONT: if z + zd == self.depth - q: return True if zd == 0.0: zd1 = 0.5 if zd == 0.5: zd1 = 0.0 z1 = z + 1 if side == SIDE_BACK: if z + zd == 0: return True if zd == 0.0: zd1 = 0.5 z1 = z - 1 if zd == 0.5: zd1 = 0.0 return self.block_mapper.is_blank(x1, y1, z1, xd1, yd1, zd1, self.reader) def _render_partial_face(self, block, x, y, z, side): d = 0.5 if side == SIDE_TOP: xd1 = 0.0 yd1 = 0.5 zd1 = 0.0 xd2 = 0.0 yd2 = 0.5 zd2 = 0.5 xd3 = 0.5 yd3 = 0.5 zd3 = 0.0 xd4 = 0.5 yd4 = 0.5 zd4 = 0.5 if side == SIDE_BOTTOM: xd1 = 0.0 yd1 = 0.0 zd1 = 0.0 xd2 = 0.0 yd2 = 0.0 zd2 = 0.5 xd3 = 0.5 yd3 = 0.0 zd3 = 0.0 xd4 = 0.5 yd4 = 0.0 zd4 = 0.5 if side == SIDE_RIGHT: xd1 = 0.5 yd1 = 0.0 zd1 = 0.0 xd2 = 0.5 yd2 = 0.5 zd2 = 0.0 xd3 = 0.5 yd3 = 0.0 zd3 = 0.5 xd4 = 0.5 yd4 = 0.5 zd4 = 0.5 if side == SIDE_LEFT: xd1 = 0.0 yd1 = 0.0 zd1 = 0.0 xd2 = 0.0 yd2 = 0.5 zd2 = 0.0 xd3 = 0.0 yd3 = 0.0 zd3 = 0.5 xd4 = 0.0 yd4 = 0.5 zd4 = 0.5 if side == SIDE_FRONT: xd1 = 0.0 yd1 = 0.0 zd1 = 0.5 xd2 = 0.5 yd2 = 0.0 zd2 = 0.5 xd3 = 0.0 yd3 = 0.5 zd3 = 0.5 xd4 = 0.5 yd4 = 0.5 zd4 = 0.5 if side == 
SIDE_BACK: xd1 = 0.0 yd1 = 0.0 zd1 = 0.0 xd2 = 0.5 yd2 = 0.0 zd2 = 0.0 xd3 = 0.0 yd3 = 0.5 zd3 = 0.0 xd4 = 0.5 yd4 = 0.5 zd4 = 0.0 self._render_face_quadrant(block, x, y, z, xd1, yd1, zd1, side, d) self._render_face_quadrant(block, x, y, z, xd2, yd2, zd2, side, d) self._render_face_quadrant(block, x, y, z, xd3, yd3, zd3, side, d) self._render_face_quadrant(block, x, y, z, xd4, yd4, zd4, side, d) def _renderface(self, block, x, y, z, side): if side == SIDE_TOP: other_block = self._get_block(x, y+1, z) elif side == SIDE_BOTTOM: other_block = self._get_block(x, y-1, z) elif side == SIDE_RIGHT: other_block = self._get_block(x+1, y, z) elif side == SIDE_LEFT: other_block = self._get_block(x-1, y, z) elif side == SIDE_FRONT: other_block = self._get_block(x, y, z+1) elif side == SIDE_BACK: other_block = self._get_block(x, y, z-1) else: raise Exception("Unrecognised side ID") if block.block_type == "cube": if other_block is None: self._add_face(self._get_face_corners(x, y, z, side), block, side) else: if other_block.block_type != "cube": self._render_partial_face(block, x, y, z, side) else: self._render_partial_face(block, x, y, z, side) def _get_face_corners(self, x, y, z, side): if side == SIDE_TOP: return (x, y + 1, z), (x, y + 1, z + 1), (x + 1, y + 1, z + 1), (x + 1, y + 1, z) if side == SIDE_BOTTOM: return (x + 1, y, z), (x + 1, y, z + 1), (x, y, z + 1), (x, y, z) if side == SIDE_RIGHT: return (x + 1, y + 1, z + 1), (x + 1, y, z + 1), (x + 1, y, z), (x + 1, y + 1, z) if side == SIDE_LEFT: return (x, y + 1, z), (x, y, z), (x, y, z + 1), (x, y + 1, z + 1) if side == SIDE_FRONT: return (x, y + 1, z + 1), (x, y, z + 1), (x + 1, y, z + 1), (x + 1, y + 1, z + 1) if side == SIDE_BACK: return (x + 1, y + 1, z), (x + 1, y, z), (x, y, z), (x, y + 1, z) def _get_block(self, x, y, z): return self.block_mapper.get_block(x, y, z, self.reader) def _render_block(self, block, x, y, z): block_volume = self.stl_scale ** 3 if block.block_type == "cube": self.volume += block_volume for side in ALL_SIDES: self._renderface(block, x, y, z, side) else: if block.block_type == "halfslab": self.volume += block_volume/2.0 if block.block_type == "stair": self.volume += (block_volume * 3.0)/4.0 self._render_block_sub_blocks(block, x, y, z) def _get_quadrant_corners_quads(self, x, y, z, xd, yd, zd, side, d): xx = x + xd yy = y + yd zz = z + zd #top if side == SIDE_TOP: A = (xx, yy + d, zz) B = (xx, yy + d, zz + d) C = (xx + d, yy + d, zz + d) D = (xx + d, yy + d, zz) quad_x = int(xd * 2.0) quad_y = 1 - int(zd * 2.0) #bottom if side == SIDE_BOTTOM: A = (xx + d, yy, zz) B = (xx + d, yy, zz + d) C = (xx, yy, zz + d) D = (xx, yy, zz) quad_x = 1 - int(xd * 2.0) quad_y = 1 - int(zd * 2.0) #right if side == SIDE_RIGHT: A = (xx + d, yy + d, zz + d) B = (xx + d, yy, zz + d) C = (xx + d, yy, zz) D = (xx + d, yy + d, zz) quad_x = 1 - int(zd * 2.0) quad_y = int(yd * 2.0) #left if side == SIDE_LEFT: A = (xx, yy + d, zz) B = (xx, yy, zz) C = (xx, yy, zz + d) D = (xx, yy + d, zz + d) quad_x = int(zd * 2.0) quad_y = int(yd * 2.0) #front if side == SIDE_FRONT: A = (xx, yy + d, zz + d) B = (xx, yy, zz + d) C = (xx + d, yy, zz + d) D = (xx + d, yy + d, zz + d) quad_x = int(xd * 2.0) quad_y = int(yd * 2.0) #back if side == SIDE_BACK: A = (xx + d, yy + d, zz) B = (xx + d, yy, zz) C = (xx, yy, zz) D = (xx, yy + d, zz) quad_x = 1 - int(xd * 2.0) quad_y = int(yd * 2.0) return (A, B, C, D), quad_x, quad_y def _render_face_quadrant(self, block, x, y, z, xd, yd, zd, side, d): if not self._is_blank_quadrant(x, y, z, xd, yd, zd, side): return corners, 
quad_x, quad_y = self._get_quadrant_corners_quads(x, y, z, xd, yd, zd, side, d) self._add_face(corners, block, side, quad_x, quad_y) def _render_block_sub_blocks(self, block, x, y, z): d = 0.5 for xd in (0.0, d): for yd in (0.0, d): for zd in (0.0, d): if self.block_mapper.is_blank(x, y, z, xd, yd, zd, self.reader): continue for side in ALL_SIDES: self._render_face_quadrant(block, x, y, z, xd, yd, zd, side, d) def _check_min_max(self, points): for p in points: x, y, z = p if self.max_x is None or x > self.max_x: self.max_x = x if self.min_x is None or x < self.min_x: self.min_x = x if self.max_y is None or y > self.max_y: self.max_y = y if self.min_y is None or y < self.min_y: self.min_y = y if self.max_z is None or z > self.max_z: self.max_z = z if self.min_z is None or z < self.min_z: self.min_z = z def _add_face(self, corners, block, side, quad_x=None, quad_y=None): scaled_stl = [((c[0] + self.xoffset) * self.stl_scale, -(c[2] + self.zoffset) * self.stl_scale, c[1] * self.stl_scale) for c in corners] # self.surface += poly_area(scaled_stl) self._check_min_max(scaled_stl) scaled_obj = [((c[0] + self.xoffset) * self.scale, c[1] * self.scale, (c[2] + self.zoffset) * self.scale) for c in corners] tex_x, tex_y = self.block_mapper.get_tex_uv(block, side) self._add_corners(scaled_obj, tex_x, tex_y, quad_x, quad_y) self.stl_faces.append(scaled_stl) def _process(self): for x in range(self.width): for y in range(self.height): for z in range(self.depth): block = self._get_block(x, y, z) if block is not None: self._render_block(block, x, y, z) def _make_stl(self): output = BytesIO() stlwriter = Binary_STL_Writer(output) stlwriter.add_faces(self.stl_faces) stlwriter.close() stl = output.getvalue() output.close() self.stl = stl def _get_ordered_vertices(self): ordered_vertices = [None] * len(self.vertices) for k, v in self.vertices.items(): ordered_vertices[v] = k return ordered_vertices def _as_x3d_faces(self): attrs = {} ordered_vertices = self._get_ordered_vertices() vertex_lines = ["%.5g %.5g %.5g" % v for v in ordered_vertices] coord_index = ["%i %i %i %i %i %i %i %i" % (f[0] - 1, f[2] - 1, f[4] - 1, -1, f[0] - 1, f[4] - 1, f[6] - 1, -1) for f in self.faces] tex_coord_index = ["%i %i %i %i %i %i %i %i" % (f[1] - 1, f[3] - 1, f[5] - 1, -1, f[1] - 1, f[5] - 1, f[7] - 1, -1) for f in self.faces] attrs["coordinate_point"] = " ".join(vertex_lines) attrs["coord_index"] = " ".join(coord_index) attrs["tex_coord_index"] = " ".join(tex_coord_index) attrs["timestamp"] = self.timestamp template = jinja_env.get_template("x3d_faces.xml") as_x3d = template.render(attrs) return str(as_x3d) def _as_x3d_triangles(self): attrs = {} ov = self._get_ordered_vertices() tx = self.texUvMappingsArray # each face is a pair of triangles index = [str(i) for i in range(len(self.faces) * 6)] all_the_coord_points = [] all_the_tex_coord_points = [] for f in self.faces: all_the_coord_points.append(ov[f[0] - 1]) all_the_coord_points.append(ov[f[2] - 1]) all_the_coord_points.append(ov[f[4] - 1]) all_the_coord_points.append(ov[f[0] - 1]) all_the_coord_points.append(ov[f[4] - 1]) all_the_coord_points.append(ov[f[6] - 1]) all_the_tex_coord_points.append(tx[f[1] - 1]) all_the_tex_coord_points.append(tx[f[3] - 1]) all_the_tex_coord_points.append(tx[f[5] - 1]) all_the_tex_coord_points.append(tx[f[1] - 1]) all_the_tex_coord_points.append(tx[f[5] - 1]) all_the_tex_coord_points.append(tx[f[7] - 1]) coord_points = ["%.5g %.5g %.5g" % cp for cp in all_the_coord_points] tex_coord_points = ["%.5g %.5g" % tp for tp in all_the_tex_coord_points] 
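        # Each stored face is [v1, vt1, v2, vt2, v3, vt3, v4, vt4] (1-based indices
        # built by _add_corners), so the loop above splits every quad into the two
        # triangles (v1, v2, v3) and (v1, v3, v4) and duplicates the matching
        # texture coordinates alongside them.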
attrs["coordinate_point"] = " ".join(coord_points) attrs["index"] = " ".join(index) attrs["tex_coord_point"] = " ".join(tex_coord_points) attrs["timestamp"] = self.timestamp template = jinja_env.get_template("x3d_triangles.xml") as_x3d = template.render(attrs) return str(as_x3d) def _as_csv(self): lines = [] for z in range(self.depth + 1): blocks = [] for x in range(self.width + 1): for y in range(self.height + 1): block_id, block_data = self.reader.get(x, self.height - y, z) if block_id != 0: blocks.append("%s:%s" % (block_id, block_data)) continue blocks.append(",") lines.append("".join(blocks)) lines.append("\n") return "".join(lines) def _as_schematic(self): nbtfile = nbt.NBTFile() nbtfile.name = "Schematic" nbtfile.tags.append(nbt.TAG_Short(name="Height", value=self.height)) nbtfile.tags.append(nbt.TAG_Short(name="Width", value=self.width)) nbtfile.tags.append(nbt.TAG_Short(name="Length", value=self.depth)) nbtfile.tags.append(nbt.TAG_Int(name="WEOffsetX", value=-1)) nbtfile.tags.append(nbt.TAG_Int(name="WEOffsetY", value=0)) nbtfile.tags.append(nbt.TAG_Int(name="WEOffsetZ", value=-1)) nbtfile.tags.append(nbt.TAG_Int(name="WEOriginX", value=0)) nbtfile.tags.append(nbt.TAG_Int(name="WEOriginY", value=0)) nbtfile.tags.append(nbt.TAG_Int(name="WEOriginZ", value=0)) # YZX ordering data = bytearray() blocks = bytearray() for y in range(self.height): for z in range(self.depth): for x in range(self.width): block_id, block_data = self.reader.get(x, y, z) blocks.append(block_id) data.append(block_data) blocks_tag = nbt.TAG_Byte_Array() blocks_tag.value = blocks data_tag = nbt.TAG_Byte_Array() data_tag.value = data nbtfile["Blocks"] = blocks_tag nbtfile["Data"] = data_tag nbtfile.tags.append(nbt.TAG_String(name="Materials", value=u"Alpha")) nbtfile["Entities"] = nbt.TAG_List(type=nbt.TAG_Compound) nbtfile["TileEntities"] = nbt.TAG_List(type=nbt.TAG_Compound) output = BytesIO() nbtfile.write_file(fileobj=output) as_nbt = output.getvalue() output.close() return as_nbt def _as_collada(self): attrs = {} ordered_vertices = self._get_ordered_vertices() vertix_lines = ["%.5g %.5g %.5g" % v for v in ordered_vertices] zeroindexfaces = [[x-1 for x in f] for f in self.faces] face_lines = ["%i %i %i %i %i %i %i %i" % tuple(f) for f in zeroindexfaces] attrs["obj_vertex_source_array"] = " ".join(vertix_lines) attrs["obj_vertex_source_array_accessor_count"] = str(len(ordered_vertices)) attrs["obj_vertex_source_array_count"] = str(len(ordered_vertices) * 3) attrs["obj_uv_source_array"] = " ".join(["%.5g %.5g" % uv for uv in self.texUvMappingsArray]) attrs["polylist_p"] = " ".join(face_lines) attrs["vcount"] = " ".join("4" * len(self.faces)) attrs["polylist_count"] = str(len(self.faces)) attrs["timestamp"] = self.timestamp template = jinja_env.get_template("collada2.xml") as_col = template.render(attrs) return str(as_col) def _as_obj(self): ordered_vertices = self._get_ordered_vertices() vertix_lines = ["v %.5f %.5f %.5f" % v for v in ordered_vertices] face_lines = ["f %i/%i %i/%i %i/%i %i/%i" % tuple(f) for f in self.faces] objstr = "#A printcraft model\n" objstr += "mtllib printcraft.mtl\n" objstr += "o printcraft-model\n" objstr += "\n".join(vertix_lines) objstr += "\n" objstr += self.uv_mappings objstr += "\ng blocks\n" objstr += "usemtl minecraftblocks\n" objstr += "s off\n" objstr += "\n".join(face_lines) return objstr def _add_corners(self, corners, tex_x, tex_y, quad_x=None, quad_y=None): faceinfo = [] for counter, corner in enumerate(corners): i = self.vertices.get(corner) if i is None: i = 
len(self.vertices) self.vertices[corner] = i faceinfo.append(i + 1) faceinfo.append(self._get_vt_index(counter, tex_x, tex_y, quad_x, quad_y)) self.faces.append(faceinfo) def _get_vt_index(self, corner, blockx, blocky, quad_x=None, quad_y=None): x = blockx * 2 y = blocky * 2 if quad_x: x += quad_x if quad_y: y += quad_y if quad_x is None: if corner == 0: return ((y + 2) * 33) + x + 1 if corner == 1: return (y * 33) + x + 1 if corner == 2: return (y * 33) + x + 3 if corner == 3: return ((y + 2) * 33) + x + 3 elif quad_x == 0.5: if corner == 0: return ((y + 1) * 33) + x + 1 if corner == 1: return (y * 33) + x + 1 if corner == 2: return (y * 33) + x + 2 if corner == 3: return ((y + 1) * 33) + x + 2 else: if corner == 0: return ((y + 1) * 33) + x + 1 if corner == 1: return (y * 33) + x + 1 if corner == 2: return (y * 33) + x + 2 if corner == 3: return ((y + 1) * 33) + x + 2 def _get_content_width(self): return self.max_x - self.min_x def _get_content_height(self): return self.max_y - self.min_y def _get_content_depth(self): return self.max_z - self.min_z ## file out helpers def save_as_stl(self, file_path): write_stl(file_path, self.stl) def save_as_csv(self, file_path): write_csv(file_path, self.csv) def save_as_x3d(self, file_path): write_x3d(file_path, self.x3d) def save_as_collada(self, file_path): write_collada(file_path, self.collada) def save_as_obj(self, file_path): write_obj(file_path, self.obj) obj = property(_as_obj) x3d = property(_as_x3d_triangles) x3d_triangles = property(_as_x3d_triangles) x3d_faces = property(_as_x3d_faces) collada = property(_as_collada) csv = property(_as_csv) schematic = property(_as_schematic) content_width = property(_get_content_width) content_height = property(_get_content_height) content_depth = property(_get_content_depth)
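# ---------------------------------------------------------------------------
# A standalone sketch (illustrative, not part of blockmodel) of the vertex
# de-duplication performed by _add_corners above: every distinct corner gets a
# single 1-based index and each quad face records its four corner indices,
# which is what the OBJ/X3D/Collada writers consume (the real method also
# interleaves texture-coordinate indices into each face).
def _dedupe_quads(quads):
    vertices, faces = {}, []
    for quad in quads:
        face = []
        for corner in quad:
            index = vertices.setdefault(corner, len(vertices))
            face.append(index + 1)              # OBJ-style 1-based indices
        faces.append(face)
    return list(vertices), faces

_verts, _faces = _dedupe_quads([((0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)),
                                ((1, 0, 0), (1, 0, 1), (1, 1, 1), (1, 1, 0))])
assert len(_verts) == 6 and _faces == [[1, 2, 3, 4], [2, 5, 6, 3]]
# ---------------------------------------------------------------------------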
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import numpy as np from tvm import relay def test_tflite_same_io_qnn_params(): data_dtype = "uint8" x = relay.var("x", shape=(1, 4), dtype=data_dtype) y = relay.var("y", shape=(1, 4), dtype=data_dtype) z = relay.qnn.op.add( lhs=x, rhs=y, lhs_scale=relay.const(0.00784314, "float32"), lhs_zero_point=relay.const(127, "int32"), rhs_scale=relay.const(0.00784314, "float32"), rhs_zero_point=relay.const(127, "int32"), output_scale=relay.const(0.00784314, "float32"), output_zero_point=relay.const(127, "int32"), ) func = relay.Function([x, y], z) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] x_datas = [ np.array((140, 153, 165, 178)).reshape((1, 4)), np.array((25, 153, 178, 216)).reshape((1, 4)), np.array((25, 153, 216, 165)).reshape((1, 4)), ] y_datas = [ np.array((204, 178, 165, 140)).reshape((1, 4)), np.array((204, 178, 191, 25)).reshape((1, 4)), np.array((204, 178, 25, 191)).reshape((1, 4)), ] golden_outputs = [ np.array((217, 204, 203, 191)).reshape((1, 4)), np.array((102, 204, 242, 114)).reshape((1, 4)), np.array((102, 204, 114, 229)).reshape((1, 4)), ] for i in range(0, 3): x_data = x_datas[i] y_data = y_datas[i] golden_output = golden_outputs[i] intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) np.testing.assert_equal(op_res.asnumpy(), golden_output) def test_tflite_different_io_qnn_params(): data_dtype = "uint8" x = relay.var("x", shape=(1, 4), dtype=data_dtype) y = relay.var("y", shape=(1, 4), dtype=data_dtype) z = relay.qnn.op.add( lhs=x, rhs=y, lhs_scale=relay.const(0.0156863, "float32"), lhs_zero_point=relay.const(127, "int32"), rhs_scale=relay.const(0.0117647, "float32"), rhs_zero_point=relay.const(85, "int32"), output_scale=relay.const(0.0235294, "float32"), output_zero_point=relay.const(128, "int32"), ) func = relay.Function([x, y], z) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] x_datas = [ np.array((76, 140, 153, 172)).reshape((1, 4)), np.array((133, 140, 146, 153)).reshape((1, 4)), np.array((76, 140, 172, 146)).reshape((1, 4)), ] y_datas = [ np.array((136, 119, 128, 17)).reshape((1, 4)), np.array((136, 119, 111, 94)).reshape((1, 4)), np.array((136, 119, 17, 128)).reshape((1, 4)), ] golden_outputs = [ np.array((120, 154, 167, 124)).reshape((1, 4)), np.array((158, 154, 154, 150)).reshape((1, 4)), np.array((120, 154, 124, 163)).reshape((1, 4)), ] for i in range(0, 3): x_data = x_datas[i] y_data = y_datas[i] golden_output = golden_outputs[i] intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) 
np.testing.assert_equal(op_res.asnumpy(), golden_output) def test_saturation(): # Same params data_dtype = "uint8" x = relay.var("x", shape=(1, 4), dtype=data_dtype) y = relay.var("y", shape=(1, 4), dtype=data_dtype) z = relay.qnn.op.add( lhs=x, rhs=y, lhs_scale=relay.const(0.125, "float32"), lhs_zero_point=relay.const(0, "int32"), rhs_scale=relay.const(0.125, "float32"), rhs_zero_point=relay.const(0, "int32"), output_scale=relay.const(0.125, "float32"), output_zero_point=relay.const(0, "int32"), ) func = relay.Function([x, y], z) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] mod = relay.transform.InferType()(mod) x_data = np.array((255, 1, 1, 0)).reshape((1, 4)) y_data = np.array((255, 255, 128, 0)).reshape((1, 4)) golden_output = np.array((255, 255, 129, 0)).reshape((1, 4)) intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) np.testing.assert_equal(op_res.asnumpy(), golden_output) # Same params, different scale z = relay.qnn.op.add( lhs=x, rhs=y, lhs_scale=relay.const(0.125, "float32"), lhs_zero_point=relay.const(0, "int32"), rhs_scale=relay.const(0.125, "float32"), rhs_zero_point=relay.const(0, "int32"), output_scale=relay.const(0.25, "float32"), output_zero_point=relay.const(0, "int32"), ) func = relay.Function([x, y], z) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] x_data = np.array((255, 1, 1, 0)).reshape((1, 4)) y_data = np.array((255, 255, 127, 0)).reshape((1, 4)) golden_output = np.array((255, 129, 65, 0)).reshape((1, 4)) intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) np.testing.assert_equal(op_res.asnumpy(), golden_output) # Same io params, different output scale z = relay.qnn.op.add( lhs=x, rhs=y, lhs_scale=relay.const(0.125, "float32"), lhs_zero_point=relay.const(0, "int32"), rhs_scale=relay.const(0.125, "float32"), rhs_zero_point=relay.const(0, "int32"), output_scale=relay.const(0.25, "float32"), output_zero_point=relay.const(0, "int32"), ) func = relay.Function([x, y], z) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] x_data = np.array((255, 1, 1, 0)).reshape((1, 4)) y_data = np.array((255, 255, 127, 0)).reshape((1, 4)) golden_output = np.array((255, 129, 65, 0)).reshape((1, 4)) intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) np.testing.assert_equal(op_res.asnumpy(), golden_output) # All params different z = relay.qnn.op.add( lhs=x, rhs=y, lhs_scale=relay.const(0.5, "float32"), lhs_zero_point=relay.const(0, "int32"), rhs_scale=relay.const(0.25, "float32"), rhs_zero_point=relay.const(0, "int32"), output_scale=relay.const(0.125, "float32"), output_zero_point=relay.const(0, "int32"), ) func = relay.Function([x, y], z) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] x_data = np.array((255, 0, 1, 0)).reshape((1, 4)) y_data = np.array((0, 128, 64, 0)).reshape((1, 4)) golden_output = np.array((255, 255, 132, 0)).reshape((1, 4)) intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) np.testing.assert_equal(op_res.asnumpy(), golden_output) if __name__ 
== "__main__": test_tflite_same_io_qnn_params() test_tflite_different_io_qnn_params() test_saturation()
# Copyright 2010 Google Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import errno import httplib import os import random import re import socket import time import urlparse from boto import config, UserAgent from boto.connection import AWSAuthConnection from boto.exception import InvalidUriError from boto.exception import ResumableTransferDisposition from boto.exception import ResumableUploadException from boto.s3.keyfile import KeyFile try: from hashlib import md5 except ImportError: from md5 import md5 """ Handler for Google Cloud Storage resumable uploads. See http://code.google.com/apis/storage/docs/developer-guide.html#resumable for details. Resumable uploads will retry failed uploads, resuming at the byte count completed by the last upload attempt. If too many retries happen with no progress (per configurable num_retries param), the upload will be aborted in the current process. The caller can optionally specify a tracker_file_name param in the ResumableUploadHandler constructor. If you do this, that file will save the state needed to allow retrying later, in a separate process (e.g., in a later run of gsutil). """ class ResumableUploadHandler(object): BUFFER_SIZE = 8192 RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error, socket.gaierror) # (start, end) response indicating server has nothing (upload protocol uses # inclusive numbering). SERVER_HAS_NOTHING = (0, -1) def __init__(self, tracker_file_name=None, num_retries=None): """ Constructor. Instantiate once for each uploaded file. :type tracker_file_name: string :param tracker_file_name: optional file name to save tracker URI. If supplied and the current process fails the upload, it can be retried in a new process. If called with an existing file containing a valid tracker URI, we'll resume the upload from this URI; else we'll start a new resumable upload (and write the URI to this tracker file). :type num_retries: int :param num_retries: the number of times we'll re-try a resumable upload making no progress. (Count resets every time we get progress, so upload can span many more than this number of retries.) """ self.tracker_file_name = tracker_file_name self.num_retries = num_retries self.server_has_bytes = 0 # Byte count at last server check. self.tracker_uri = None if tracker_file_name: self._load_tracker_uri_from_file() # Save upload_start_point in instance state so caller can find how # much was transferred by this ResumableUploadHandler (across retries). 
self.upload_start_point = None def _load_tracker_uri_from_file(self): f = None try: f = open(self.tracker_file_name, 'r') uri = f.readline().strip() self._set_tracker_uri(uri) except IOError, e: # Ignore non-existent file (happens first time an upload # is attempted on a file), but warn user for other errors. if e.errno != errno.ENOENT: # Will restart because self.tracker_uri == None. print('Couldn\'t read URI tracker file (%s): %s. Restarting ' 'upload from scratch.' % (self.tracker_file_name, e.strerror)) except InvalidUriError, e: # Warn user, but proceed (will restart because # self.tracker_uri == None). print('Invalid tracker URI (%s) found in URI tracker file ' '(%s). Restarting upload from scratch.' % (uri, self.tracker_file_name)) finally: if f: f.close() def _save_tracker_uri_to_file(self): """ Saves URI to tracker file if one was passed to constructor. """ if not self.tracker_file_name: return f = None try: f = open(self.tracker_file_name, 'w') f.write(self.tracker_uri) except IOError, e: raise ResumableUploadException( 'Couldn\'t write URI tracker file (%s): %s.\nThis can happen' 'if you\'re using an incorrectly configured upload tool\n' '(e.g., gsutil configured to save tracker files to an ' 'unwritable directory)' % (self.tracker_file_name, e.strerror), ResumableTransferDisposition.ABORT) finally: if f: f.close() def _set_tracker_uri(self, uri): """ Called when we start a new resumable upload or get a new tracker URI for the upload. Saves URI and resets upload state. Raises InvalidUriError if URI is syntactically invalid. """ parse_result = urlparse.urlparse(uri) if (parse_result.scheme.lower() not in ['http', 'https'] or not parse_result.netloc): raise InvalidUriError('Invalid tracker URI (%s)' % uri) self.tracker_uri = uri self.tracker_uri_host = parse_result.netloc self.tracker_uri_path = '%s?%s' % ( parse_result.path, parse_result.query) self.server_has_bytes = 0 def get_tracker_uri(self): """ Returns upload tracker URI, or None if the upload has not yet started. """ return self.tracker_uri def get_upload_id(self): """ Returns the upload ID for the resumable upload, or None if the upload has not yet started. """ # We extract the upload_id from the tracker uri. We could retrieve the # upload_id from the headers in the response but this only works for # the case where we get the tracker uri from the service. In the case # where we get the tracker from the tracking file we need to do this # logic anyway. delim = '?upload_id=' if self.tracker_uri and delim in self.tracker_uri: return self.tracker_uri[self.tracker_uri.index(delim) + len(delim):] else: return None def _remove_tracker_file(self): if (self.tracker_file_name and os.path.exists(self.tracker_file_name)): os.unlink(self.tracker_file_name) def _build_content_range_header(self, range_spec='*', length_spec='*'): return 'bytes %s/%s' % (range_spec, length_spec) def _query_server_state(self, conn, file_length): """ Queries server to find out state of given upload. Note that this method really just makes special case use of the fact that the upload server always returns the current start/end state whenever a PUT doesn't complete. Returns HTTP response from sending request. Raises ResumableUploadException if problem querying server. """ # Send an empty PUT so that server replies with this resumable # transfer's state. 
put_headers = {} put_headers['Content-Range'] = ( self._build_content_range_header('*', file_length)) put_headers['Content-Length'] = '0' return AWSAuthConnection.make_request(conn, 'PUT', path=self.tracker_uri_path, auth_path=self.tracker_uri_path, headers=put_headers, host=self.tracker_uri_host) def _query_server_pos(self, conn, file_length): """ Queries server to find out what bytes it currently has. Returns (server_start, server_end), where the values are inclusive. For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. Raises ResumableUploadException if problem querying server. """ resp = self._query_server_state(conn, file_length) if resp.status == 200: # To handle the boundary condition where the server has the complete # file, we return (server_start, file_length-1). That way the # calling code can always simply read up through server_end. (If we # didn't handle this boundary condition here, the caller would have # to check whether server_end == file_length and read one fewer byte # in that case.) return (0, file_length - 1) # Completed upload. if resp.status != 308: # This means the server didn't have any state for the given # upload ID, which can happen (for example) if the caller saved # the tracker URI to a file and then tried to restart the transfer # after that upload ID has gone stale. In that case we need to # start a new transfer (and the caller will then save the new # tracker URI to the tracker file). raise ResumableUploadException( 'Got non-308 response (%s) from server state query' % resp.status, ResumableTransferDisposition.START_OVER) got_valid_response = False range_spec = resp.getheader('range') if range_spec: # Parse 'bytes=<from>-<to>' range_spec. m = re.search('bytes=(\d+)-(\d+)', range_spec) if m: server_start = long(m.group(1)) server_end = long(m.group(2)) got_valid_response = True else: # No Range header, which means the server does not yet have # any bytes. Note that the Range header uses inclusive 'from' # and 'to' values. Since Range 0-0 would mean that the server # has byte 0, omitting the Range header is used to indicate that # the server doesn't have any bytes. return self.SERVER_HAS_NOTHING if not got_valid_response: raise ResumableUploadException( 'Couldn\'t parse upload server state query response (%s)' % str(resp.getheaders()), ResumableTransferDisposition.START_OVER) if conn.debug >= 1: print 'Server has: Range: %d - %d.' % (server_start, server_end) return (server_start, server_end) def _start_new_resumable_upload(self, key, headers=None): """ Starts a new resumable upload. Raises ResumableUploadException if any errors occur. """ conn = key.bucket.connection if conn.debug >= 1: print 'Starting new resumable upload.' self.server_has_bytes = 0 # Start a new resumable upload by sending a POST request with an # empty body and the "X-Goog-Resumable: start" header. Include any # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length # (and raise an exception if they tried to pass one, since it's # a semantic error to specify it at this point, and if we were to # include one now it would cause the server to expect that many # bytes; the POST doesn't include the actual file bytes We set # the Content-Length in the subsequent PUT, based on the uploaded # file size. 
post_headers = {} for k in headers: if k.lower() == 'content-length': raise ResumableUploadException( 'Attempt to specify Content-Length header (disallowed)', ResumableTransferDisposition.ABORT) post_headers[k] = headers[k] post_headers[conn.provider.resumable_upload_header] = 'start' resp = conn.make_request( 'POST', key.bucket.name, key.name, post_headers) # Get tracker URI from response 'Location' header. body = resp.read() # Check for various status conditions. if resp.status in [500, 503]: # Retry status 500 and 503 errors after a delay. raise ResumableUploadException( 'Got status %d from attempt to start resumable upload. ' 'Will wait/retry' % resp.status, ResumableTransferDisposition.WAIT_BEFORE_RETRY) elif resp.status != 200 and resp.status != 201: raise ResumableUploadException( 'Got status %d from attempt to start resumable upload. ' 'Aborting' % resp.status, ResumableTransferDisposition.ABORT) # Else we got 200 or 201 response code, indicating the resumable # upload was created. tracker_uri = resp.getheader('Location') if not tracker_uri: raise ResumableUploadException( 'No resumable tracker URI found in resumable initiation ' 'POST response (%s)' % body, ResumableTransferDisposition.WAIT_BEFORE_RETRY) self._set_tracker_uri(tracker_uri) self._save_tracker_uri_to_file() def _upload_file_bytes(self, conn, http_conn, fp, file_length, total_bytes_uploaded, cb, num_cb, headers): """ Makes one attempt to upload file bytes, using an existing resumable upload connection. Returns (etag, generation, metageneration) from server upon success. Raises ResumableUploadException if any problems occur. """ buf = fp.read(self.BUFFER_SIZE) if cb: # The cb_count represents the number of full buffers to send between # cb executions. if num_cb > 2: cb_count = file_length / self.BUFFER_SIZE / (num_cb-2) elif num_cb < 0: cb_count = -1 else: cb_count = 0 i = 0 cb(total_bytes_uploaded, file_length) # Build resumable upload headers for the transfer. Don't send a # Content-Range header if the file is 0 bytes long, because the # resumable upload protocol uses an *inclusive* end-range (so, sending # 'bytes 0-0/1' would actually mean you're sending a 1-byte file). if not headers: put_headers = {} else: put_headers = headers.copy() if file_length: if total_bytes_uploaded == file_length: range_header = self._build_content_range_header( '*', file_length) else: range_header = self._build_content_range_header( '%d-%d' % (total_bytes_uploaded, file_length - 1), file_length) put_headers['Content-Range'] = range_header # Set Content-Length to the total bytes we'll send with this PUT. put_headers['Content-Length'] = str(file_length - total_bytes_uploaded) http_request = AWSAuthConnection.build_base_http_request( conn, 'PUT', path=self.tracker_uri_path, auth_path=None, headers=put_headers, host=self.tracker_uri_host) http_conn.putrequest('PUT', http_request.path) for k in put_headers: http_conn.putheader(k, put_headers[k]) http_conn.endheaders() # Turn off debug on http connection so upload content isn't included # in debug stream. 
http_conn.set_debuglevel(0) while buf: http_conn.send(buf) for alg in self.digesters: self.digesters[alg].update(buf) total_bytes_uploaded += len(buf) if cb: i += 1 if i == cb_count or cb_count == -1: cb(total_bytes_uploaded, file_length) i = 0 buf = fp.read(self.BUFFER_SIZE) http_conn.set_debuglevel(conn.debug) if cb: cb(total_bytes_uploaded, file_length) if total_bytes_uploaded != file_length: # Abort (and delete the tracker file) so if the user retries # they'll start a new resumable upload rather than potentially # attempting to pick back up later where we left off. raise ResumableUploadException( 'File changed during upload: EOF at %d bytes of %d byte file.' % (total_bytes_uploaded, file_length), ResumableTransferDisposition.ABORT) resp = http_conn.getresponse() # Restore http connection debug level. http_conn.set_debuglevel(conn.debug) if resp.status == 200: # Success. return (resp.getheader('etag'), resp.getheader('x-goog-generation'), resp.getheader('x-goog-metageneration')) # Retry timeout (408) and status 500 and 503 errors after a delay. elif resp.status in [408, 500, 503]: disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY else: # Catch all for any other error codes. disposition = ResumableTransferDisposition.ABORT raise ResumableUploadException('Got response code %d while attempting ' 'upload (%s)' % (resp.status, resp.reason), disposition) def _attempt_resumable_upload(self, key, fp, file_length, headers, cb, num_cb): """ Attempts a resumable upload. Returns (etag, generation, metageneration) from server upon success. Raises ResumableUploadException if any problems occur. """ (server_start, server_end) = self.SERVER_HAS_NOTHING conn = key.bucket.connection if self.tracker_uri: # Try to resume existing resumable upload. try: (server_start, server_end) = ( self._query_server_pos(conn, file_length)) self.server_has_bytes = server_start if server_end: # If the server already has some of the content, we need to # update the digesters with the bytes that have already been # uploaded to ensure we get a complete hash in the end. print 'Catching up hash digest(s) for resumed upload' fp.seek(0) # Read local file's bytes through position server has. For # example, if server has (0, 3) we want to read 3-0+1=4 bytes. bytes_to_go = server_end + 1 while bytes_to_go: chunk = fp.read(min(key.BufferSize, bytes_to_go)) if not chunk: raise ResumableUploadException( 'Hit end of file during resumable upload hash ' 'catchup. This should not happen under\n' 'normal circumstances, as it indicates the ' 'server has more bytes of this transfer\nthan' ' the current file size. Restarting upload.', ResumableTransferDisposition.START_OVER) for alg in self.digesters: self.digesters[alg].update(chunk) bytes_to_go -= len(chunk) if conn.debug >= 1: print 'Resuming transfer.' except ResumableUploadException, e: if conn.debug >= 1: print 'Unable to resume transfer (%s).' % e.message self._start_new_resumable_upload(key, headers) else: self._start_new_resumable_upload(key, headers) # upload_start_point allows the code that instantiated the # ResumableUploadHandler to find out the point from which it started # uploading (e.g., so it can correctly compute throughput). 
if self.upload_start_point is None: self.upload_start_point = server_end total_bytes_uploaded = server_end + 1 # Corner case: Don't attempt to seek if we've already uploaded the # entire file, because if the file is a stream (e.g., the KeyFile # wrapper around input key when copying between providers), attempting # to seek to the end of file would result in an InvalidRange error. if file_length < total_bytes_uploaded: fp.seek(total_bytes_uploaded) conn = key.bucket.connection # Get a new HTTP connection (vs conn.get_http_connection(), which reuses # pool connections) because httplib requires a new HTTP connection per # transaction. (Without this, calling http_conn.getresponse() would get # "ResponseNotReady".) http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port, conn.is_secure) http_conn.set_debuglevel(conn.debug) # Make sure to close http_conn at end so if a local file read # failure occurs partway through server will terminate current upload # and can report that progress on next attempt. try: return self._upload_file_bytes(conn, http_conn, fp, file_length, total_bytes_uploaded, cb, num_cb, headers) except (ResumableUploadException, socket.error): resp = self._query_server_state(conn, file_length) if resp.status == 400: raise ResumableUploadException('Got 400 response from server ' 'state query after failed resumable upload attempt. This ' 'can happen for various reasons, including specifying an ' 'invalid request (e.g., an invalid canned ACL) or if the ' 'file size changed between upload attempts', ResumableTransferDisposition.ABORT) else: raise finally: http_conn.close() def _check_final_md5(self, key, etag): """ Checks that etag from server agrees with md5 computed before upload. This is important, since the upload could have spanned a number of hours and multiple processes (e.g., gsutil runs), and the user could change some of the file and not realize they have inconsistent data. """ if key.bucket.connection.debug >= 1: print 'Checking md5 against etag.' if key.md5 != etag.strip('"\''): # Call key.open_read() before attempting to delete the # (incorrect-content) key, so we perform that request on a # different HTTP connection. This is neededb because httplib # will return a "Response not ready" error if you try to perform # a second transaction on the connection. key.open_read() key.close() key.delete() raise ResumableUploadException( 'File changed during upload: md5 signature doesn\'t match etag ' '(incorrect uploaded object deleted)', ResumableTransferDisposition.ABORT) def handle_resumable_upload_exception(self, e, debug): if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS): if debug >= 1: print('Caught non-retryable ResumableUploadException (%s); ' 'aborting but retaining tracker file' % e.message) raise elif (e.disposition == ResumableTransferDisposition.ABORT): if debug >= 1: print('Caught non-retryable ResumableUploadException (%s); ' 'aborting and removing tracker file' % e.message) self._remove_tracker_file() raise else: if debug >= 1: print('Caught ResumableUploadException (%s) - will retry' % e.message) def track_progress_less_iterations(self, server_had_bytes_before_attempt, roll_back_md5=True, debug=0): # At this point we had a re-tryable failure; see if made progress. if self.server_has_bytes > server_had_bytes_before_attempt: self.progress_less_iterations = 0 # If progress, reset counter. 
else: self.progress_less_iterations += 1 if roll_back_md5: # Rollback any potential hash updates, as we did not # make any progress in this iteration. self.digesters = self.digesters_before_attempt if self.progress_less_iterations > self.num_retries: # Don't retry any longer in the current process. raise ResumableUploadException( 'Too many resumable upload attempts failed without ' 'progress. You might try this upload again later', ResumableTransferDisposition.ABORT_CUR_PROCESS) # Use binary exponential backoff to desynchronize client requests. sleep_time_secs = random.random() * (2**self.progress_less_iterations) if debug >= 1: print ('Got retryable failure (%d progress-less in a row).\n' 'Sleeping %3.1f seconds before re-trying' % (self.progress_less_iterations, sleep_time_secs)) time.sleep(sleep_time_secs) def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None): """ Upload a file to a key into a bucket on GS, using GS resumable upload protocol. :type key: :class:`boto.s3.key.Key` or subclass :param key: The Key object to which data is to be uploaded :type fp: file-like object :param fp: The file pointer to upload :type headers: dict :param headers: The headers to pass along with the PUT request :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS, and the second representing the total number of bytes that need to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter, this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. Providing a negative integer will cause your callback to be called with each buffer read. :type hash_algs: dictionary :param hash_algs: (optional) Dictionary mapping hash algorithm descriptions to corresponding state-ful hashing objects that implement update(), digest(), and copy() (e.g. hashlib.md5()). Defaults to {'md5': md5()}. Raises ResumableUploadException if a problem occurs during the transfer. """ if not headers: headers = {} # If Content-Type header is present and set to None, remove it. # This is gsutil's way of asking boto to refrain from auto-generating # that header. CT = 'Content-Type' if CT in headers and headers[CT] is None: del headers[CT] headers['User-Agent'] = UserAgent # Determine file size different ways for case where fp is actually a # wrapper around a Key vs an actual file. if isinstance(fp, KeyFile): file_length = fp.getkey().size else: fp.seek(0, os.SEEK_END) file_length = fp.tell() fp.seek(0) debug = key.bucket.connection.debug # Compute the MD5 checksum on the fly. if hash_algs is None: hash_algs = {'md5': md5} self.digesters = dict( (alg, hash_algs[alg]()) for alg in hash_algs or {}) # Use num-retries from constructor if one was provided; else check # for a value specified in the boto config file; else default to 5. if self.num_retries is None: self.num_retries = config.getint('Boto', 'num_retries', 6) self.progress_less_iterations = 0 while True: # Retry as long as we're making progress. 
server_had_bytes_before_attempt = self.server_has_bytes self.digesters_before_attempt = dict( (alg, self.digesters[alg].copy()) for alg in self.digesters) try: # Save generation and metageneration in class state so caller # can find these values, for use in preconditions of future # operations on the uploaded object. (etag, self.generation, self.metageneration) = ( self._attempt_resumable_upload(key, fp, file_length, headers, cb, num_cb)) # Get the final digests for the uploaded content. for alg in self.digesters: key.local_hashes[alg] = self.digesters[alg].digest() # Upload succceded, so remove the tracker file (if have one). self._remove_tracker_file() self._check_final_md5(key, etag) key.generation = self.generation if debug >= 1: print 'Resumable upload complete.' return except self.RETRYABLE_EXCEPTIONS, e: if debug >= 1: print('Caught exception (%s)' % e.__repr__()) if isinstance(e, IOError) and e.errno == errno.EPIPE: # Broken pipe error causes httplib to immediately # close the socket (http://bugs.python.org/issue5542), # so we need to close the connection before we resume # the upload (which will cause a new connection to be # opened the next time an HTTP request is sent). key.bucket.connection.connection.close() except ResumableUploadException, e: self.handle_resumable_upload_exception(e, debug) self.track_progress_less_iterations(server_had_bytes_before_attempt, True, debug)
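# A minimal usage sketch for the handler above.  It assumes `key` is a
# boto Google Cloud Storage Key object for the destination, and the source
# path and tracker-file location are illustrative; the callback simply
# reports progress, as documented for send_file().
def example_resumable_upload(key, src_path='photo.jpg',
                             tracker_path='/tmp/photo.upload.tracker'):
    def report_progress(bytes_sent, total_bytes):
        print '%d of %d bytes uploaded' % (bytes_sent, total_bytes)

    handler = ResumableUploadHandler(tracker_file_name=tracker_path,
                                     num_retries=10)
    fp = open(src_path, 'rb')
    try:
        # send_file() retries/resumes internally; on unrecoverable failure
        # it raises ResumableUploadException and, unless told to abort,
        # leaves the tracker file in place so a later run can resume.
        handler.send_file(key, fp, headers={}, cb=report_progress, num_cb=20)
    finally:
        fp.close()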
# # Copyright 2016 University of Oxford # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Test cases for the htsget CLI. """ from __future__ import print_function from __future__ import division import logging import os import sys import tempfile import unittest import mock import htsget.cli as cli import htsget.exceptions as exceptions class TestMain(unittest.TestCase): """ Simple tests for the main function. """ with mock.patch("htsget.cli.run") as mocked_run, \ mock.patch("argparse.ArgumentParser.parse_args") as mocked_parse: cli.htsget_main() mocked_parse.assert_called_once() mocked_run.assert_called_once() class TestHtsgetArgumentParser(unittest.TestCase): """ Tests the parser to ensure it parses input values correctly. """ def parse_args(self, args): parser = cli.get_htsget_parser() return parser.parse_args(args) def test_defaults(self): args = self.parse_args(["URL"]) self.assertEqual(args.url, "URL") self.assertEqual(args.format, None) self.assertEqual(args.reference_name, None) self.assertEqual(args.reference_md5, None) self.assertEqual(args.start, None) self.assertEqual(args.end, None) self.assertEqual(args.output, None) self.assertEqual(args.max_retries, 5) self.assertEqual(args.retry_wait, 5) self.assertEqual(args.timeout, 120) self.assertEqual(args.bearer_token, None) class TestHtsgetRun(unittest.TestCase): """ Tests to ensure the run function correctly passes along parameters. 
""" def setUp(self): fd, self.output_filename = tempfile.mkstemp(prefix="htsget_cli_test_") os.close(fd) def tearDown(self): os.unlink(self.output_filename) def run_cmd(self, cmd): parser = cli.get_htsget_parser() args = parser.parse_args(cmd.split()) with mock.patch("htsget.get") as mocked_get, \ mock.patch("sys.exit") as mocked_exit: cli.run(args) self.assertEqual(mocked_get.call_count, 1) mocked_exit.assert_called_once_with(0) return mocked_get.call_args def test_defaults(self): url = "http://example.com/stuff" args, kwargs = self.run_cmd("{}".format(url)) self.assertEqual(args[0], url) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], None) self.assertEqual(kwargs["reference_md5"], None) self.assertEqual(kwargs["data_format"], None) def test_defaults_with_file(self): url = "http://example.com/stuff" args, kwargs = self.run_cmd("{} -O {}".format(url, self.output_filename)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], None) self.assertEqual(kwargs["reference_md5"], None) def test_reference_name(self): url = "http://example.com/otherstuff" for reference_name in ["chr1", "1", "x" * 100]: args, kwargs = self.run_cmd("{} -O {} -r {}".format( url, self.output_filename, reference_name)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], reference_name) self.assertEqual(kwargs["reference_md5"], None) args, kwargs = self.run_cmd("{} -O {} --reference-name {}".format( url, self.output_filename, reference_name)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], reference_name) self.assertEqual(kwargs["reference_md5"], None) def test_reference_md5(self): url = "http://example.com/otherstuff" reference_md5 = "d7866be4ab9deb8b26d38a978b0684e3" args, kwargs = self.run_cmd("{} -O {} -m {}".format( url, self.output_filename, reference_md5)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], None) self.assertEqual(kwargs["reference_md5"], reference_md5) args, kwargs = self.run_cmd("{} -O {} --reference-md5 {}".format( url, self.output_filename, reference_md5)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], None) self.assertEqual(kwargs["reference_md5"], reference_md5) def test_start(self): url = "http://example.com/otherstuff" reference_name = "chr2" for start in [0, 100, 2**32]: args, kwargs = self.run_cmd("{} -O {} -r {} -s {}".format( url, self.output_filename, reference_name, start)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], start) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], reference_name) args, kwargs = self.run_cmd("{} -O {} -r {} --start {}".format( url, self.output_filename, reference_name, start)) self.assertEqual(args[0], url) 
self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], start) self.assertEqual(kwargs["end"], None) self.assertEqual(kwargs["reference_name"], reference_name) def test_end(self): url = "http://example.com/otherstuff" reference_name = "chr2" for end in [0, 100, 2**32]: args, kwargs = self.run_cmd("{} -O {} -r {} -e {}".format( url, self.output_filename, reference_name, end)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], end) self.assertEqual(kwargs["reference_name"], reference_name) args, kwargs = self.run_cmd("{} -O {} -r {} --end {}".format( url, self.output_filename, reference_name, end)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], None) self.assertEqual(kwargs["end"], end) self.assertEqual(kwargs["reference_name"], reference_name) def test_start_end(self): url = "http://example.com/otherstuff" reference_name = "chr2" for start, end in [(0, 1), (100, 200), (5, 2**32)]: args, kwargs = self.run_cmd("{} -O {} -r {} -s {} -e {}".format( url, self.output_filename, reference_name, start, end)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["start"], start) self.assertEqual(kwargs["end"], end) self.assertEqual(kwargs["reference_name"], reference_name) def test_format(self): url = "http://example.com/otherstuff" for fmt in ["bam", "CRAM", "anything"]: args, kwargs = self.run_cmd("{} -O {} -f {}".format( url, self.output_filename, fmt)) self.assertEqual(args[0], url) self.assertEqual(args[1].name, self.output_filename) self.assertEqual(kwargs["data_format"], fmt) def test_max_retries(self): url = "http://example.com/otherstuff" for max_retries in [0, 5, 10]: args, kwargs = self.run_cmd("{} -O {} -M {}".format( url, self.output_filename, max_retries)) kwargs["max_retries"] = max_retries args, kwargs = self.run_cmd("{} -O {} --max-retries {}".format( url, self.output_filename, max_retries)) kwargs["max_retries"] = max_retries def test_retry_wait(self): url = "http://example.com/otherstuff" for retry_wait in [0, 5, 10, 1.4]: args, kwargs = self.run_cmd("{} -O {} -W {}".format( url, self.output_filename, retry_wait)) kwargs["retry_wait"] = retry_wait args, kwargs = self.run_cmd("{} -O {} --retry-wait {}".format( url, self.output_filename, retry_wait)) kwargs["retry_wait"] = retry_wait def test_timeout(self): url = "http://example.com/otherstuff" for timeout in [0, 5, 10, 1.4]: args, kwargs = self.run_cmd("{} -O {} -r {}".format( url, self.output_filename, timeout)) kwargs["timeout"] = timeout args, kwargs = self.run_cmd("{} -O {} --timeout {}".format( url, self.output_filename, timeout)) kwargs["timeout"] = timeout def test_bearer_token(self): url = "http://example.com/otherstuff" for bearer_token in ["yy", "x" * 1024]: args, kwargs = self.run_cmd("{} -O {} -b {}".format( url, self.output_filename, bearer_token)) kwargs["bearer-token"] = bearer_token args, kwargs = self.run_cmd("{} -O {} --bearer-token {}".format( url, self.output_filename, bearer_token)) kwargs["bearer_token"] = bearer_token def test_stdout_zero_retries(self): url = "http://example.com/stuff" args, kwargs = self.run_cmd("{}".format(url)) self.assertEqual(args[0], url) self.assertEqual(kwargs["max_retries"], 0) # this is true even if we specify it explicitly args, kwargs = self.run_cmd("{} --max-retries 10".format(url)) self.assertEqual(args[0], url) 
self.assertEqual(kwargs["max_retries"], 0) class TestVerbosity(unittest.TestCase): """ Tests to ensure the verbosity settings work. """ def run_cmd(self, cmd): parser = cli.get_htsget_parser() args = parser.parse_args(cmd.split()) with mock.patch("htsget.get") as mocked_get, \ mock.patch("sys.exit") as mocked_exit, \ mock.patch("logging.basicConfig") as mocked_log_config: cli.run(args) self.assertEqual(mocked_get.call_count, 1) self.assertEqual(mocked_log_config.call_count, 1) mocked_exit.assert_called_once_with(0) return mocked_log_config.call_args[1]["level"] def test_defaults(self): level = self.run_cmd("http://url.com") self.assertEqual(level, logging.WARNING) def test_repeats(self): level = self.run_cmd("http://url.com -v") self.assertEqual(level, logging.INFO) level = self.run_cmd("http://url.com --verbose") self.assertEqual(level, logging.INFO) level = self.run_cmd("http://url.com -vv") self.assertEqual(level, logging.DEBUG) level = self.run_cmd("http://url.com -v -v") self.assertEqual(level, logging.DEBUG) level = self.run_cmd("http://url.com --verbose --verbose") self.assertEqual(level, logging.DEBUG) class TestRuntimeErrors(unittest.TestCase): """ Test cases to cover the various error conditions that may occur at runtime. """ def assert_exception_writes_error_message(self, exception, message): parser = cli.get_htsget_parser() args = parser.parse_args(["https://some.url"]) saved_stderr = sys.stderr try: with tempfile.TemporaryFile("w+") as tmp_stderr: sys.stderr = tmp_stderr with mock.patch("htsget.get") as mocked_get, \ mock.patch("sys.exit") as mocked_exit, \ mock.patch("logging.basicConfig"): mocked_get.side_effect = exception cli.run(args) tmp_stderr.seek(0) stderr = tmp_stderr.read().strip() mocked_exit.assert_called_once_with(1) finally: sys.stderr = saved_stderr self.assertTrue(stderr.endswith(message)) def test_keyboard_interrupt(self): self.assert_exception_writes_error_message(KeyboardInterrupt, "interrupted") def test_exception_wrapper(self): msg = "some message" self.assert_exception_writes_error_message( exceptions.ExceptionWrapper(Exception(msg)), msg) def test_htsget_exception(self): msg = "some other message" self.assert_exception_writes_error_message(exceptions.HtsgetException(msg), msg)
import json import time import attr from attr.validators import instance_of as io, optional from typing import * import couchbase_core._libcouchbase as LCB from couchbase_core import mk_formstr from couchbase.options import OptionBlock, OptionBlockTimeOut, forward_args, timedelta from couchbase.management.admin import METHMAP from couchbase.management.generic import GenericManager from couchbase.exceptions import (ErrorMapper, HTTPException, QueryIndexAlreadyExistsException, WatchQueryIndexTimeoutException, QueryIndexNotFoundException, InvalidArgumentException) try: from typing import Protocol except BaseException: from typing_extensions import Protocol class QueryErrorMapper(ErrorMapper): @staticmethod def mapping(): # type: (...) -> Dict[CBErrorType,Dict[Any, CBErrorType]] return {HTTPException: {".*[iI]ndex.*already exists.*": QueryIndexAlreadyExistsException, ".*[iI]ndex.*[nN]ot [fF]ound.*": QueryIndexNotFoundException}} def is_null_or_empty( value # type: str ) -> bool: return not (value and value.split()) @QueryErrorMapper.wrap class QueryIndexManager(GenericManager): def __init__(self, parent_cluster): """ Query Index Manager The Query Index Manager interface contains the means for managing indexes used for queries. :param parent_cluster: Parent cluster """ super(QueryIndexManager, self).__init__(parent_cluster) def _http_request(self, **kwargs): # the kwargs can override the defaults imeth = None method = kwargs.get('method', 'GET') if not method in METHMAP: raise InvalidArgumentException("Unknown HTTP Method", method) imeth = METHMAP[method] return self._admin_bucket._http_request( type=LCB.LCB_HTTP_TYPE_QUERY, path=kwargs['path'], method=imeth, content_type=kwargs.get('content_type', 'application/json'), post_data=kwargs.get('content', None), response_format=LCB.FMT_JSON, timeout=kwargs.get('timeout', None)) def _validate_scope_and_collection(self, # type: "QueryIndexManager" scope=None, # type: str collection=None # type: str ) -> bool: if not (scope and scope.split()) and (collection and collection.split()): raise InvalidArgumentException( "Both scope and collection must be set. Invalid scope.") if (scope and scope.split()) and not (collection and collection.split()): raise InvalidArgumentException( "Both scope and collection must be set. 
Invalid collection.") def _build_keyspace(self, # type: "QueryIndexManager" bucket, # type: str scope=None, # type: str collection=None # type: str ) -> str: # None AND empty check done in validation, only check for None if scope and collection: return "`{}`.`{}`.`{}`".format(bucket, scope, collection) if scope: return "`{}`.`{}`".format(bucket, scope) return "`{}`".format(bucket) def _create_index(self, bucket_name, fields, index_name=None, **kwargs): scope_name = kwargs.get("scope_name", None) collection_name = kwargs.get("collection_name", None) self._validate_scope_and_collection(scope_name, collection_name) primary = kwargs.get("primary", False) condition = kwargs.get("condition", None) if primary and fields: raise TypeError('Cannot create primary index with explicit fields') elif not primary and not fields: raise ValueError('Fields required for non-primary index') if condition and primary: raise ValueError('cannot specify condition for primary index') query_str = "" if not fields: query_str += "CREATE PRIMARY INDEX" else: query_str += "CREATE INDEX" if index_name and index_name.split(): query_str += " `{}` ".format(index_name) query_str += " ON {} ".format(self._build_keyspace( bucket_name, scope_name, collection_name)) if fields: field_names = ["`{}`".format(f) for f in fields] query_str += "({})".format(", ".join(field_names)) if condition: query_str += " WHERE {}".format(condition) options = {} deferred = kwargs.get("deferred", False) if deferred: options["defer_build"] = deferred num_replicas = kwargs.get("num_replicas", None) if num_replicas: options["num_replica"] = num_replicas if options: query_str += " WITH {{{}}}".format( ", ".join(["'{0}':{1}".format(k, v) for k, v in options.items()])) def possibly_raise(error): if isinstance(error, list) and "msg" in error[0] and "already exists" in error[0]["msg"]: if not kwargs.get('ignore_if_exists', False): raise try: resp = self._http_request( path="", method="POST", content=mk_formstr({"statement": query_str}), content_type='application/x-www-form-urlencoded', **kwargs ).value if "errors" in resp and possibly_raise(resp["errors"]): msg = resp["errors"][0].get("msg", "Index already exists") raise QueryIndexAlreadyExistsException.pyexc( msg, resp["errors"]) except HTTPException as h: error = getattr( getattr( h, 'objextra', None), 'value', {}).get( 'errors', "") if possibly_raise(error): raise def _drop_index(self, bucket_name, index_name=None, **kwargs): scope_name = kwargs.get("scope_name", None) collection_name = kwargs.get("collection_name", None) self._validate_scope_and_collection(scope_name, collection_name) # previous ignore_missing was a viable kwarg - should only have ignore_if_not_exists ignore_missing = kwargs.pop("ignore_missing", None) if ignore_missing: kwargs["ignore_if_not_exists"] = ignore_missing query_str = "" keyspace = self._build_keyspace( bucket_name, scope_name, collection_name) if not index_name: query_str += "DROP PRIMARY INDEX ON {}".format(keyspace) else: if scope_name and collection_name: query_str += "DROP INDEX `{0}` ON {1}".format( index_name, keyspace) else: query_str += "DROP INDEX {0}.`{1}`".format( keyspace, index_name) def possibly_raise(error): if isinstance(error, list) and "msg" in error[0] and "not found" in error[0]["msg"]: if not kwargs.get('ignore_if_not_exists', False): return True try: resp = self._http_request( path="", method="POST", content=mk_formstr({"statement": query_str}), content_type='application/x-www-form-urlencoded', **kwargs ).value if "errors" in resp and 
possibly_raise(resp["errors"]): msg = resp["errors"][0].get("msg", "Index not found") raise QueryIndexNotFoundException.pyexc(msg, resp["errors"]) except HTTPException as h: error = getattr( getattr( h, 'objextra', None), 'value', {}).get( 'errors', "") if possibly_raise(error): raise def get_all_indexes(self, # type: "QueryIndexManager" bucket_name, # type: str *options, # type: "GetAllQueryIndexOptions" **kwargs # type: Any ): # type: (...) -> List[QueryIndex] """ Fetches all indexes from the server. :param str bucket_name: the name of the bucket. :param GetAllQueryIndexOptions options: Options to use for getting all indexes. :param Any kwargs: Override corresponding value in options. :return: A list of QueryIndex objects. :raises: InvalidArgumentsException """ final_args = forward_args(kwargs, *options) scope_name = final_args.get("scope_name", None) collection_name = final_args.get("collection_name", None) if scope_name and collection_name: query_str = """ SELECT idx.* FROM system:indexes AS idx WHERE `bucket_id`="{0}" AND `scope_id`="{1}" AND `keyspace_id`="{2}" AND `using`="gsi" ORDER BY is_primary DESC, name ASC """.format(bucket_name, scope_name, collection_name) elif scope_name: query_str = """ SELECT idx.* FROM system:indexes AS idx WHERE `bucket_id`="{0}" AND `scope_id`="{1}" AND `using`="gsi" ORDER BY is_primary DESC, name ASC """.format(bucket_name, scope_name) else: query_str = """ SELECT idx.* FROM system:indexes AS idx WHERE ( (`bucket_id` IS MISSING AND `keyspace_id`="{0}") OR `bucket_id`="{0}" ) AND `using`="gsi" ORDER BY is_primary DESC, name ASC """.format(bucket_name) response = self._http_request( path="", method="POST", content=mk_formstr({"statement": query_str}), content_type='application/x-www-form-urlencoded', **final_args ).value if response and "results" in response: results = response.get("results") res = list(map(QueryIndex.from_server, results)) return res return [] def create_index(self, # type: "QueryIndexManager" bucket_name, # type: str index_name, # type: str fields, # type: Iterable[str] *options, # type: "CreateQueryIndexOptions" **kwargs ): # type: (...) -> None """ Creates a new index. :param str bucket_name: name of the bucket. :param str index_name: the name of the index. :param Iterable[str] fields: Fields over which to create the index. :param CreateQueryIndexOptions options: Options to use when creating index. :param Any kwargs: Override corresponding value in options. :raises: QueryIndexAlreadyExistsException :raises: InvalidArgumentsException """ # CREATE INDEX index_name ON bucket_name WITH { "num_replica": 2 } # https://docs.couchbase.com/server/current/n1ql/n1ql-language-reference/createindex.html # final_args = forward_args(kwargs, *options) self._create_index(bucket_name, fields, index_name, **final_args) def create_primary_index(self, # type: "QueryIndexManager" bucket_name, # type: str *options, # type: "CreatePrimaryQueryIndexOptions" **kwargs ): """ Creates a new primary index. :param str bucket_name: name of the bucket. :param str index_name: name of the index. :param CreatePrimaryQueryIndexOptions options: Options to use when creating primary index :param Any kwargs: Override corresponding values in options. 
:raises: QueryIndexAlreadyExistsException :raises: InvalidArgumentsException """ # CREATE INDEX index_name ON bucket_name WITH { "num_replica": 2 } # https://docs.couchbase.com/server/current/n1ql/n1ql-language-reference/createindex.html # kwargs['primary'] = True final_args = forward_args(kwargs, *options) index_name = final_args.pop("index_name", None) self._create_index(bucket_name, [], index_name, **final_args) def drop_index(self, # type: "QueryIndexManager" bucket_name, # type: str index_name, # type: str *options, # type: "DropQueryIndexOptions" **kwargs): """ Drops an index. :param str bucket_name: name of the bucket. :param str index_name: name of the index. :param DropQueryIndexOptions options: Options for dropping index. :param Any kwargs: Override corresponding value in options. :raises: QueryIndexNotFoundException :raises: InvalidArgumentsException """ final_args = forward_args(kwargs, *options) self._drop_index(bucket_name, index_name, **final_args) def drop_primary_index(self, # type: "QueryIndexManager" bucket_name, # type: str *options, # type: "DropPrimaryQueryIndexOptions" **kwargs): """ Drops a primary index. :param bucket_name: name of the bucket. :param index_name: name of the index. :param ignore_if_not_exists: Don't error/throw if the index does not exist. :param timeout: the time allowed for the operation to be terminated. This is controlled by the client. :raises: QueryIndexNotFoundException :raises: InvalidArgumentsException """ final_args = forward_args(kwargs, *options) index_name = final_args.pop("index_name", None) self._drop_index(bucket_name, index_name, **final_args) def watch_indexes(self, # type: "QueryIndexManager" bucket_name, # type: str index_names, # type: Iterable[str] *options, # type: "WatchQueryIndexOptions" **kwargs): """ Watch polls indexes until they are online. :param str bucket_name: name of the bucket. :param Iterable[str] index_names: name(s) of the index(es). :param WatchQueryIndexOptions options: Options for request to watch indexes. :param Any kwargs: Override corresponding valud in options. 
:raises: QueryIndexNotFoundException :raises: WatchQueryIndexTimeoutException """ final_args = forward_args(kwargs, *options) scope_name = final_args.get("scope_name", None) collection_name = final_args.get("collection_name", None) self._validate_scope_and_collection(scope_name, collection_name) if final_args.get("watch_primary", False): index_names.append("#primary") timeout = final_args.get("timeout", None) if not timeout: raise ValueError( 'Must specify a timeout condition for watch indexes') def check_indexes(index_names, indexes): for idx_name in index_names: match = next((i for i in indexes if i.name == idx_name), None) if not match: raise QueryIndexNotFoundException( "Cannot find index with name: {}".format(idx_name)) return all(map(lambda i: i.state == "online", indexes)) # timeout is converted to microsecs via final_args() timeout_millis = timeout / 1000 interval_millis = float(50) start = time.perf_counter() time_left = timeout_millis while True: opts = GetAllQueryIndexOptions( timeout=timedelta(milliseconds=time_left)) if scope_name: opts["scope_name"] = scope_name opts["collection_name"] = collection_name indexes = self.get_all_indexes(bucket_name, opts) all_online = check_indexes(index_names, indexes) if all_online: break interval_millis += 500 if interval_millis > 1000: interval_millis = 1000 time_left = timeout_millis - ((time.perf_counter() - start) * 1000) if interval_millis > time_left: interval_millis = time_left if time_left <= 0: raise WatchQueryIndexTimeoutException( "Failed to find all indexes online within the alloted time.") time.sleep(interval_millis / 1000) def _build_deferred_prior_6_5(self, bucket_name, **final_args): """ ** INTERNAL ** """ indexes = self.get_all_indexes(bucket_name, GetAllQueryIndexOptions( timeout=final_args.get("timeout", None))) deferred_indexes = [ idx.name for idx in indexes if idx.state in ["deferred", "pending"]] query_str = "BUILD INDEX ON `{}` ({})".format( bucket_name, ", ".join(["`{}`".format(di) for di in deferred_indexes])) self._http_request( path="", method="POST", content=mk_formstr({"statement": query_str}), content_type='application/x-www-form-urlencoded', **final_args ) def _build_deferred_6_5_plus(self, bucket_name, **final_args): """ ** INTERNAL ** """ scope_name = final_args.get("scope_name", None) collection_name = final_args.get("collection_name", None) self._validate_scope_and_collection(scope_name, collection_name) keyspace = self._build_keyspace( bucket_name, scope_name, collection_name) if scope_name and collection_name: inner_query_str = """ SELECT RAW idx.name FROM system:indexes AS idx WHERE `bucket_id`="{0}" AND `scope_id`="{1}" AND `keyspace_id`="{2}" AND state="deferred" """.format(bucket_name, scope_name, collection_name) else: inner_query_str = """ SELECT RAW idx.name FROM system:indexes AS idx WHERE ( (`bucket_id` IS MISSING AND `keyspace_id`="{0}") OR `bucket_id`="{0}" ) AND state="deferred" """.format(bucket_name) query_str = "BUILD INDEX ON {} (({}))".format( keyspace, inner_query_str) self._http_request( path="", method="POST", content=mk_formstr({"statement": query_str}), content_type='application/x-www-form-urlencoded', **final_args ) def build_deferred_indexes(self, # type: "QueryIndexManager" bucket_name, # type: str *options, # type: "BuildDeferredQueryIndexOptions" **kwargs ): """ Build Deferred builds all indexes which are currently in deferred state. :param str bucket_name: name of the bucket. :param BuildDeferredQueryIndexOptions options: Options for building deferred indexes. 
:param Any kwargs: Override corresponding value in options. :raise: InvalidArgumentsException """ final_args = forward_args(kwargs, *options) if self._admin_bucket._is_6_5_plus(): self._build_deferred_6_5_plus(bucket_name, **final_args) else: self._build_deferred_prior_6_5(bucket_name, **final_args) class IndexType(object): pass @attr.s class QueryIndex(Protocol): """The QueryIndex protocol provides a means of mapping a query index into an object.""" name = attr.ib(validator=io(str)) # type: str is_primary = attr.ib(validator=io(bool)) # type: bool type = attr.ib(validator=io(IndexType), type=IndexType) # type: IndexType state = attr.ib(validator=io(str)) # type: str namespace = attr.ib(validator=io(str)) # type: str keyspace = attr.ib(validator=io(str)) # type: str index_key = attr.ib(validator=io(Iterable)) # type: Iterable[str] condition = attr.ib(validator=io(str)) # type: str bucket_name = attr.ib(validator=optional(io(str))) # type: Optional[str] scope_name = attr.ib(validator=optional(io(str))) # type: Optional[str] collection_name = attr.ib( validator=optional(io(str))) # type: Optional[str] partition = attr.ib(validator=optional( validator=io(str))) # type: Optional[str] @classmethod def from_server(cls, json_data # type: Dict[str, Any] ): return cls(json_data.get("name"), bool(json_data.get("is_primary")), IndexType(), json_data.get("state"), json_data.get("keyspace_id"), json_data.get("namespace_id"), [], json_data.get("condition", ""), json_data.get( "bucket_id", json_data.get("keyspace_id", "")), json_data.get("scope_id", ""), json_data.get("keyspace_id", ""), json_data.get("partition", None) ) class GetAllQueryIndexOptions(OptionBlockTimeOut): @overload def __init__(self, timeout=None, # type: timedelta scope_name=None, # type: str collection_name=None # type: str ): pass def __init__(self, **kwargs): """ Get all query indexes options :param timeout: operation timeout in seconds :param scope_name: **UNCOMMITTED** scope_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Nme of the scope where the index belongs :param collection_name: **UNCOMMITTED** collection_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Name of the collection where the index belongs """ super(GetAllQueryIndexOptions, self).__init__(**kwargs) class CreateQueryIndexOptions(OptionBlockTimeOut): @overload def __init__(self, timeout=None, # type: timedelta ignore_if_exists=None, # type: bool num_replicas=None, # type: int deferred=None, # type: bool condition=None, # type: str scope_name=None, # type: str collection_name=None # type: str ): pass def __init__(self, **kwargs): """ Query Index creation options :param timeout: operation timeout in seconds :param ignore_if_exists: don't throw an exception if index already exists :param num_replicas: number of replicas :param deferred: whether the index creation should be deferred :param condition: 'where' condition for partial index creation :param scope_name: **UNCOMMITTED** scope_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Nme of the scope where the index belongs :param collection_name: **UNCOMMITTED** collection_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. 
Name of the collection where the index belongs """ if 'ignore_if_exists' not in kwargs: kwargs['ignore_if_exists'] = False super(CreateQueryIndexOptions, self).__init__(**kwargs) class CreatePrimaryQueryIndexOptions(CreateQueryIndexOptions): @overload def __init__(self, index_name=None, # type: str timeout=None, # type: timedelta ignore_if_exists=None, # type: bool num_replicas=None, # type: int deferred=None, # type: bool scope_name=None, # type: str collection_name=None # type: str ): pass def __init__(self, **kwargs): """ Query Primary Index creation options :param index_name: name of primary index :param timeout: operation timeout in seconds :param ignore_if_exists: don't throw an exception if index already exists :param num_replicas: number of replicas :param deferred: whether the index creation should be deferred :param scope_name: **UNCOMMITTED** scope_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Nme of the scope where the index belongs :param collection_name: **UNCOMMITTED** collection_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Name of the collection where the index belongs """ super(CreatePrimaryQueryIndexOptions, self).__init__(**kwargs) class DropQueryIndexOptions(OptionBlockTimeOut): @overload def __init__(self, ignore_if_not_exists=None, # type: bool timeout=None, # type: timedelta scope_name=None, # type: str collection_name=None # type: str ): pass def __init__(self, **kwargs): """ Drop query index options :param ignore_if_exists: don't throw an exception if index already exists :param timeout: operation timeout in seconds :param scope_name: **UNCOMMITTED** scope_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Nme of the scope where the index belongs :param collection_name: **UNCOMMITTED** collection_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Name of the collection where the index belongs """ super(DropQueryIndexOptions, self).__init__(**kwargs) class DropPrimaryQueryIndexOptions(OptionBlockTimeOut): @overload def __init__(self, index_name=None, # str ignore_if_not_exists=None, # type: bool timeout=None, # type: timedelta scope_name=None, # type: str collection_name=None # type: str ): pass def __init__(self, **kwargs): """ Drop primary index options :param index_name: name of primary index :param timeout: operation timeout in seconds :param ignore_if_exists: don't throw an exception if index already exists :param scope_name: **UNCOMMITTED** scope_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. Nme of the scope where the index belongs :param collection_name: **UNCOMMITTED** collection_name is an uncommitted API that is unlikely to change, but may still change as final consensus on its behavior has not yet been reached. 
            Name of the collection where the index belongs
        """
        super(DropPrimaryQueryIndexOptions, self).__init__(**kwargs)


class WatchQueryIndexOptions(OptionBlock):
    @overload
    def __init__(self,
                 watch_primary=None,    # type: bool
                 timeout=None,          # type: timedelta
                 scope_name=None,       # type: str
                 collection_name=None   # type: str
                 ):
        pass

    def __init__(self, **kwargs):
        """
        Watch query index options

        :param watch_primary: If True, watch primary indexes
        :param timeout: operation timeout (as a timedelta)
        :param scope_name: **UNCOMMITTED**
            scope_name is an uncommitted API that is unlikely to change,
            but may still change as final consensus on its behavior has not
            yet been reached. Name of the scope where the index belongs
        :param collection_name: **UNCOMMITTED**
            collection_name is an uncommitted API that is unlikely to change,
            but may still change as final consensus on its behavior has not
            yet been reached. Name of the collection where the index belongs
        """
        super(WatchQueryIndexOptions, self).__init__(**kwargs)


class BuildDeferredQueryIndexOptions(OptionBlockTimeOut):
    @overload
    def __init__(self,
                 timeout=None,          # type: timedelta
                 scope_name=None,       # type: str
                 collection_name=None   # type: str
                 ):
        pass

    def __init__(self, **kwargs):
        """
        Build deferred query indexes options

        :param timeout: operation timeout (as a timedelta)
        :param scope_name: **UNCOMMITTED**
            scope_name is an uncommitted API that is unlikely to change,
            but may still change as final consensus on its behavior has not
            yet been reached. Name of the scope where the index belongs
        :param collection_name: **UNCOMMITTED**
            collection_name is an uncommitted API that is unlikely to change,
            but may still change as final consensus on its behavior has not
            yet been reached. Name of the collection where the index belongs
        """
        super(BuildDeferredQueryIndexOptions, self).__init__(**kwargs)
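

# --- Usage sketch (illustrative, not part of this module) -------------------
# The option blocks above are plain kwargs containers that get forwarded to a
# query index manager.  The manager object and its method names/signatures
# used below are assumptions made for the sake of the example; only the option
# classes themselves are defined in this module.
def _sketch_create_deferred_index(index_manager, bucket_name):
    from datetime import timedelta

    # Create the index deferred, then build all deferred indexes in one batch.
    create_opts = CreateQueryIndexOptions(deferred=True,
                                          ignore_if_exists=True,
                                          timeout=timedelta(seconds=30))
    index_manager.create_index(bucket_name, 'ix_city', ['city'],  # assumed API
                               create_opts)
    build_opts = BuildDeferredQueryIndexOptions(timeout=timedelta(seconds=60))
    index_manager.build_deferred_indexes(bucket_name, build_opts)  # assumed API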
import numpy as np import pytest from numpy import linalg import numpy.testing as npt import itertools from utils import get_rstate, get_printing import dynesty # noqa from dynesty import utils as dyfunc # noqa """ Run a series of basic tests to check whether anything huge is broken. """ nlive = 500 printing = get_printing() def bootstrap_tol(results, rstate): """ Compute the uncertainty of means/covs by doing bootstrapping """ n = len(results.logz) niter = 50 pos = results.samples wts = np.exp(results.logwt - results.logz[-1]) means = [] covs = [] for i in range(niter): # curpos = dyfunc.resample_equal(pos, wts) # xid = np.random.randint(len(curpos), size=len(curpos)) sub = rstate.uniform(size=n) < wts / wts.max() ind0 = np.nonzero(sub)[0] ind1 = rstate.choice(ind0, size=len(ind0), replace=True) mean = pos[ind1].mean(axis=0) cov = np.cov(pos[ind1].T) means.append(mean) covs.append(cov) return np.std(means, axis=0), np.std(covs, axis=0) def check_results(results, mean_truth, cov_truth, logz_truth, mean_tol, cov_tol, logz_tol, sig=5): """ Check if means and covariances match match expectations within the tolerances """ pos = results.samples wts = np.exp(results.logwt - results.logz[-1]) mean, cov = dyfunc.mean_and_cov(pos, wts) logz = results.logz[-1] npt.assert_array_less(np.abs(mean - mean_truth), sig * mean_tol) npt.assert_array_less(np.abs(cov - cov_truth), sig * cov_tol) npt.assert_array_less(np.abs((logz_truth - logz)), sig * logz_tol) # GAUSSIAN TEST class Gaussian: def __init__(self, corr=.95, prior_win=10): self.ndim = 3 self.mean = np.linspace(-1, 1, self.ndim) self.cov = np.identity(self.ndim) # set covariance to identity matrix self.cov[self.cov == 0] = corr # set off-diagonal terms (strongly correlated) self.cov_inv = linalg.inv(self.cov) # precision matrix self.lnorm = -0.5 * (np.log(2 * np.pi) * self.ndim + np.log(linalg.det(self.cov))) self.prior_win = prior_win # +/- on both sides self.logz_truth = self.ndim * (-np.log(2 * self.prior_win)) # 3-D correlated multivariate normal log-likelihood def loglikelihood(self, x): """Multivariate normal log-likelihood.""" return -0.5 * np.dot( (x - self.mean), np.dot(self.cov_inv, (x - self.mean))) + self.lnorm # prior transform def prior_transform(self, u): """Flat prior between -10. and 10.""" return self.prior_win * (2. * u - 1.) 
# gradient (no jacobian) def grad_x(self, x): """Multivariate normal log-likelihood gradient.""" return -np.dot(self.cov_inv, (x - self.mean)) # gradient (with jacobian) def grad_u(self, x): """Multivariate normal log-likelihood gradient.""" return -np.dot(self.cov_inv, x - self.mean) * 2 * self.prior_win def check_results_gau(results, g, rstate, sig=5, logz_tol=None): if logz_tol is None: logz_tol = sig * results.logzerr[-1] mean_tol, cov_tol = bootstrap_tol(results, rstate) # just check that resample_equal works dyfunc.resample_equal(results.samples, np.exp(results.logwt - results.logz[-1])) check_results(results, g.mean, g.cov, g.logz_truth, mean_tol, cov_tol, logz_tol, sig=sig) def test_gaussian(): sig = 5 rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, rstate=rstate) sampler.run_nested(print_progress=printing) # check that jitter/resample/simulate_run work # for not dynamic sampler dyfunc.jitter_run(sampler.results, rstate=rstate) dyfunc.resample_run(sampler.results, rstate=rstate) dyfunc.simulate_run(sampler.results, rstate=rstate) # add samples # check continuation behavior sampler.run_nested(dlogz=0.1, print_progress=printing) # get errors nerr = 3 result_list = [] for i in range(nerr): sampler.reset() sampler.run_nested(print_progress=False) results = sampler.results result_list.append(results) pos = results.samples wts = np.exp(results.logwt - results.logz[-1]) mean, cov = dyfunc.mean_and_cov(pos, wts) logz = results.logz[-1] assert (np.abs(logz - g.logz_truth) < sig * results.logzerr[-1]) res_comb = dyfunc.merge_runs(result_list) assert (np.abs(res_comb.logz[-1] - g.logz_truth) < sig * results.logzerr[-1]) # check summary res = sampler.results res.summary() # try all combinations excepte none/unif @pytest.mark.parametrize( "bound,sample", list( itertools.product(['single', 'multi', 'balls', 'cubes', 'none'], ['unif', 'rwalk', 'slice', 'rslice']))) def test_bounding_sample(bound, sample): # check various bounding methods rstate = get_rstate() if bound == 'none': if sample != 'unif': g = Gaussian(0.1) else: g = Gaussian(corr=0., prior_win=3) # make live easy if bound is none else: g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, bound=bound, sample=sample, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) @pytest.mark.parametrize("bound,sample", itertools.product( ['single', 'multi', 'balls', 'cubes'], ['unif'])) def test_bounding_bootstrap(bound, sample): # check various bounding methods rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, bound=bound, sample=sample, bootstrap=5, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) # extra checks for gradients def test_slice_nograd(): rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, sample='hslice', rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) def test_slice_grad(): rstate = get_rstate() g = Gaussian() sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform, g.ndim, nlive=nlive, sample='hslice', gradient=g.grad_x, compute_jac=True, rstate=rstate) sampler.run_nested(print_progress=printing) check_results_gau(sampler.results, g, rstate) def test_slice_grad1(): rstate = 
get_rstate()
    g = Gaussian()
    sampler = dynesty.NestedSampler(g.loglikelihood,
                                    g.prior_transform,
                                    g.ndim,
                                    nlive=nlive,
                                    sample='hslice',
                                    gradient=g.grad_u,
                                    rstate=rstate)
    sampler.run_nested(print_progress=printing)
    check_results_gau(sampler.results, g, rstate)


def test_dynamic():
    # check dynamic nested sampling behavior
    rstate = get_rstate()
    g = Gaussian()
    dsampler = dynesty.DynamicNestedSampler(g.loglikelihood,
                                            g.prior_transform,
                                            g.ndim,
                                            rstate=rstate)
    dsampler.run_nested(print_progress=printing)
    # check explicitly adding batches
    dsampler.add_batch(mode='auto')
    dsampler.add_batch(mode='weight')
    dsampler.add_batch(mode='full')
    dsampler.add_batch(logl_bounds=(-10, 0), mode='manual')
    dsampler.add_batch(logl_bounds=(-10000000, -1000), mode='manual')
    check_results_gau(dsampler.results, g, rstate)

    # check error analysis functions
    dres = dyfunc.jitter_run(dsampler.results, rstate=rstate)
    check_results_gau(dres, g, rstate)
    dres = dyfunc.resample_run(dsampler.results, rstate=rstate)
    check_results_gau(dres, g, rstate)
    dres = dyfunc.simulate_run(dsampler.results, rstate=rstate)
    check_results_gau(dres, g, rstate)
    dyfunc.kld_error(dsampler.results, rstate=rstate)


def test_ravel_unravel():
    """ Here I test that ravel/unravel preserves things correctly """
    rstate = get_rstate()
    g = Gaussian()
    dsampler = dynesty.DynamicNestedSampler(g.loglikelihood,
                                            g.prior_transform,
                                            g.ndim,
                                            bound='single',
                                            sample='unif',
                                            rstate=rstate,
                                            nlive=nlive)
    maxiter = 1800
    dsampler.run_nested(maxiter=maxiter, use_stop=False, nlive_batch=100)
    dres = dsampler.results
    dres_list = dyfunc.unravel_run(dres)
    dres_merge = dyfunc.merge_runs(dres_list)
    assert np.abs(dres.logz[-1] - dres_merge.logz[-1]) < 0.01
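

# --- Standalone sketch (not an additional test case) ------------------------
# This is the core pattern the tests above exercise: sample the correlated
# Gaussian defined above and compare the recovered log-evidence against the
# analytic value.  The 5-sigma threshold mirrors check_results_gau(); only
# names already defined in this file are used.
def _sketch_evidence_check():
    rstate = get_rstate()
    g = Gaussian()
    sampler = dynesty.NestedSampler(g.loglikelihood, g.prior_transform,
                                    g.ndim, nlive=nlive, rstate=rstate)
    sampler.run_nested(print_progress=printing)
    res = sampler.results
    # res.logz[-1] is the evidence estimate, res.logzerr[-1] its uncertainty.
    assert np.abs(res.logz[-1] - g.logz_truth) < 5 * res.logzerr[-1]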
''' @author: shylent ''' from tftp.backend import FilesystemSynchronousBackend, IReader, IWriter from tftp.bootstrap import RemoteOriginWriteSession, RemoteOriginReadSession from tftp.datagram import (WRQDatagram, TFTPDatagramFactory, split_opcode, ERR_ILLEGAL_OP, RRQDatagram, ERR_ACCESS_VIOLATION, ERR_FILE_EXISTS, ERR_FILE_NOT_FOUND, ERR_NOT_DEFINED) from tftp.errors import (Unsupported, AccessViolation, FileExists, FileNotFound, BackendError) from tftp.netascii import NetasciiReceiverProxy, NetasciiSenderProxy from tftp.protocol import TFTP from twisted.internet import reactor from twisted.internet.address import IPv4Address from twisted.internet.defer import Deferred, inlineCallbacks from twisted.internet.protocol import DatagramProtocol from twisted.internet.task import Clock from twisted.python import context from twisted.python.filepath import FilePath from twisted.test.proto_helpers import StringTransport from twisted.trial import unittest import tempfile class DummyBackend(object): pass def BackendFactory(exc_val=None): if exc_val is not None: class FailingBackend(object): def get_reader(self, filename): raise exc_val def get_writer(self, filename): raise exc_val return FailingBackend() else: return DummyBackend() class FakeTransport(StringTransport): stopListening = StringTransport.loseConnection def write(self, bytes, addr=None): StringTransport.write(self, bytes) def connect(self, host, port): self._connectedAddr = (host, port) class DispatchErrors(unittest.TestCase): port = 11111 def setUp(self): self.clock = Clock() self.transport = FakeTransport( hostAddress=IPv4Address('UDP', '127.0.0.1', self.port)) def test_malformed_datagram(self): tftp = TFTP(BackendFactory(), _clock=self.clock) tftp.datagramReceived('foobar', ('127.0.0.1', 1111)) self.failIf(self.transport.disconnecting) self.failIf(self.transport.value()) test_malformed_datagram.skip = 'Not done yet' def test_bad_mode(self): tftp = TFTP(DummyBackend(), _clock=self.clock) tftp.transport = self.transport wrq_datagram = WRQDatagram('foobar', 'badmode', {}) tftp.datagramReceived(wrq_datagram.to_wire(), ('127.0.0.1', 1111)) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_ILLEGAL_OP) def test_unsupported(self): tftp = TFTP(BackendFactory(Unsupported("I don't support you")), _clock=self.clock) tftp.transport = self.transport wrq_datagram = WRQDatagram('foobar', 'netascii', {}) tftp.datagramReceived(wrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_ILLEGAL_OP) self.transport.clear() rrq_datagram = RRQDatagram('foobar', 'octet', {}) tftp.datagramReceived(rrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_ILLEGAL_OP) def test_access_violation(self): tftp = TFTP(BackendFactory(AccessViolation("No!")), _clock=self.clock) tftp.transport = self.transport wrq_datagram = WRQDatagram('foobar', 'netascii', {}) tftp.datagramReceived(wrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_ACCESS_VIOLATION) self.transport.clear() rrq_datagram = RRQDatagram('foobar', 'octet', {}) tftp.datagramReceived(rrq_datagram.to_wire(), ('127.0.0.1', 1111)) 
self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_ACCESS_VIOLATION) def test_file_exists(self): tftp = TFTP(BackendFactory(FileExists("Already have one")), _clock=self.clock) tftp.transport = self.transport wrq_datagram = WRQDatagram('foobar', 'netascii', {}) tftp.datagramReceived(wrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_FILE_EXISTS) def test_file_not_found(self): tftp = TFTP(BackendFactory(FileNotFound("Not found")), _clock=self.clock) tftp.transport = self.transport rrq_datagram = RRQDatagram('foobar', 'netascii', {}) tftp.datagramReceived(rrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_FILE_NOT_FOUND) def test_generic_backend_error(self): tftp = TFTP(BackendFactory(BackendError("A backend that couldn't")), _clock=self.clock) tftp.transport = self.transport rrq_datagram = RRQDatagram('foobar', 'netascii', {}) tftp.datagramReceived(rrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_NOT_DEFINED) self.transport.clear() rrq_datagram = RRQDatagram('foobar', 'octet', {}) tftp.datagramReceived(rrq_datagram.to_wire(), ('127.0.0.1', 1111)) self.clock.advance(1) error_datagram = TFTPDatagramFactory(*split_opcode(self.transport.value())) self.assertEqual(error_datagram.errorcode, ERR_NOT_DEFINED) class DummyClient(DatagramProtocol): def __init__(self, *args, **kwargs): self.ready = Deferred() def startProtocol(self): self.ready.callback(None) class TFTPWrapper(TFTP): def _startSession(self, *args, **kwargs): d = TFTP._startSession(self, *args, **kwargs) def save_session(session): self.session = session return session d.addCallback(save_session) return d class SuccessfulDispatch(unittest.TestCase): def setUp(self): self.tmp_dir_path = tempfile.mkdtemp() with FilePath(self.tmp_dir_path).child('nonempty').open('w') as fd: fd.write('Something uninteresting') self.backend = FilesystemSynchronousBackend(self.tmp_dir_path) self.tftp = TFTPWrapper(self.backend) self.client = DummyClient() reactor.listenUDP(0, self.client) self.server_port = reactor.listenUDP(1069, self.tftp) # Ok. 
I am going to hell for these two tests def test_WRQ(self): self.client.transport.write(WRQDatagram('foobar', 'NetASCiI', {}).to_wire(), ('127.0.0.1', 1069)) d = Deferred() def cb(ign): self.assertIsInstance(self.tftp.session, RemoteOriginWriteSession) self.assertIsInstance(self.tftp.session.backend, NetasciiReceiverProxy) self.tftp.session.cancel() d.addCallback(cb) reactor.callLater(0.5, d.callback, None) return d def test_RRQ(self): self.client.transport.write(RRQDatagram('nonempty', 'NetASCiI', {}).to_wire(), ('127.0.0.1', 1069)) d = Deferred() def cb(ign): self.assertIsInstance(self.tftp.session, RemoteOriginReadSession) self.assertIsInstance(self.tftp.session.backend, NetasciiSenderProxy) self.tftp.session.cancel() d.addCallback(cb) reactor.callLater(0.5, d.callback, None) return d def tearDown(self): self.tftp.transport.stopListening() self.client.transport.stopListening() class FilesystemAsyncBackend(FilesystemSynchronousBackend): def __init__(self, base_path, clock): super(FilesystemAsyncBackend, self).__init__( base_path, can_read=True, can_write=True) self.clock = clock def get_reader(self, file_name): d_get = super(FilesystemAsyncBackend, self).get_reader(file_name) d = Deferred() # d_get has already fired, so don't chain d_get to d until later, # otherwise d will be fired too early. self.clock.callLater(0, d_get.chainDeferred, d) return d def get_writer(self, file_name): d_get = super(FilesystemAsyncBackend, self).get_writer(file_name) d = Deferred() # d_get has already fired, so don't chain d_get to d until later, # otherwise d will be fired too early. self.clock.callLater(0, d_get.chainDeferred, d) return d class SuccessfulAsyncDispatch(unittest.TestCase): def setUp(self): self.clock = Clock() self.tmp_dir_path = tempfile.mkdtemp() with FilePath(self.tmp_dir_path).child('nonempty').open('w') as fd: fd.write('Something uninteresting') self.backend = FilesystemAsyncBackend(self.tmp_dir_path, self.clock) self.tftp = TFTP(self.backend, self.clock) def test_get_reader_defers(self): rrq_datagram = RRQDatagram('nonempty', 'NetASCiI', {}) rrq_addr = ('127.0.0.1', 1069) rrq_mode = "octet" d = self.tftp._startSession(rrq_datagram, rrq_addr, rrq_mode) self.assertFalse(d.called) self.clock.advance(1) self.assertTrue(d.called) self.assertTrue(IReader.providedBy(d.result.backend)) def test_get_writer_defers(self): wrq_datagram = WRQDatagram('foobar', 'NetASCiI', {}) wrq_addr = ('127.0.0.1', 1069) wrq_mode = "octet" d = self.tftp._startSession(wrq_datagram, wrq_addr, wrq_mode) self.assertFalse(d.called) self.clock.advance(1) self.assertTrue(d.called) self.assertTrue(IWriter.providedBy(d.result.backend)) class CapturedContext(Exception): """A donkey, to carry the call context back up the stack.""" def __init__(self, args, names): super(CapturedContext, self).__init__(*args) self.context = dict((name, context.get(name)) for name in names) class ContextCapturingBackend(object): """A fake `IBackend` that raises `CapturedContext`. Calling `get_reader` or `get_writer` raises a `CapturedContext` exception, which captures the values of the call context for the given `names`. 
""" def __init__(self, *names): self.names = names def get_reader(self, file_name): raise CapturedContext(("get_reader", file_name), self.names) def get_writer(self, file_name): raise CapturedContext(("get_writer", file_name), self.names) class HostTransport(object): """A fake `ITransport` that only responds to `getHost`.""" def __init__(self, host): self.host = host def getHost(self): return IPv4Address("UDP", *self.host) class BackendCallingContext(unittest.TestCase): def setUp(self): super(BackendCallingContext, self).setUp() self.backend = ContextCapturingBackend("local", "remote") self.tftp = TFTP(self.backend) self.tftp.transport = HostTransport(("12.34.56.78", 1234)) @inlineCallbacks def test_context_rrq(self): rrq_datagram = RRQDatagram('nonempty', 'NetASCiI', {}) rrq_addr = ('127.0.0.1', 1069) error = yield self.assertFailure( self.tftp._startSession(rrq_datagram, rrq_addr, "octet"), CapturedContext) self.assertEqual(("get_reader", rrq_datagram.filename), error.args) self.assertEqual( {"local": self.tftp.transport.host, "remote": rrq_addr}, error.context) @inlineCallbacks def test_context_wrq(self): wrq_datagram = WRQDatagram('nonempty', 'NetASCiI', {}) wrq_addr = ('127.0.0.1', 1069) error = yield self.assertFailure( self.tftp._startSession(wrq_datagram, wrq_addr, "octet"), CapturedContext) self.assertEqual(("get_writer", wrq_datagram.filename), error.args) self.assertEqual( {"local": self.tftp.transport.host, "remote": wrq_addr}, error.context)
""" @package mi.dataset.driver.moas.gl.adcpa.test_driver @file marine-integrations/mi/dataset/driver/moas.gl/adcpa/driver.py @author Jeff Roy (Raytheon) @brief Test cases for moas_gl_adcpa driver (for both telemetered and recovered data) USAGE: Make tests verbose and provide stdout * From the IDK $ bin/dsa/test_driver $ bin/dsa/test_driver -i [-t testname] $ bin/dsa/test_driver -q [-t testname] """ __author__ = 'Jeff Roy (Raytheon)' __license__ = 'Apache 2.0' from nose.plugins.attrib import attr from mi.core.log import get_logger; log = get_logger() import os from mi.idk.dataset.unit_test import DataSetTestCase from mi.idk.dataset.unit_test import DataSetIntegrationTestCase from mi.idk.dataset.unit_test import DataSetQualificationTestCase from mi.idk.exceptions import IDKConfigMissing, IDKException from mi.idk.util import remove_all_files from mi.dataset.parser.adcpa_m_glider import DataParticleType from mi.dataset.driver.moas.gl.adcpa.driver import \ AdcpaDataSetDriver, \ DataTypeKey, \ AdcpaMGliderInstrumentParticle, \ AdcpaMGliderRecoveredParticle from mi.dataset.dataset_driver import \ DataSourceConfigKey, \ DataSetDriverConfigKeys, \ DriverParameter from pyon.agent.agent import ResourceAgentState DIR_ADCPA_LIVE = '/tmp/dsatest1' DIR_ADCPA_RECOVERED = '/tmp/dsatest2' # Fill in driver details DataSetTestCase.initialize( driver_module='mi.dataset.driver.moas.gl.adcpa.driver', driver_class='AdcpaDataSetDriver', agent_resource_id='123xyz', agent_name='Agent007', agent_packet_config=AdcpaDataSetDriver.stream_config(), startup_config={ DataSourceConfigKey.RESOURCE_ID: 'moas_gl_adcpa', DataSourceConfigKey.HARVESTER: { DataTypeKey.ADCPA_INSTRUMENT: { DataSetDriverConfigKeys.DIRECTORY: DIR_ADCPA_LIVE, DataSetDriverConfigKeys.PATTERN: '*.PD0', DataSetDriverConfigKeys.FREQUENCY: 1, }, DataTypeKey.ADCPA_RECOVERED: { DataSetDriverConfigKeys.DIRECTORY: DIR_ADCPA_RECOVERED, DataSetDriverConfigKeys.PATTERN: '*.PD0', DataSetDriverConfigKeys.FREQUENCY: 1, } }, DataSourceConfigKey.PARSER: { DataTypeKey.ADCPA_INSTRUMENT: {}, DataTypeKey.ADCPA_RECOVERED: {} } } ) # The integration and qualification tests generated here are suggested tests, # but may not be enough to fully test your driver. Additional tests should be # written as needed. ############################################################################### # INTEGRATION TESTS # # Device specific integration tests are for # # testing device specific capabilities # ############################################################################### @attr('INT', group='mi') class IntegrationTest(DataSetIntegrationTestCase): def assert_file_ingested(self, filename, data_key): """ Assert that a particular file was ingested Need to override for multiple harvester since we have the additional data_key If the ingested flag is not set in the driver state for this file, fail the test @ param filename name of the file to check that it was ingested using the ingested flag """ log.debug("last state callback result %s", self.state_callback_result[-1]) last_state = self.state_callback_result[-1][data_key] if not filename in last_state or not last_state[filename]['ingested']: self.fail("File %s was not ingested" % filename) def test_get(self): """ Test that we can get data from multiple files. """ log.info("================ START INTEG TEST GET =====================") # Start sampling. 
self.clear_sample_data() self.driver.start_sampling() log.info("LA101636_20.PD0 placed in Live directory") self.clear_async_data() self.create_sample_data_set_dir( 'LA101636_20.PD0', DIR_ADCPA_LIVE) self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20.yml', count=20, timeout=10) log.info("LB180210_50.PD0 placed in Recovered directory") self.clear_async_data() self.create_sample_data_set_dir( 'LB180210_50.PD0', DIR_ADCPA_RECOVERED) self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_recovered.yml', count=50, timeout=10) def test_get_any_order(self): """ Test that we can get data from files for all harvesters / parsers. """ log.info("=========== START INTEG TEST GET ANY ORDER ================") # Start sampling. self.clear_sample_data() self.driver.start_sampling() log.info("LA101636_20.PD0 placed in Live directory") self.clear_async_data() self.create_sample_data_set_dir( 'LA101636_20.PD0', DIR_ADCPA_LIVE) log.info("LB180210_50.PD0 placed in Recovered directory") self.clear_async_data() self.create_sample_data_set_dir( 'LB180210_50.PD0', DIR_ADCPA_RECOVERED) # get the first 5 particles from the live directory self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20_1_5.yml', count=5, timeout=10) # get the first 12 particles from the recovered directory self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_1_12.yml', count=12, timeout=10) # get the next 15 particles from the live directory self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20_6_20.yml', count=15, timeout=10) # get the next 8 particles from the recovered directory self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_13_20.yml', count=8, timeout=10) def test_stop_resume(self): """ Test the ability to stop and restart the process """ log.info("=========== START INTEG TEST STOP RESUME ================") filename_1 = 'File_1.PD0' filename_2 = 'File_2.PD0' filename_3 = 'File_3.PD0' filename_4 = 'File_4.PD0' path_1 = self.create_sample_data_set_dir('LA101636_20.PD0', DIR_ADCPA_LIVE, filename_1) path_2 = self.create_sample_data_set_dir('LA101636_20.PD0', DIR_ADCPA_LIVE, filename_2) path_3 = self.create_sample_data_set_dir('LB180210_50.PD0', DIR_ADCPA_RECOVERED, filename_3) path_4 = self.create_sample_data_set_dir('LB180210_50.PD0', DIR_ADCPA_RECOVERED, filename_4) # these files have 446 byte ensembles in them ensemble_bytes = 446 position_1 = 20 * ensemble_bytes position_2 = 5 * ensemble_bytes position_3 = 20 * ensemble_bytes position_4 = 12 * ensemble_bytes # Create and store the new driver state. # Set status of file 1 to completely read. # Set status of file 2 to start reading at record 6 of a 20 record file. # Set status of file 3 to completely read. # Set status of file 4 to start reading at record 13 of a 50 record file. 
state = {DataTypeKey.ADCPA_INSTRUMENT: {filename_1: self.get_file_state(path_1, True, position_1), filename_2: self.get_file_state(path_2, False, position_2)}, DataTypeKey.ADCPA_RECOVERED: {filename_3: self.get_file_state(path_3, True, position_3), filename_4: self.get_file_state(path_4, False, position_4)} } # set the driver to the predetermined state and start sampling self.driver = self._get_driver_object(memento=state) self.driver.start_sampling() # get the next 15 particles from the live directory self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20_6_20.yml', count=15, timeout=10) # get the next 8 particles from the recovered directory self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_13_20.yml', count=8, timeout=10) def test_stop_start_resume(self): """ Test the ability to stop and restart sampling, ingesting files in the correct order """ log.info("========== START INTEG TEST STOP START RESUME ===============") self.clear_async_data() self.driver.start_sampling() filename_1 = 'File_1.PD0' filename_2 = 'File_2.PD0' filename_3 = 'File_3.PD0' filename_4 = 'File_4.PD0' self.create_sample_data_set_dir('LA101636_20.PD0', DIR_ADCPA_LIVE, filename_1) self.create_sample_data_set_dir('LA101636_20.PD0', DIR_ADCPA_LIVE, filename_2) self.create_sample_data_set_dir('LB180210_50.PD0', DIR_ADCPA_RECOVERED, filename_3) self.create_sample_data_set_dir('LB180210_50.PD0', DIR_ADCPA_RECOVERED, filename_4) # Read all of the first live data file # Verify that the entire file has been read. self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20.yml', count=20, timeout=10) self.assert_file_ingested(filename_1, DataTypeKey.ADCPA_INSTRUMENT) # Read all of the first recovered data file # Verify that the entire file has been read. self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_recovered.yml', count=50, timeout=10) self.assert_file_ingested(filename_3, DataTypeKey.ADCPA_RECOVERED) # get the first 5 particles from the 2nd live file self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20_1_5.yml', count=5, timeout=10) # get the first 12 particles from the recovered directory self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_1_12.yml', count=12, timeout=10) # Stop and then start sampling, resuming from where we left off. self.driver.stop_sampling() self.driver.start_sampling() # get the next 15 particles from the live directory self.assert_data(AdcpaMGliderInstrumentParticle, 'LA101636_20_6_20.yml', count=15, timeout=10) self.assert_file_ingested(filename_2, DataTypeKey.ADCPA_INSTRUMENT) # get the next 8 particles from the recovered directory self.assert_data(AdcpaMGliderRecoveredParticle, 'LB180210_50_13_20.yml', count=8, timeout=10) self.assert_file_ingested(filename_4, DataTypeKey.ADCPA_RECOVERED) ############################################################################### # QUALIFICATION TESTS # # Device specific qualification tests are for # # testing device specific capabilities # ############################################################################### @attr('QUAL', group='mi') class QualificationTest(DataSetQualificationTestCase): def clear_sample_data(self): """ Need to override this from base class to clean all directories """ data_dirs = self.create_data_dir() log.debug("Startup Config: %s", self._driver_config().get('startup_config')) for data_dir in data_dirs: log.debug("Clean all data from %s", data_dir) remove_all_files(data_dir) def create_data_dir(self): """ Verify the test data directory is created and exists. 
        Return the path to the directory.
        @return: path to data directory
        @raise: IDKConfigMissing no harvester config
        @raise: IDKException if data_dir exists, but not a directory
        """
        startup_config = self._driver_config().get('startup_config')
        if not startup_config:
            raise IDKConfigMissing("Driver config missing 'startup_config'")

        harvester_config = startup_config.get('harvester')
        if not harvester_config:
            raise IDKConfigMissing("Startup config missing 'harvester' config")

        data_dir = []

        for key in harvester_config:
            data_dir_key = harvester_config[key].get("directory")
            if not data_dir_key:
                raise IDKConfigMissing("Harvester config missing 'directory'")

            if not os.path.exists(data_dir_key):
                log.debug("Creating data dir: %s", data_dir_key)
                os.makedirs(data_dir_key)

            elif not os.path.isdir(data_dir_key):
                raise IDKException("%s is not a directory" % data_dir_key)

            data_dir.append(data_dir_key)

        return data_dir

    def test_publish_path(self):
        """
        Setup an agent/driver/harvester/parser and verify that data is
        published out the agent
        """
        log.info("=========== START QUAL TEST PUBLISH PATH =================")

        log.info("LA101636_20.PD0 placed in Live directory")
        self.create_sample_data_set_dir(
            'LA101636_20.PD0', DIR_ADCPA_LIVE)

        log.info("LB180210_50.PD0 placed in Recovered directory")
        self.create_sample_data_set_dir(
            'LB180210_50.PD0', DIR_ADCPA_RECOVERED)

        self.assert_initialize()

        result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 20, 100)
        self.assert_data_values(result, 'LA101636_20.yml')

        result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_RECOVERED, 50, 100)
        self.assert_data_values(result, 'LB180210_50_recovered.yml')

    def test_large_import(self):
        """
        Test importing a large number of samples from the files at once
        """
        log.info("=========== START QUAL TEST LARGE IMPORT =================")

        # create the sample data for both live and recovered
        # using files with thousands of records
        self.create_sample_data_set_dir(
            'LA101636.PD0', DIR_ADCPA_LIVE)
        self.create_sample_data_set_dir(
            'LB180210.PD0', DIR_ADCPA_RECOVERED)

        # initialise the driver and start sampling
        self.assert_initialize()

        num_samples = 200
        max_time = 600  # seconds

        # get a bunch of live particles
        self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, num_samples, max_time)

        # get a bunch of recovered particles
        self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_RECOVERED, num_samples, max_time)

    def test_stop_start(self):
        """
        Test the agent's ability to start data flowing, stop, then restart
        at the correct spot.
""" log.info("========== START QUAL TEST STOP START ===============") log.info("LA101636_20.PD0 placed in Live directory") self.create_sample_data_set_dir( 'LA101636_20.PD0', DIR_ADCPA_LIVE) log.info("LB180210_50.PD0 placed in Recovered directory") self.create_sample_data_set_dir( 'LB180210_50.PD0', DIR_ADCPA_RECOVERED) #put the driver in command mode so it can be started and stopped self.assert_initialize(final_state=ResourceAgentState.COMMAND) self.dataset_agent_client.set_resource( {DriverParameter.RECORDS_PER_SECOND: 1}) self.assert_start_sampling() #get 5 records from the live data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 5, 100) self.assert_data_values(result, 'LA101636_20_1_5.yml') #get 12 records from the recovered data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_RECOVERED, 12, 100) self.assert_data_values(result, 'LB180210_50_1_12.yml') # stop sampling self.assert_stop_sampling() #restart sampling self.assert_start_sampling() #get 5 records from the live data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 15, 100) self.assert_data_values(result, 'LA101636_20_6_20.yml') #verify the queue is empty self.assert_sample_queue_size(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 0) #get 12 records from the recovered data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_RECOVERED, 8, 100) self.assert_data_values(result, 'LB180210_50_13_20.yml') def test_shutdown_restart(self): """ Test a full stop of the dataset agent, then restart the agent and confirm it restarts at the correct spot. """ log.info("========== START QUAL TEST SHUTDOWN RESTART ===============") log.info("LA101636_20.PD0 placed in Live directory") self.create_sample_data_set_dir( 'LA101636_20.PD0', DIR_ADCPA_LIVE) log.info("LB180210_50.PD0 placed in Recovered directory") self.create_sample_data_set_dir( 'LB180210_50.PD0', DIR_ADCPA_RECOVERED) #put the driver in command mode so it can be started and stopped self.assert_initialize(final_state=ResourceAgentState.COMMAND) self.dataset_agent_client.set_resource( {DriverParameter.RECORDS_PER_SECOND: 1}) self.assert_start_sampling() #get 5 records from the live data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 5, 100) self.assert_data_values(result, 'LA101636_20_1_5.yml') #get 12 records from the recovered data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_RECOVERED, 12, 100) self.assert_data_values(result, 'LB180210_50_1_12.yml') # stop sampling self.assert_stop_sampling() self.stop_dataset_agent_client() # Re-start the agent self.init_dataset_agent_client() # Re-initialize self.assert_initialize(final_state=ResourceAgentState.COMMAND) #restart sampling self.assert_start_sampling() #get 5 records from the live data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 15, 100) self.assert_data_values(result, 'LA101636_20_6_20.yml') #verify the queue is empty self.assert_sample_queue_size(DataParticleType.ADCPA_M_GLIDER_INSTRUMENT, 0) #get 12 records from the recovered data, verify the values result = self.data_subscribers.get_samples(DataParticleType.ADCPA_M_GLIDER_RECOVERED, 8, 100) self.assert_data_values(result, 'LB180210_50_13_20.yml')
from __future__ import absolute_import, division import json import logging import random import re import requests import sys import time import uuid from cStringIO import StringIO from contextlib import closing from datetime import datetime from flask import current_app from lxml import etree, objectify from typing import Any # NOQA from changes.artifacts.analytics_json import AnalyticsJsonHandler from changes.artifacts.coverage import CoverageHandler from changes.artifacts.dummylogfile import DummyLogFileHandler from changes.artifacts.manager import Manager from changes.artifacts.manifest_json import ManifestJsonHandler from changes.artifacts.xunit import XunitHandler from changes.backends.base import BaseBackend, UnrecoverableException from changes.buildsteps.base import BuildStep from changes.config import db, redis, statsreporter from changes.constants import Result, Status from changes.db.utils import get_or_create from changes.jobs.sync_job_step import sync_job_step from changes.lib.artifact_store_lib import ArtifactStoreClient from changes.models.artifact import Artifact from changes.models.failurereason import FailureReason from changes.models.jobphase import JobPhase from changes.models.jobstep import JobStep from changes.models.log import LogSource, LOG_CHUNK_SIZE from changes.models.node import Cluster, ClusterNode, Node from changes.storage.artifactstore import ArtifactStoreFileStorage from changes.utils.http import build_patch_uri from changes.utils.text import chunked RESULT_MAP = { 'SUCCESS': Result.passed, 'ABORTED': Result.aborted, 'FAILURE': Result.failed, 'REGRESSION': Result.failed, 'UNSTABLE': Result.failed, } QUEUE_ID_XPATH = '/queue/item[action/parameter/name="CHANGES_BID" and action/parameter/value="{job_id}"]/id' BUILD_ID_XPATH = ('/freeStyleProject/build[action/parameter/name="CHANGES_BID" and ' 'action/parameter/value="{job_id}"]/number') ID_XML_RE = re.compile(r'<id>(\d+)</id>') LOG_SYNC_TIMEOUT_SECS = 30 # Redis key for storing the master blacklist set # The blacklist is used to temporarily remove jenkins masters from the pool of available masters. MASTER_BLACKLIST_KEY = 'jenkins_master_blacklist' # Default name for the Jenkins console log. # Note that artifactstore may alter the name for deduplication, so this cannot directly be used. JENKINS_LOG_NAME = 'jenkins-console' class NotFound(Exception): """Indicates a 404 response from the Jenkins API.""" pass class JenkinsBuilder(BaseBackend): def __init__(self, master_urls=None, diff_urls=None, job_name=None, auth_keyname=None, verify=True, cluster=None, debug_config=None, *args, **kwargs): super(JenkinsBuilder, self).__init__(*args, **kwargs) self.master_urls = master_urls self.diff_urls = diff_urls assert self.master_urls, 'No Jenkins masters specified' self.logger = logging.getLogger('jenkins') self.job_name = job_name self.http_session = requests.Session() self.auth = self.app.config[auth_keyname] if auth_keyname else None self.verify = verify self.cluster = cluster self.debug_config = debug_config or {} self.artifact_store_client = ArtifactStoreClient(current_app.config['ARTIFACTS_SERVER']) def report_response_status(r, *args, **kwargs): statsreporter.stats().incr('jenkins_api_response_{}'.format(r.status_code)) self.http_session.hooks['response'].append(report_response_status) def _get_text_response(self, master_base_url, path, method='GET', params=None, data=None): """Make an HTTP request and return a text response. Params: master_base_url (str): Jenkins master URL, in scheme://host form. 
path (str): URL path on the master to access. method (str): HTTP verb to use; Either 'GET' or 'POST'; 'GET' is the default. params (dict): Optional dictionary of URL parameters to append to the URL. data (dict): Optional body to attach to the request. If a dict is provided, it will be form-encoded. Returns: Content of the response, in unicode. Raises: NotFound if the server responded with a 404 status. Exception for other error status codes. """ url = '{}/{}'.format(master_base_url, path.lstrip('/')) if params is None: params = {} self.logger.info('Fetching %r', url) resp = getattr(self.http_session, method.lower())(url, params=params, data=data, allow_redirects=False, timeout=30, auth=self.auth, verify=self.verify) if resp.status_code == 404: raise NotFound elif not (200 <= resp.status_code < 400): exception_msg = 'Invalid response. Status code for %s was %s' attrs = url, resp.status_code self.logger.exception(exception_msg, *attrs) raise Exception(exception_msg % attrs) return resp.text def _get_json_response(self, master_base_url, path): """Makes a Jenkins API request and returns the JSON response Args: master_base_url (str): Jenkins master URL, in scheme://host form. path (str): URL path on the master to access. Returns: Parsed JSON from the request. Raises: NotFound if the server responded with a 404 status. Exception for other error status codes. ValueError if the response wasn't valid JSON. """ path = '{}/api/json/'.format(path.strip('/')) text = self._get_text_response(master_base_url, path, method='GET') return json.loads(text) def _parse_parameters(self, json): params = {} for action in json['actions']: params.update( (p['name'], p.get('value')) for p in action.get('parameters', []) ) return params def _get_artifactstore_bucket(self, step): # Create the artifactstore bucket, if it doesn't already exist bucket_name = step.data.get('jenkins_bucket_name') if not bucket_name: bucket_name = self.artifact_store_client.create_bucket(step.id.hex + '-jenkins').name step.data['jenkins_bucket_name'] = bucket_name db.session.add(step) db.session.commit() return bucket_name def _create_job_step(self, phase, data, force_create=False, cluster=None, **defaults): """ Gets or creates the primary JobStep for a Jenkins Job. Args: phase (JobPhase): JobPhase the JobStep should be part of. data (dict): JSON-serializable data associated with the Jenkins build. force_create (bool): Force this JobStep to be created (rather than retrieved). This is used when replacing a JobStep to make sure we don't just get the old one. cluster (Optional[str]): Cluster in which the JobStep will be run. Returns: JobStep: The JobStep that was retrieved or created. """ defaults['data'] = data if cluster: defaults['cluster'] = cluster # TODO(kylec): Get rid of the kwargs. if not defaults.get('label'): # we update this once we have the build_no for this jobstep defaults['label'] = '<Creating Jenkins build>' where = { 'job': phase.job, 'project': phase.project, 'phase': phase, } if force_create: # uuid is unique which forces jobstep to be created where['id'] = uuid.uuid4() step, created = get_or_create(JobStep, where=where, defaults=defaults) assert created or not force_create BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary') return step def fetch_artifact(self, jobstep, artifact_data): """ Fetch an artifact from a Jenkins job. Args: jobstep (JobStep): The JobStep associated with the artifact. artifact_data (dict): Jenkins job artifact metadata dictionary. Returns: A streamed requests Response object. 
Raises: HTTPError: if the response code didn't indicate success. Timeout: if the server took too long to respond. """ url = '{base}/job/{job}/{build}/artifact/{artifact}'.format( base=jobstep.data['master'], job=jobstep.data['job_name'], build=jobstep.data['build_no'], artifact=artifact_data['relativePath'], ) return self._streaming_get(url) def sync_artifact(self, artifact): jobstep = artifact.step resp = self.fetch_artifact(jobstep, artifact.data) # NB: Accessing Response.content results in the entire artifact # being loaded into memory. if len(resp.content) == 0: # Artifact store does not support empty artifacts, and they're not very useful, so just discard them. self.logger.info('Artifact %s from jobstep %s is empty, discarding' % (artifact.name, jobstep.id.hex)) return bucket_name = self._get_artifactstore_bucket(jobstep) artifact.file.storage = 'changes.storage.artifactstore.ArtifactStoreFileStorage' filename = ArtifactStoreFileStorage.get_filename_from_artifact_name(bucket_name, artifact.id.hex) artifact.file.save(StringIO(resp.content), filename, path=artifact.name) # commit file save regardless of whether handler is successful db.session.commit() # TODO(dcramer): requests doesnt seem to provide a non-binary file-like # API, so we're stuffing it into StringIO try: self.get_artifact_manager(jobstep).process(artifact, StringIO(resp.content)) except Exception: self.logger.exception( 'Failed to sync test results for job step %s', jobstep.id) def _sync_log(self, jobstep): bucket_name = self._get_artifactstore_bucket(jobstep) # Note: artifactstore may alter the log name to deduplicate it, so always use data.get('log_artifact_name') artifact_name = jobstep.data.get('log_artifact_name') if not artifact_name: artifact_name = self.artifact_store_client\ .create_chunked_artifact(bucket_name, artifact_name=JENKINS_LOG_NAME).name jobstep.data['log_artifact_name'] = artifact_name db.session.add(jobstep) db.session.commit() logsource, created = get_or_create(LogSource, where={ 'name': artifact_name, 'step': jobstep, }, defaults={ 'job': jobstep.job, 'project': jobstep.project, 'date_created': jobstep.date_started, 'in_artifact_store': True, }) if created: offset = 0 else: offset = jobstep.data.get('log_offset', 0) url = '{base}/job/{job}/{build}/logText/progressiveText/'.format( base=jobstep.data['master'], job=jobstep.data['job_name'], build=jobstep.data['build_no'], ) start_time = time.time() with closing(self._streaming_get(url, params={'start': offset})) as resp: log_length = int(resp.headers['X-Text-Size']) # When you request an offset that doesnt exist in the build log, Jenkins # will instead return the entire log. 
Jenkins also seems to provide us # with X-Text-Size which indicates the total size of the log if offset > log_length: return # Jenkins will suggest to us that there is more data when the job has # yet to complete has_more = resp.headers.get('X-More-Data') == 'true' # XXX: requests doesnt seem to guarantee chunk_size, so we force it # with our own helper iterator = resp.iter_content() for chunk in chunked(iterator, LOG_CHUNK_SIZE): chunk_size = len(chunk) try: self.artifact_store_client.post_artifact_chunk(bucket_name, artifact_name, offset, chunk) offset += chunk_size if time.time() > start_time + LOG_SYNC_TIMEOUT_SECS: raise RuntimeError('TOO LONG TO DOWNLOAD LOG: %s' % logsource.get_url()) except Exception as e: # On an exception or a timeout, attempt to truncate the log # Catch all exceptions, including timeouts and HTTP errors self.logger.warning('Exception when uploading logchunks: %s', e.message) has_more = False warning = ("\nLOG TRUNCATED. SEE FULL LOG AT " "{base}/job/{job}/{build}/consoleText\n").format( base=jobstep.data['master'], job=jobstep.data['job_name'], build=jobstep.data['build_no']) self.artifact_store_client.post_artifact_chunk(bucket_name, artifact_name, offset, warning) break # We **must** track the log offset externally as Jenkins embeds encoded # links and we cant accurately predict the next `start` param. jobstep.data['log_offset'] = log_length db.session.add(jobstep) if not has_more: self.artifact_store_client.close_chunked_artifact(bucket_name, artifact_name) return True if has_more else None def _pick_master(self, job_name, is_diff=False): """ Identify a master to run the given job on. The master with the lowest queue for the given job is chosen. By random sorting the first empty queue will be prioritized. """ candidate_urls = self.master_urls if is_diff and self.diff_urls: candidate_urls = self.diff_urls blacklist = redis.smembers(MASTER_BLACKLIST_KEY) master_urls = [c for c in candidate_urls if c not in blacklist] if len(master_urls) == 0: raise ValueError("No masters to pick from.") if len(master_urls) == 1: return master_urls[0] random.shuffle(master_urls) best_match = (sys.maxint, None) for url in master_urls: try: queued_jobs = self._count_queued_jobs(url, job_name) except: self.logger.exception("Couldn't count queued jobs on master %s", url) continue if queued_jobs == 0: return url if best_match[0] > queued_jobs: best_match = (queued_jobs, url) best = best_match[1] if not best: raise Exception("Unable to successfully pick a master from {}.".format(master_urls)) return best def _count_queued_jobs(self, master_base_url, job_name): response = self._get_json_response( master_base_url=master_base_url, path='/queue/', ) return sum(( 1 for i in response['items'] if i['task']['name'] == job_name )) def _find_job(self, master_base_url, job_name, changes_bid): """ Given a job identifier, we attempt to poll the various endpoints for a limited amount of time, trying to match up either a queued item or a running job that has the CHANGES_BID parameter. This is necessary because Jenkins does not give us any identifying information when we create a job initially. The changes_bid parameter should be the corresponding value to look for in the CHANGES_BID parameter. 
The result is a mapping with the following keys: - queued: is it currently present in the queue - item_id: the queued item ID, if available - build_no: the build number, if available """ # Check the queue first to ensure that we don't miss a transition # from queue -> active jobs item_id = self._find_queue_item_id(master_base_url, changes_bid) build_no = None if item_id: # Saw it in the queue, so we don't know the build number yet. build_no = None else: # Didn't see it in the queue, look for the build number on the assumption that it has begun. build_no = self._find_build_no(master_base_url, job_name, changes_bid) if build_no or item_id: # If we found either, we know the Jenkins build exists and we can probably find it again. return { 'job_name': job_name, 'queued': bool(item_id), 'item_id': item_id, 'build_no': build_no, 'uri': None, } return None def _find_queue_item_id(self, master_base_url, changes_bid): """Looks in a Jenkins master's queue for an item, and returns the ID if found. Args: master_base_url (str): Jenkins master URL, in scheme://host form. changes_bid (str): The identifier for this Jenkins build, typically the JobStep ID. Returns: str: Queue item id if found, otherwise None. """ xpath = QUEUE_ID_XPATH.format(job_id=changes_bid) try: response = self._get_text_response( master_base_url=master_base_url, path='/queue/api/xml/', params={ 'xpath': xpath, 'wrapper': 'x', }, ) except NotFound: return None # it's possible that we managed to create multiple jobs in certain # situations, so let's just get the newest one try: match = etree.fromstring(response).iter('id').next() except StopIteration: return None return match.text def _find_build_no(self, master_base_url, job_name, changes_bid): """Looks in a Jenkins master's list of current/recent builds for one with the given CHANGES_BID, and returns the build number if found. Args: master_base_url (str): Jenkins master URL, in scheme://host form. job_name (str): Name of the Jenkins project/job to look for the build in; ex: 'generic_build'. changes_bid (str): The identifier for this Jenkins build, typically the JobStep ID. Returns: str: build number of the build if found, otherwise None. 
""" xpath = BUILD_ID_XPATH.format(job_id=changes_bid) try: response = self._get_text_response( master_base_url=master_base_url, path='/job/{job_name}/api/xml/'.format(job_name=job_name), params={ 'depth': 1, 'xpath': xpath, 'wrapper': 'x', }, ) except NotFound: return None # it's possible that we managed to create multiple jobs in certain # situations, so let's just get the newest one try: match = etree.fromstring(response).iter('number').next() except StopIteration: return None return match.text def _get_node(self, master_base_url, label): node, created = get_or_create(Node, {'label': label}) if not created: return node try: response = self._get_text_response( master_base_url=master_base_url, path='/computer/{}/config.xml'.format(label), ) except NotFound: return node # lxml expects the response to be in bytes, so let's assume it's utf-8 # and send it back as the original format response = response.encode('utf-8') xml = objectify.fromstring(response) cluster_names = xml.label.text.split(' ') for cluster_name in cluster_names: # remove swarm client as a cluster label as its not useful if cluster_name == 'swarm': continue cluster, _ = get_or_create(Cluster, {'label': cluster_name}) get_or_create(ClusterNode, {'node': node, 'cluster': cluster}) return node def _sync_step_from_queue(self, step): try: item = self._get_json_response( step.data['master'], '/queue/item/{}'.format(step.data['item_id']), ) except NotFound: # The build might've left the Jenkins queue since we last checked; look for the build_no of the # running build. build_no = self._find_build_no(step.data['master'], step.data['job_name'], changes_bid=step.id.hex) if build_no: step.data['queued'] = False step.data['build_no'] = build_no db.session.add(step) self._sync_step_from_active(step) return step.status = Status.finished step.result = Result.infra_failed db.session.add(step) self.logger.exception("Queued step not found in queue: {} (build: {})".format(step.id, step.job.build_id)) statsreporter.stats().incr('jenkins_item_not_found_in_queue') return if item.get('executable'): build_no = item['executable']['number'] step.data['queued'] = False step.data['build_no'] = build_no step.data['uri'] = item['executable']['url'] db.session.add(step) if item['blocked']: step.status = Status.queued db.session.add(step) elif item.get('cancelled') and not step.data.get('build_no'): step.status = Status.finished step.result = Result.aborted db.session.add(step) elif item.get('executable'): self._sync_step_from_active(step) return def _get_jenkins_job(self, step): try: job_name = step.data['job_name'] build_no = step.data['build_no'] except KeyError: raise UnrecoverableException('Missing Jenkins job information') try: return self._get_json_response( step.data['master'], '/job/{}/{}'.format(job_name, build_no), ) except NotFound: raise UnrecoverableException('Unable to find job in Jenkins') def _sync_step_from_active(self, step): item = self._get_jenkins_job(step) if not step.data.get('uri'): step.data['uri'] = item['url'] # TODO(dcramer): we're doing a lot of work here when we might # not need to due to it being sync'd previously node = self._get_node(step.data['master'], item['builtOn']) step.node = node step.date_started = datetime.utcfromtimestamp( item['timestamp'] / 1000) if item['building']: step.status = Status.in_progress else: step.status = Status.finished step.result = RESULT_MAP[item['result']] step.date_finished = datetime.utcfromtimestamp( (item['timestamp'] + item['duration']) / 1000) if step.status == Status.finished: 
self._sync_results(step, item) if db.session.is_modified(step): db.session.add(step) db.session.commit() def _sync_results(self, step, item): artifacts = item.get('artifacts', ()) # Detect and warn if there are duplicate artifact file names as we were relying on # uniqueness before. artifact_filenames = set() for artifact in artifacts: if artifact['fileName'] in artifact_filenames: self.logger.warning('Duplicate artifact filename found: %s', artifact['fileName']) artifact_filenames.add(artifact['fileName']) self._sync_generic_results(step, artifacts) # sync console log self.logger.info('Syncing console log for %s', step.id) try: result = True while result: result = self._sync_log(step) except Exception: db.session.rollback() current_app.logger.exception( 'Unable to sync console log for job step %r', step.id.hex) def verify_final_artifacts(self, step, artifacts): # If the Jenkins run was aborted or timed out, we don't expect a manifest file. if (step.result != Result.aborted and not step.data.get('timed_out', False) and not any(ManifestJsonHandler.can_process(a.name) for a in artifacts)): db.session.add(FailureReason( step_id=step.id, job_id=step.job.id, build_id=step.job.build_id, project_id=step.job.project_id, reason='missing_manifest_json', )) step.result = Result.infra_failed db.session.add(step) db.session.commit() def _get_artifact_path(self, artifact_data): """Given the artifact's info from Jenkins, return a relative path to be used as a unique name in the database. This assumes that Jenkins is set up to collect artifacts from a directory named "artifacts" if Jenkins says the relative path starts with "artifacts/". In those cases, remove the "artifacts/" prefix. """ artifact_dir = 'artifacts/' if artifact_data['relativePath'].startswith(artifact_dir): return artifact_data['relativePath'][len(artifact_dir):] return artifact_data['relativePath'] def _handle_generic_artifact(self, jobstep, artifact): artifact, created = get_or_create(Artifact, where={ 'step': jobstep, 'name': self._get_artifact_path(artifact), }, defaults={ 'project': jobstep.project, 'job': jobstep.job, 'data': artifact, }) if not created: db.session.commit() def _sync_generic_results(self, step, artifacts): # sync artifacts self.logger.info('Syncing artifacts for %s', step.id) for artifact in artifacts: self._handle_generic_artifact(jobstep=step, artifact=artifact) def sync_job(self, job): """ Steps get created during the create_job and sync_step phases so we only rely on those steps syncing. """ def sync_step(self, step): if step.data.get('queued'): self._sync_step_from_queue(step) else: self._sync_step_from_active(step) def cancel_step(self, step): # The Jenkins build_no won't exist if the job is still queued. if step.data.get('build_no'): url = '/job/{}/{}/stop/'.format( step.data['job_name'], step.data['build_no']) elif step.data.get('item_id'): url = '/queue/cancelItem?id={}'.format(step.data['item_id']) else: url = None step.status = Status.finished step.result = Result.aborted step.date_finished = datetime.utcnow() db.session.add(step) db.session.flush() if not url: # We don't know how to cancel the step or even if it is running, so # we've done all we can. return try: self._get_text_response( master_base_url=step.data['master'], path=url, method='POST', ) except NotFound: return except Exception: self.logger.exception('Unable to cancel build upstream') # If the build timed out and is in progress (off the Jenkins queue), # try to grab the logs. 
if not step.data.get('queued') and step.data.get('timed_out', False): try: self._sync_log(step) except Exception: self.logger.exception( 'Unable to fully sync console log for job step %r', step.id.hex) def get_job_parameters(self, job, changes_bid): # type: (Any, str) -> Dict[str, str] # TODO(kylec): Take a Source rather than a Job; we don't need a Job. """ Args: job (Job): Job to use. changes_bid (str): Changes BID; typically JobStep ID. Returns: dict: Parameters to be supplied to Jenkins for the job. """ params = {'CHANGES_BID': changes_bid} source = job.build.source if source.revision_sha: params['REVISION'] = source.revision_sha if source.patch: params['PATCH_URL'] = build_patch_uri(source.patch.id) phab_diff_id = source.data.get('phabricator.diffID') if phab_diff_id: params['PHAB_DIFF_ID'] = phab_diff_id phab_revision_id = source.data.get('phabricator.revisionID') if phab_revision_id: params['PHAB_REVISION_ID'] = phab_revision_id if self.cluster: params['CLUSTER'] = self.cluster return params def create_jenkins_job_from_params(self, changes_bid, params, job_name=None, is_diff=False): if job_name is None: job_name = self.job_name if not job_name: raise UnrecoverableException('Missing Jenkins project configuration') json_data = { 'parameter': params } master = self._pick_master(job_name, is_diff) # TODO: Jenkins will return a 302 if it cannot queue the job which I # believe implies that there is already a job with the same parameters # queued. self._get_text_response( master_base_url=master, path='/job/{}/build'.format(job_name), method='POST', data={ 'json': json.dumps(json_data), }, ) # we retry for a period of time as Jenkins doesn't have strong consistency # guarantees and the job may not show up right away t = time.time() + 5 job_data = None while time.time() < t: job_data = self._find_job(master, job_name, changes_bid) if job_data: break time.sleep(0.3) if job_data is None: raise Exception('Unable to find matching job after creation. GLHF') job_data['master'] = master return job_data def get_default_job_phase_label(self, job, job_name): return 'Build {0}'.format(job_name) def create_job(self, job, replaces=None): """ Creates a job within Jenkins. Due to the way the API works, this consists of two steps: - Submitting the job - Polling for the newly created job to associate either a queue ID or a finalized build number. 
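        Once the queue item id or build number is known, the JobStep label is
        finalized and a sync_job_step task is scheduled to track progress.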
""" phase, created = get_or_create(JobPhase, where={ 'job': job, 'label': self.get_default_job_phase_label(job, self.job_name), 'project': job.project, }, defaults={ 'status': job.status, }) assert not created or not replaces step = self._create_job_step( phase=phase, data={'job_name': self.job_name}, status=job.status, force_create=bool(replaces), cluster=self.cluster ) if replaces: replaces.replacement_id = step.id db.session.add(replaces) db.session.commit() # now create the jenkins build # we don't commit immediately because we also want to update the job # and jobstep using the job_data we get from jenkins job_data = self.create_jenkins_build(step, commit=False) if job_data['queued']: job.status = Status.queued else: job.status = Status.in_progress db.session.add(job) assert 'master' in step.data assert 'job_name' in step.data assert 'build_no' in step.data or 'item_id' in step.data # now we have the build_no/item_id and can set the full jobstep label step.label = '{0} #{1}'.format(step.data['job_name'], step.data['build_no'] or step.data['item_id']) db.session.add(step) db.session.commit() sync_job_step.delay( step_id=step.id.hex, task_id=step.id.hex, parent_task_id=job.id.hex, ) return step def create_jenkins_build(self, step, job_name=None, commit=True, **kwargs): """ Create a jenkins build for the given jobstep. If the given step already has a jenkins build associated with it, this will not perform any work. If not, this creates the build, updates the step to refer to the new build, optionally committing these changes. Args: step (JobStep): The shard we'd like to launch a jenkins build for. job_name (str): Job's name. Default is self.job_name. commit (bool): Whether to commit changes to database at the end. kwargs: Additional arguments to be passed to get_job_parameters() """ if step.data.get('build_no') or step.data.get('item_id'): return params_dict = self.get_job_parameters(step.job, changes_bid=step.id.hex, **kwargs) jenkins_params = [{'name': k, 'value': v} for k, v in params_dict.iteritems()] is_diff = not step.job.source.is_commit() job_data = self.create_jenkins_job_from_params( changes_bid=step.id.hex, params=jenkins_params, job_name=job_name, is_diff=is_diff ) step.data.update(job_data) db.session.add(step) # Hook that allows other builders to add commands for the jobstep # which tells changes-client what to run. # TODO(kylec): Stop passing the params as env once the data is available # in changes-client by other means. self.create_commands(step, env=params_dict) if commit: db.session.commit() return job_data def get_artifact_manager(self, jobstep): handlers = [CoverageHandler, XunitHandler, ManifestJsonHandler, AnalyticsJsonHandler] if self.debug_config.get('fetch_jenkins_logs'): handlers.append(DummyLogFileHandler) return Manager(handlers) def create_commands(self, step, env): """ Args: step (JobStep): The JobStep to create commands under. env (dict): Environment variables for the commands. """ pass def can_snapshot(self): return False def _streaming_get(self, url, params=None): """ Perform an HTTP GET request with a streaming response. Args: url (str): The url to fetch. params (dict): Optional dictionary of query parameters. Returns: A streamed requests Response object. Raises: HTTPError: if the response code didn't indicate success. Timeout: if the server took too long to respond. """ resp = self.http_session.get(url, stream=True, timeout=15, params=params, auth=self.auth, verify=self.verify) resp.raise_for_status() return resp
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers from google.api_core import operations_v1 from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.cloud.domains_v1.types import domains from google.longrunning import operations_pb2 # type: ignore from .base import DomainsTransport, DEFAULT_CLIENT_INFO class DomainsGrpcTransport(DomainsTransport): """gRPC backend transport for Domains. The Cloud Domains API enables management and configuration of domain names. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _stubs: Dict[str, Callable] def __init__( self, *, host: str = "domains.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. 
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @classmethod def create_channel( cls, host: str = "domains.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: host (Optional[str]): The host for the channel to use. 
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. Raises: google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. return self._operations_client @property def search_domains( self, ) -> Callable[[domains.SearchDomainsRequest], domains.SearchDomainsResponse]: r"""Return a callable for the search domains method over gRPC. Searches for available domain names similar to the provided query. Availability results from this method are approximate; call ``RetrieveRegisterParameters`` on a domain before registering to confirm availability. Returns: Callable[[~.SearchDomainsRequest], ~.SearchDomainsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "search_domains" not in self._stubs: self._stubs["search_domains"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/SearchDomains", request_serializer=domains.SearchDomainsRequest.serialize, response_deserializer=domains.SearchDomainsResponse.deserialize, ) return self._stubs["search_domains"] @property def retrieve_register_parameters( self, ) -> Callable[ [domains.RetrieveRegisterParametersRequest], domains.RetrieveRegisterParametersResponse, ]: r"""Return a callable for the retrieve register parameters method over gRPC. Gets parameters needed to register a new domain name, including price and up-to-date availability. Use the returned values to call ``RegisterDomain``. Returns: Callable[[~.RetrieveRegisterParametersRequest], ~.RetrieveRegisterParametersResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "retrieve_register_parameters" not in self._stubs: self._stubs["retrieve_register_parameters"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/RetrieveRegisterParameters", request_serializer=domains.RetrieveRegisterParametersRequest.serialize, response_deserializer=domains.RetrieveRegisterParametersResponse.deserialize, ) return self._stubs["retrieve_register_parameters"] @property def register_domain( self, ) -> Callable[[domains.RegisterDomainRequest], operations_pb2.Operation]: r"""Return a callable for the register domain method over gRPC. Registers a new domain name and creates a corresponding ``Registration`` resource. Call ``RetrieveRegisterParameters`` first to check availability of the domain name and determine parameters like price that are needed to build a call to this method. A successful call creates a ``Registration`` resource in state ``REGISTRATION_PENDING``, which resolves to ``ACTIVE`` within 1-2 minutes, indicating that the domain was successfully registered. If the resource ends up in state ``REGISTRATION_FAILED``, it indicates that the domain was not registered successfully, and you can safely delete the resource and retry registration. Returns: Callable[[~.RegisterDomainRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "register_domain" not in self._stubs: self._stubs["register_domain"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/RegisterDomain", request_serializer=domains.RegisterDomainRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["register_domain"] @property def retrieve_transfer_parameters( self, ) -> Callable[ [domains.RetrieveTransferParametersRequest], domains.RetrieveTransferParametersResponse, ]: r"""Return a callable for the retrieve transfer parameters method over gRPC. Gets parameters needed to transfer a domain name from another registrar to Cloud Domains. For domains managed by Google Domains, transferring to Cloud Domains is not supported. Use the returned values to call ``TransferDomain``. Returns: Callable[[~.RetrieveTransferParametersRequest], ~.RetrieveTransferParametersResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "retrieve_transfer_parameters" not in self._stubs: self._stubs["retrieve_transfer_parameters"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/RetrieveTransferParameters", request_serializer=domains.RetrieveTransferParametersRequest.serialize, response_deserializer=domains.RetrieveTransferParametersResponse.deserialize, ) return self._stubs["retrieve_transfer_parameters"] @property def transfer_domain( self, ) -> Callable[[domains.TransferDomainRequest], operations_pb2.Operation]: r"""Return a callable for the transfer domain method over gRPC. Transfers a domain name from another registrar to Cloud Domains. For domains managed by Google Domains, transferring to Cloud Domains is not supported. 
Before calling this method, go to the domain's current registrar to unlock the domain for transfer and retrieve the domain's transfer authorization code. Then call ``RetrieveTransferParameters`` to confirm that the domain is unlocked and to get values needed to build a call to this method. A successful call creates a ``Registration`` resource in state ``TRANSFER_PENDING``. It can take several days to complete the transfer process. The registrant can often speed up this process by approving the transfer through the current registrar, either by clicking a link in an email from the registrar or by visiting the registrar's website. A few minutes after transfer approval, the resource transitions to state ``ACTIVE``, indicating that the transfer was successful. If the transfer is rejected or the request expires without being approved, the resource can end up in state ``TRANSFER_FAILED``. If transfer fails, you can safely delete the resource and retry the transfer. Returns: Callable[[~.TransferDomainRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "transfer_domain" not in self._stubs: self._stubs["transfer_domain"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/TransferDomain", request_serializer=domains.TransferDomainRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["transfer_domain"] @property def list_registrations( self, ) -> Callable[ [domains.ListRegistrationsRequest], domains.ListRegistrationsResponse ]: r"""Return a callable for the list registrations method over gRPC. Lists the ``Registration`` resources in a project. Returns: Callable[[~.ListRegistrationsRequest], ~.ListRegistrationsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_registrations" not in self._stubs: self._stubs["list_registrations"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/ListRegistrations", request_serializer=domains.ListRegistrationsRequest.serialize, response_deserializer=domains.ListRegistrationsResponse.deserialize, ) return self._stubs["list_registrations"] @property def get_registration( self, ) -> Callable[[domains.GetRegistrationRequest], domains.Registration]: r"""Return a callable for the get registration method over gRPC. Gets the details of a ``Registration`` resource. Returns: Callable[[~.GetRegistrationRequest], ~.Registration]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_registration" not in self._stubs: self._stubs["get_registration"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/GetRegistration", request_serializer=domains.GetRegistrationRequest.serialize, response_deserializer=domains.Registration.deserialize, ) return self._stubs["get_registration"] @property def update_registration( self, ) -> Callable[[domains.UpdateRegistrationRequest], operations_pb2.Operation]: r"""Return a callable for the update registration method over gRPC. Updates select fields of a ``Registration`` resource, notably ``labels``. To update other fields, use the appropriate custom update method: - To update management settings, see ``ConfigureManagementSettings`` - To update DNS configuration, see ``ConfigureDnsSettings`` - To update contact information, see ``ConfigureContactSettings`` Returns: Callable[[~.UpdateRegistrationRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_registration" not in self._stubs: self._stubs["update_registration"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/UpdateRegistration", request_serializer=domains.UpdateRegistrationRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_registration"] @property def configure_management_settings( self, ) -> Callable[ [domains.ConfigureManagementSettingsRequest], operations_pb2.Operation ]: r"""Return a callable for the configure management settings method over gRPC. Updates a ``Registration``'s management settings. Returns: Callable[[~.ConfigureManagementSettingsRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "configure_management_settings" not in self._stubs: self._stubs[ "configure_management_settings" ] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/ConfigureManagementSettings", request_serializer=domains.ConfigureManagementSettingsRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["configure_management_settings"] @property def configure_dns_settings( self, ) -> Callable[[domains.ConfigureDnsSettingsRequest], operations_pb2.Operation]: r"""Return a callable for the configure dns settings method over gRPC. Updates a ``Registration``'s DNS settings. Returns: Callable[[~.ConfigureDnsSettingsRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "configure_dns_settings" not in self._stubs: self._stubs["configure_dns_settings"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/ConfigureDnsSettings", request_serializer=domains.ConfigureDnsSettingsRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["configure_dns_settings"] @property def configure_contact_settings( self, ) -> Callable[[domains.ConfigureContactSettingsRequest], operations_pb2.Operation]: r"""Return a callable for the configure contact settings method over gRPC. Updates a ``Registration``'s contact settings. Some changes require confirmation by the domain's registrant contact . Returns: Callable[[~.ConfigureContactSettingsRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "configure_contact_settings" not in self._stubs: self._stubs["configure_contact_settings"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/ConfigureContactSettings", request_serializer=domains.ConfigureContactSettingsRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["configure_contact_settings"] @property def export_registration( self, ) -> Callable[[domains.ExportRegistrationRequest], operations_pb2.Operation]: r"""Return a callable for the export registration method over gRPC. Exports a ``Registration`` resource, such that it is no longer managed by Cloud Domains. When an active domain is successfully exported, you can continue to use the domain in `Google Domains <https://domains.google/>`__ until it expires. The calling user becomes the domain's sole owner in Google Domains, and permissions for the domain are subsequently managed there. The domain does not renew automatically unless the new owner sets up billing in Google Domains. Returns: Callable[[~.ExportRegistrationRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "export_registration" not in self._stubs: self._stubs["export_registration"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/ExportRegistration", request_serializer=domains.ExportRegistrationRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["export_registration"] @property def delete_registration( self, ) -> Callable[[domains.DeleteRegistrationRequest], operations_pb2.Operation]: r"""Return a callable for the delete registration method over gRPC. Deletes a ``Registration`` resource. This method works on any ``Registration`` resource using `Subscription or Commitment billing </domains/pricing#billing-models>`__, provided that the resource was created at least 1 day in the past. For ``Registration`` resources using `Monthly billing </domains/pricing#billing-models>`__, this method works if: - ``state`` is ``EXPORTED`` with ``expire_time`` in the past - ``state`` is ``REGISTRATION_FAILED`` - ``state`` is ``TRANSFER_FAILED`` When an active registration is successfully deleted, you can continue to use the domain in `Google Domains <https://domains.google/>`__ until it expires. 
The calling user becomes the domain's sole owner in Google Domains, and permissions for the domain are subsequently managed there. The domain does not renew automatically unless the new owner sets up billing in Google Domains. Returns: Callable[[~.DeleteRegistrationRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_registration" not in self._stubs: self._stubs["delete_registration"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/DeleteRegistration", request_serializer=domains.DeleteRegistrationRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_registration"] @property def retrieve_authorization_code( self, ) -> Callable[ [domains.RetrieveAuthorizationCodeRequest], domains.AuthorizationCode ]: r"""Return a callable for the retrieve authorization code method over gRPC. Gets the authorization code of the ``Registration`` for the purpose of transferring the domain to another registrar. You can call this method only after 60 days have elapsed since the initial domain registration. Returns: Callable[[~.RetrieveAuthorizationCodeRequest], ~.AuthorizationCode]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "retrieve_authorization_code" not in self._stubs: self._stubs["retrieve_authorization_code"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/RetrieveAuthorizationCode", request_serializer=domains.RetrieveAuthorizationCodeRequest.serialize, response_deserializer=domains.AuthorizationCode.deserialize, ) return self._stubs["retrieve_authorization_code"] @property def reset_authorization_code( self, ) -> Callable[[domains.ResetAuthorizationCodeRequest], domains.AuthorizationCode]: r"""Return a callable for the reset authorization code method over gRPC. Resets the authorization code of the ``Registration`` to a new random string. You can call this method only after 60 days have elapsed since the initial domain registration. Returns: Callable[[~.ResetAuthorizationCodeRequest], ~.AuthorizationCode]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "reset_authorization_code" not in self._stubs: self._stubs["reset_authorization_code"] = self.grpc_channel.unary_unary( "/google.cloud.domains.v1.Domains/ResetAuthorizationCode", request_serializer=domains.ResetAuthorizationCodeRequest.serialize, response_deserializer=domains.AuthorizationCode.deserialize, ) return self._stubs["reset_authorization_code"] def close(self): self.grpc_channel.close() __all__ = ("DomainsGrpcTransport",)
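

# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the generated transport above): every RPC
# property in DomainsGrpcTransport follows the same lazy-initialisation
# pattern -- build the gRPC callable on first access, cache it in self._stubs
# keyed by the fully-qualified method name, and return the cached object on
# later accesses. The class below is a dependency-free stand-in showing just
# that pattern; _LazyStubCache and its names are illustrative and are not part
# of google-cloud-domains.
class _LazyStubCache(object):
    """Create per-method callables on first use and reuse them afterwards."""

    def __init__(self, factory):
        # ``factory`` maps a method name to a freshly built callable; the real
        # transport uses self.grpc_channel.unary_unary(...) for this step.
        self._factory = factory
        self._stubs = {}

    def get(self, name):
        if name not in self._stubs:
            self._stubs[name] = self._factory(name)
        return self._stubs[name]


# Example (purely illustrative):
#     cache = _LazyStubCache(lambda name: (lambda request: request))
#     stub = cache.get("/google.cloud.domains.v1.Domains/GetRegistration")   # built once
#     stub is cache.get("/google.cloud.domains.v1.Domains/GetRegistration")  # True, cached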
## # Copyright (c) 2005-2014 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from pycalendar.datetime import DateTime from pycalendar.period import Period from twext.python.clsprop import classproperty import txweb2.dav.test.util from txweb2.http_headers import MimeType from txweb2.stream import MemoryStream from twisted.internet.defer import inlineCallbacks, succeed, returnValue from twistedcaldav import caldavxml from twistedcaldav.caldavxml import TimeRange from twistedcaldav.ical import Component, normalize_iCalStr from txdav.caldav.datastore.query.filter import Filter from txdav.caldav.datastore.scheduling.freebusy import generateFreeBusyInfo from txdav.caldav.datastore.scheduling.ischedule.localservers import ServersDB, Server from txdav.caldav.datastore.sql import ManagedAttachment from txdav.caldav.datastore.test.common import CaptureProtocol from txdav.common.datastore.podding.conduit import PoddingConduit, \ FailedCrossPodRequestError from txdav.common.datastore.podding.resource import ConduitResource from txdav.common.datastore.podding.test.util import MultiStoreConduitTest, \ FakeConduitRequest from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError, \ ObjectResourceNameNotAllowedError from txdav.common.idirectoryservice import DirectoryRecordNotFoundError class TestConduit (CommonCommonTests, txweb2.dav.test.util.TestCase): class FakeConduit(object): def recv_fake(self, j): return succeed({ "result": "ok", "back2u": j["echo"], "more": "bits", }) @inlineCallbacks def setUp(self): yield super(TestConduit, self).setUp() serversDB = ServersDB() serversDB.addServer(Server("A", "http://127.0.0.1", "A", True)) serversDB.addServer(Server("B", "http://127.0.0.2", "B", False)) yield self.buildStoreAndDirectory(serversDB=serversDB) self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest())) yield self.populate() @inlineCallbacks def populate(self): yield populateCalendarsFrom(self.requirements, self.storeUnderTest()) self.notifierFactory.reset() @classproperty(cache=False) def requirements(cls): #@NoSelf return { "user01": { "calendar_1": { }, "inbox": { }, }, "user02": { "calendar_1": { }, "inbox": { }, }, "user03": { "calendar_1": { }, "inbox": { }, }, } @inlineCallbacks def test_validRequest(self): """ Cross-pod request fails when there is no shared secret header present. 
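        validRequest must accept a valid owner/sharee pair (returning both
        directory records), reject unknown users with
        DirectoryRecordNotFoundError, and reject a pair hosted on the same pod
        with FailedCrossPodRequestError.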
""" conduit = PoddingConduit(self.storeUnderTest()) r1, r2 = yield conduit.validRequest("user01", "puser02") self.assertTrue(r1 is not None) self.assertTrue(r2 is not None) self.assertFailure( conduit.validRequest("bogus01", "user02"), DirectoryRecordNotFoundError ) self.assertFailure( conduit.validRequest("user01", "bogus02"), DirectoryRecordNotFoundError ) self.assertFailure( conduit.validRequest("user01", "user02"), FailedCrossPodRequestError ) class TestConduitToConduit(MultiStoreConduitTest): class FakeConduit(PoddingConduit): @inlineCallbacks def send_fake(self, txn, ownerUID, shareeUID): _ignore_owner, sharee = yield self.validRequest(ownerUID, shareeUID) action = { "action": "fake", "echo": "bravo" } result = yield self.sendRequest(txn, sharee, action) returnValue(result) def recv_fake(self, txn, j): return succeed({ "result": "ok", "back2u": j["echo"], "more": "bits", }) def makeConduit(self, store): """ Use our own variant. """ conduit = self.FakeConduit(store) conduit.conduitRequestClass = FakeConduitRequest return conduit @inlineCallbacks def test_fake_action(self): """ Cross-pod request works when conduit does support the action. """ txn = self.transactionUnderTest() store1 = self.storeUnderTest() response = yield store1.conduit.send_fake(txn, "user01", "puser01") self.assertTrue("result" in response) self.assertEqual(response["result"], "ok") self.assertTrue("back2u" in response) self.assertEqual(response["back2u"], "bravo") self.assertTrue("more" in response) self.assertEqual(response["more"], "bits") yield txn.commit() store2 = self.otherStoreUnderTest() txn = store2.newTransaction() response = yield store2.conduit.send_fake(txn, "puser01", "user01") self.assertTrue("result" in response) self.assertEqual(response["result"], "ok") self.assertTrue("back2u" in response) self.assertEqual(response["back2u"], "bravo") self.assertTrue("more" in response) self.assertEqual(response["more"], "bits") yield txn.commit() class TestConduitAPI(MultiStoreConduitTest): """ Test that the conduit api works. """ nowYear = {"now": DateTime.getToday().getYear()} caldata1 = """BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN BEGIN:VEVENT UID:uid1 DTSTART:{now:04d}0102T140000Z DURATION:PT1H CREATED:20060102T190000Z DTSTAMP:20051222T210507Z RRULE:FREQ=WEEKLY SUMMARY:instance END:VEVENT END:VCALENDAR """.replace("\n", "\r\n").format(**nowYear) caldata1_changed = """BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN BEGIN:VEVENT UID:uid1 DTSTART:{now:04d}0102T150000Z DURATION:PT1H CREATED:20060102T190000Z DTSTAMP:20051222T210507Z RRULE:FREQ=WEEKLY SUMMARY:instance changed END:VEVENT END:VCALENDAR """.replace("\n", "\r\n").format(**nowYear) caldata2 = """BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN BEGIN:VEVENT UID:uid2 DTSTART:{now:04d}0102T160000Z DURATION:PT1H CREATED:20060102T190000Z DTSTAMP:20051222T210507Z RRULE:FREQ=WEEKLY SUMMARY:instance END:VEVENT END:VCALENDAR """.replace("\n", "\r\n").format(**nowYear) caldata3 = """BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN BEGIN:VEVENT UID:uid3 DTSTART:{now:04d}0102T160000Z DURATION:PT1H CREATED:20060102T190000Z DTSTAMP:20051222T210507Z RRULE:FREQ=WEEKLY SUMMARY:instance END:VEVENT END:VCALENDAR """.replace("\n", "\r\n").format(**nowYear) @inlineCallbacks def test_basic_share(self): """ Test that basic invite/uninvite works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") shared = yield calendar1.shareeView("puser01") self.assertEqual(shared.shareStatus(), _BIND_STATUS_ACCEPTED) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") self.assertTrue(shared is not None) self.assertTrue(shared.external()) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.uninviteUserFromShare("puser01") yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") self.assertTrue(shared is None) yield self.otherCommit() @inlineCallbacks def test_countobjects(self): """ Test that action=countobjects works. """ yield self.createShare("user01", "puser01") shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") count = yield shared.countObjectResources() self.assertEqual(count, 0) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) count = yield calendar1.countObjectResources() self.assertEqual(count, 1) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") count = yield shared.countObjectResources() self.assertEqual(count, 1) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() count = yield calendar1.countObjectResources() self.assertEqual(count, 0) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") count = yield shared.countObjectResources() self.assertEqual(count, 0) yield self.otherCommit() @inlineCallbacks def test_listobjects(self): """ Test that action=listobjects works. 
""" yield self.createShare("user01", "puser01") shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") objects = yield shared.listObjectResources() self.assertEqual(set(objects), set()) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2)) objects = yield calendar1.listObjectResources() self.assertEqual(set(objects), set(("1.ics", "2.ics",))) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") objects = yield shared.listObjectResources() self.assertEqual(set(objects), set(("1.ics", "2.ics",))) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() objects = yield calendar1.listObjectResources() self.assertEqual(set(objects), set(("2.ics",))) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") objects = yield shared.listObjectResources() self.assertEqual(set(objects), set(("2.ics",))) yield self.otherCommit() @inlineCallbacks def test_synctoken(self): """ Test that action=synctoken works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") token1_1 = yield calendar1.syncToken() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") token2_1 = yield shared.syncToken() yield self.otherCommit() self.assertEqual(token1_1, token2_1) calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") token1_2 = yield calendar1.syncToken() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") token2_2 = yield shared.syncToken() yield self.otherCommit() self.assertNotEqual(token1_1, token1_2) self.assertEqual(token1_2, token2_2) calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() count = yield calendar1.countObjectResources() self.assertEqual(count, 0) yield self.commit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") token1_3 = yield calendar1.syncToken() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") token2_3 = yield shared.syncToken() yield self.otherCommit() self.assertNotEqual(token1_1, token1_3) self.assertNotEqual(token1_2, token1_3) self.assertEqual(token1_3, token2_3) @inlineCallbacks def test_resourcenamessincerevision(self): """ Test that action=synctoken works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") token1_1 = yield calendar1.syncToken() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") token2_1 = yield shared.syncToken() yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") token1_2 = yield calendar1.syncToken() names1 = yield calendar1.resourceNamesSinceToken(token1_1) self.assertEqual(names1, ([u"1.ics"], [], [],)) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") token2_2 = yield shared.syncToken() names2 = yield shared.resourceNamesSinceToken(token2_1) self.assertEqual(names2, ([u"1.ics"], [], [],)) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() count = yield calendar1.countObjectResources() self.assertEqual(count, 0) yield self.commit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") token1_3 = yield calendar1.syncToken() names1 = yield calendar1.resourceNamesSinceToken(token1_2) self.assertEqual(names1, ([], [u"1.ics"], [],)) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") token2_3 = yield shared.syncToken() names2 = yield shared.resourceNamesSinceToken(token2_2) self.assertEqual(names2, ([], [u"1.ics"], [],)) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") names1 = yield calendar1.resourceNamesSinceToken(token1_3) self.assertEqual(names1, ([], [], [],)) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") names2 = yield shared.resourceNamesSinceToken(token2_3) self.assertEqual(names2, ([], [], [],)) yield self.otherCommit() @inlineCallbacks def test_resourceuidforname(self): """ Test that action=resourceuidforname works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") uid = yield calendar1.resourceUIDForName("1.ics") self.assertEqual(uid, "uid1") uid = yield calendar1.resourceUIDForName("2.ics") self.assertTrue(uid is None) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") uid = yield shared.resourceUIDForName("1.ics") self.assertEqual(uid, "uid1") uid = yield shared.resourceUIDForName("2.ics") self.assertTrue(uid is None) yield self.otherCommit() @inlineCallbacks def test_resourcenameforuid(self): """ Test that action=resourcenameforuid works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") name = yield calendar1.resourceNameForUID("uid1") self.assertEqual(name, "1.ics") name = yield calendar1.resourceNameForUID("uid2") self.assertTrue(name is None) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") name = yield shared.resourceNameForUID("uid1") self.assertEqual(name, "1.ics") name = yield shared.resourceNameForUID("uid2") self.assertTrue(name is None) yield self.otherCommit() @inlineCallbacks def test_search(self): """ Test that action=resourcenameforuid works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() filter = caldavxml.Filter( caldavxml.ComponentFilter( *[caldavxml.ComponentFilter( **{"name":("VEVENT", "VFREEBUSY", "VAVAILABILITY")} )], **{"name": "VCALENDAR"} ) ) filter = Filter(filter) calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") names = [item[0] for item in (yield calendar1.search(filter))] self.assertEqual(names, ["1.ics", ]) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") names = [item[0] for item in (yield shared.search(filter))] self.assertEqual(names, ["1.ics", ]) yield self.otherCommit() @inlineCallbacks def test_loadallobjects(self): """ Test that action=loadallobjects works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) resource_id1 = resource1.id() resource2 = yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2)) resource_id2 = resource2.id() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resources = yield shared.objectResources() byname = dict([(obj.name(), obj) for obj in resources]) byuid = dict([(obj.uid(), obj) for obj in resources]) self.assertEqual(len(resources), 2) self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "2.ics",))) self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid2",))) self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id2,))) resource = yield shared.objectResourceWithName("1.ics") self.assertTrue(resource is byname["1.ics"]) resource = yield shared.objectResourceWithName("2.ics") self.assertTrue(resource is byname["2.ics"]) resource = yield shared.objectResourceWithName("Missing.ics") self.assertTrue(resource is None) resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is byuid["uid1"]) resource = yield shared.objectResourceWithUID("uid2") self.assertTrue(resource is byuid["uid2"]) resource = yield shared.objectResourceWithUID("uid-missing") self.assertTrue(resource is None) resource = yield shared.objectResourceWithID(resource_id1) self.assertTrue(resource is byname["1.ics"]) resource = yield shared.objectResourceWithID(resource_id2) self.assertTrue(resource is byname["2.ics"]) resource = yield shared.objectResourceWithID(0) self.assertTrue(resource is None) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resources = yield shared.objectResources() byname = dict([(obj.name(), obj) for obj in resources]) byuid = dict([(obj.uid(), obj) for obj in resources]) self.assertEqual(len(resources), 1) self.assertEqual(set([obj.name() for obj in resources]), set(("2.ics",))) self.assertEqual(set([obj.uid() for obj in resources]), set(("uid2",))) self.assertEqual(set([obj.id() for obj in resources]), set((resource_id2,))) resource = yield shared.objectResourceWithName("1.ics") self.assertTrue(resource is None) resource = yield shared.objectResourceWithName("2.ics") self.assertTrue(resource is byname["2.ics"]) resource = yield shared.objectResourceWithName("Missing.ics") self.assertTrue(resource is None) resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is None) resource = yield shared.objectResourceWithUID("uid2") self.assertTrue(resource is byuid["uid2"]) resource = yield shared.objectResourceWithUID("uid-missing") self.assertTrue(resource is None) resource = yield shared.objectResourceWithID(resource_id1) self.assertTrue(resource is None) resource = yield shared.objectResourceWithID(resource_id2) self.assertTrue(resource is byname["2.ics"]) resource = yield shared.objectResourceWithID(0) self.assertTrue(resource is None) yield self.otherCommit() @inlineCallbacks def test_loadallobjectswithnames(self): """ Test that 
action=loadallobjectswithnames works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) resource_id1 = resource1.id() yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2)) resource3 = yield calendar1.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3)) resource_id3 = resource3.id() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resources = yield shared.objectResources() self.assertEqual(len(resources), 3) yield self.otherCommit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",)) byname = dict([(obj.name(), obj) for obj in resources]) byuid = dict([(obj.uid(), obj) for obj in resources]) self.assertEqual(len(resources), 2) self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "3.ics",))) self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid3",))) self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id3,))) resource = yield shared.objectResourceWithName("1.ics") self.assertTrue(resource is byname["1.ics"]) resource = yield shared.objectResourceWithName("3.ics") self.assertTrue(resource is byname["3.ics"]) resource = yield shared.objectResourceWithName("Missing.ics") self.assertTrue(resource is None) resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is byuid["uid1"]) resource = yield shared.objectResourceWithUID("uid3") self.assertTrue(resource is byuid["uid3"]) resource = yield shared.objectResourceWithUID("uid-missing") self.assertTrue(resource is None) resource = yield shared.objectResourceWithID(resource_id1) self.assertTrue(resource is byname["1.ics"]) resource = yield shared.objectResourceWithID(resource_id3) self.assertTrue(resource is byname["3.ics"]) resource = yield shared.objectResourceWithID(0) self.assertTrue(resource is None) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",)) byname = dict([(obj.name(), obj) for obj in resources]) byuid = dict([(obj.uid(), obj) for obj in resources]) self.assertEqual(len(resources), 1) self.assertEqual(set([obj.name() for obj in resources]), set(("3.ics",))) self.assertEqual(set([obj.uid() for obj in resources]), set(("uid3",))) self.assertEqual(set([obj.id() for obj in resources]), set((resource_id3,))) resource = yield shared.objectResourceWithName("1.ics") self.assertTrue(resource is None) resource = yield shared.objectResourceWithName("3.ics") self.assertTrue(resource is byname["3.ics"]) resource = yield shared.objectResourceWithName("Missing.ics") self.assertTrue(resource is None) resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is None) resource = yield shared.objectResourceWithUID("uid3") self.assertTrue(resource is byuid["uid3"]) resource = yield shared.objectResourceWithUID("uid-missing") 
self.assertTrue(resource is None) resource = yield shared.objectResourceWithID(resource_id1) self.assertTrue(resource is None) resource = yield shared.objectResourceWithID(resource_id3) self.assertTrue(resource is byname["3.ics"]) resource = yield shared.objectResourceWithID(0) self.assertTrue(resource is None) yield self.otherCommit() @inlineCallbacks def test_objectwith(self): """ Test that action=objectwith works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") resource = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) resource_id = resource.id() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithName("1.ics") self.assertTrue(resource is not None) self.assertEqual(resource.name(), "1.ics") self.assertEqual(resource.uid(), "uid1") resource = yield shared.objectResourceWithName("2.ics") self.assertTrue(resource is None) yield self.otherCommit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is not None) self.assertEqual(resource.name(), "1.ics") self.assertEqual(resource.uid(), "uid1") resource = yield shared.objectResourceWithUID("uid2") self.assertTrue(resource is None) yield self.otherCommit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithID(resource_id) self.assertTrue(resource is not None) self.assertEqual(resource.name(), "1.ics") self.assertEqual(resource.uid(), "uid1") resource = yield shared.objectResourceWithID(0) self.assertTrue(resource is None) yield self.otherCommit() calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") yield object1.remove() yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithName("1.ics") self.assertTrue(resource is None) yield self.otherCommit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is None) yield self.otherCommit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithID(resource_id) self.assertTrue(resource is None) yield self.otherCommit() @inlineCallbacks def test_create(self): """ Test that action=create works. 
""" yield self.createShare("user01", "puser01") shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) resource_id = resource.id() self.assertTrue(resource is not None) self.assertEqual(resource.name(), "1.ics") self.assertEqual(resource.uid(), "uid1") self.assertFalse(resource._componentChanged) yield self.otherCommit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") resource = yield shared.objectResourceWithUID("uid1") self.assertTrue(resource is not None) self.assertEqual(resource.name(), "1.ics") self.assertEqual(resource.uid(), "uid1") self.assertEqual(resource.id(), resource_id) yield self.otherCommit() object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") self.assertTrue(object1 is not None) self.assertEqual(object1.name(), "1.ics") self.assertEqual(object1.uid(), "uid1") self.assertEqual(object1.id(), resource_id) yield self.commit() @inlineCallbacks def test_create_exception(self): """ Test that action=create fails when a duplicate name is used. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") yield self.failUnlessFailure(shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)), ObjectResourceNameAlreadyExistsError) yield self.otherAbort() shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") yield self.failUnlessFailure(shared.createCalendarObjectWithName(".2.ics", Component.fromString(self.caldata2)), ObjectResourceNameNotAllowedError) yield self.otherAbort() @inlineCallbacks def test_setcomponent(self): """ Test that action=setcomponent works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") ical = yield shared_object.component() self.assertTrue(isinstance(ical, Component)) self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1)) yield self.otherCommit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") changed = yield shared_object.setComponent(Component.fromString(self.caldata1_changed)) self.assertFalse(changed) ical = yield shared_object.component() self.assertTrue(isinstance(ical, Component)) self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed)) yield self.otherCommit() object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") ical = yield object1.component() self.assertTrue(isinstance(ical, Component)) self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed)) yield self.commit() @inlineCallbacks def test_component(self): """ Test that action=component works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") ical = yield shared_object.component() self.assertTrue(isinstance(ical, Component)) self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1)) yield self.otherCommit() @inlineCallbacks def test_remove(self): """ Test that action=create works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") yield shared_object.remove() yield self.otherCommit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") self.assertTrue(shared_object is None) yield self.otherCommit() object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") self.assertTrue(object1 is None) yield self.commit() @inlineCallbacks def test_freebusy(self): """ Test that action=component works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() fbstart = "{now:04d}0102T000000Z".format(**self.nowYear) fbend = "{now:04d}0103T000000Z".format(**self.nowYear) shared = yield self.calendarUnderTest(txn=self.newOtherTransaction(), home="puser01", name="shared-calendar") fbinfo = [[], [], []] matchtotal = yield generateFreeBusyInfo( shared, fbinfo, TimeRange(start=fbstart, end=fbend), 0, excludeuid=None, organizer=None, organizerPrincipal=None, same_calendar_user=False, servertoserver=False, event_details=False, logItems=None ) self.assertEqual(matchtotal, 1) self.assertEqual(fbinfo[0], [Period.parseText("{now:04d}0102T140000Z/PT1H".format(**self.nowYear)), ]) self.assertEqual(len(fbinfo[1]), 0) self.assertEqual(len(fbinfo[2]), 0) yield self.otherCommit() def attachmentToString(self, attachment): """ Convenience to convert an L{IAttachment} to a string. @param attachment: an L{IAttachment} provider to convert into a string. @return: a L{Deferred} that fires with the contents of the attachment. @rtype: L{Deferred} firing C{bytes} """ capture = CaptureProtocol() attachment.retrieve(capture) return capture.deferred @inlineCallbacks def test_add_attachment(self): """ Test that action=add-attachment works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") object1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) resourceID = object1.id() yield self.commit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") attachment, location = yield shared_object.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text.")) managedID = attachment.managedID() from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal self.assertTrue(isinstance(attachment, ManagedAttachmentExternal)) self.assertTrue("user01/attachments/test" in location) yield self.otherCommit() cobjs = yield ManagedAttachment.referencesTo(self.transactionUnderTest(), managedID) self.assertEqual(cobjs, set((resourceID,))) attachment = yield ManagedAttachment.load(self.transactionUnderTest(), resourceID, managedID) self.assertEqual(attachment.name(), "test.txt") data = yield self.attachmentToString(attachment) self.assertEqual(data, "Here is some text.") yield self.commit() @inlineCallbacks def test_update_attachment(self): """ Test that action=update-attachment works. """ yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") resourceID = object1.id() attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text.")) managedID = attachment.managedID() yield self.commit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") attachment, location = yield shared_object.updateAttachment(managedID, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some more text.")) managedID = attachment.managedID() from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal self.assertTrue(isinstance(attachment, ManagedAttachmentExternal)) self.assertTrue("user01/attachments/test" in location) yield self.otherCommit() cobjs = yield ManagedAttachment.referencesTo(self.transactionUnderTest(), managedID) self.assertEqual(cobjs, set((resourceID,))) attachment = yield ManagedAttachment.load(self.transactionUnderTest(), resourceID, managedID) self.assertEqual(attachment.name(), "test.txt") data = yield self.attachmentToString(attachment) self.assertEqual(data, "Here is some more text.") yield self.commit() @inlineCallbacks def test_remove_attachment(self): """ Test that action=remove-attachment works. 
""" yield self.createShare("user01", "puser01") calendar1 = yield self.calendarUnderTest(home="user01", name="calendar") yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)) yield self.commit() object1 = yield self.calendarObjectUnderTest(home="user01", calendar_name="calendar", name="1.ics") resourceID = object1.id() attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text.")) managedID = attachment.managedID() yield self.commit() shared_object = yield self.calendarObjectUnderTest(txn=self.newOtherTransaction(), home="puser01", calendar_name="shared-calendar", name="1.ics") yield shared_object.removeAttachment(None, managedID) yield self.otherCommit() cobjs = yield ManagedAttachment.referencesTo(self.transactionUnderTest(), managedID) self.assertEqual(cobjs, set()) attachment = yield ManagedAttachment.load(self.transactionUnderTest(), resourceID, managedID) self.assertTrue(attachment is None) yield self.commit()
# -*- coding: utf-8 -*- import math from array import array _REGISTER_BYTE_WIDTH = 2 _CONFIGURATION_REGISTER = 0x00 _SHUNT_VOLTAGE_REGISTER = 0x01 _BUS_VOLTAGE_REGISTER = 0x02 _POWER_REGISTER = 0x03 _CURRENT_REGISTER = 0x04 _CALIBRATION_REGISTER = 0x05 _REGISTERS = [ _CONFIGURATION_REGISTER, _SHUNT_VOLTAGE_REGISTER, _BUS_VOLTAGE_REGISTER, _POWER_REGISTER, _CURRENT_REGISTER, _CALIBRATION_REGISTER ] # Shunt voltage register LSB is constant for all PGA settings # At PGA = 8, 320mV / 2^15 = ~10uV # At PGA = 4, 160mV / 2^14 = ~10uV # At PGA = 2, 80mV / 2^13 = ~10uV # At PGA = 1, 40mV / 2^12 = ~10uV # _SHUNT_VOLTAGE_REGISTER_LSB = float(0.04) / float(4096) _SHUNT_VOLTAGE_REGISTER_LSB = 0.00001 # Bus voltage register LSB is constant for all full scale ranges # At FSR = 32V, 32V / 2^13 = ~40mV # At FSR = 16V, 16V / 2^12 = ~40mV # _BUS_VOLTAGE_REGISTER_LSB = float(16) / float(4096) _BUS_VOLTAGE_REGISTER_LSB = 0.004 # --- helper functions --- def _validate_register(register): """Raise INA219Error if the register is invalid""" if not (register in _REGISTERS): raise INA219Error("invalid register: %s" % register) def _raw_bus_voltage_ovf(bus_voltage_register): """The Math Overflow Flag (OVF) is set when the power or current calculations are out of range""" return bus_voltage_register & 0x1 def _raw_bus_voltage_cnvr(bus_voltage_register): """The CNVR bit is set after all conversions, averaging, and multiplications are complete""" return (bus_voltage_register >> 1) & 0x1 def _raw_bus_voltage(bus_voltage_register): """Extract the raw bus voltage bits from the bus voltage register value""" return (bus_voltage_register >> 3) & 0x0FFF def print_shunt_voltage(shunt_voltage): print "Shunt Voltage: {0}V".format(shunt_voltage) def print_shunt_voltage_mV(shunt_voltage): print "Shunt Voltage: {0}mV".format(shunt_voltage * 1000) def print_current(current): print "Current: {0}A".format(current) def print_current_mA(current): print "Current: {0}mA".format(current * 1000) def print_bus_voltage(bus_voltage): print "Bus Voltage: {0}V".format(bus_voltage) def print_bus_voltage_mV(bus_voltage): print "Bus Voltage: {0}mV".format(bus_voltage * 1000) def print_power(power): print "Power: {0}W".format(power) def print_power_mW(power): print "Power: {0}mW".format(power * 1000) def _pretty_print_registers(address, configuration_register, shunt_voltage_register, bus_voltage_register, power_register, current_register, calibration_register): print "----------------------------------------------------" print "| INA219 0x{0:02X} Register | Hex | Binary |".format(address) print "|--------------------------------------------------|" print "| Configuration (0x{0:02X}) | 0x{1:04X} | {1:016b} |".format(_CONFIGURATION_REGISTER, configuration_register) print "| Shunt Voltage (0x{0:02X}) | 0x{1:04X} | {1:016b} |".format(_SHUNT_VOLTAGE_REGISTER, shunt_voltage_register) print "| Bus Voltage (0x{0:02X}) | 0x{1:04X} | {1:016b} |".format(_BUS_VOLTAGE_REGISTER, bus_voltage_register) print "| Power (0x{0:02X}) | 0x{1:04X} | {1:016b} |".format(_POWER_REGISTER, power_register) print "| Current (0x{0:02X}) | 0x{1:04X} | {1:016b} |".format(_CURRENT_REGISTER, current_register) print "| Calibration (0x{0:02X}) | 0x{1:04X} | {1:016b} |".format(_CALIBRATION_REGISTER, calibration_register) print "----------------------------------------------------" def _pretty_print_configuration(configuration_register): rst = (configuration_register >> 15) & 0x1 d14 = (configuration_register >> 14) & 0x1 brng = (configuration_register >> 13) & 0x1 pg1 = 
(configuration_register >> 12) & 0x1 pg0 = (configuration_register >> 11) & 0x1 badc4 = (configuration_register >> 10) & 0x1 badc3 = (configuration_register >> 9) & 0x1 badc2 = (configuration_register >> 8) & 0x1 badc1 = (configuration_register >> 7) & 0x1 sadc4 = (configuration_register >> 6) & 0x1 sadc3 = (configuration_register >> 5) & 0x1 sadc2 = (configuration_register >> 4) & 0x1 sadc1 = (configuration_register >> 3) & 0x1 mode3 = (configuration_register >> 2) & 0x1 mode2 = (configuration_register >> 1) & 0x1 mode1 = configuration_register & 0x1 print "--------------------------" print "| Configuration Register |" print "|------------------------|" print "| Bit | Bit Name | Value |" print "|------------------------|" print "| D15 | RST | {0:^5} |".format(rst) print "| D14 | - | {0:^5} |".format(d14) print "| D13 | BRNG | {0:^5} |".format(brng) print "| D12 | PG1 | {0:^5} |".format(pg1) print "| D11 | PG0 | {0:^5} |".format(pg0) print "| D10 | BADC4 | {0:^5} |".format(badc4) print "| D9 | BADC3 | {0:^5} |".format(badc3) print "| D8 | BADC2 | {0:^5} |".format(badc2) print "| D7 | BADC1 | {0:^5} |".format(badc1) print "| D6 | SADC4 | {0:^5} |".format(sadc4) print "| D5 | SADC3 | {0:^5} |".format(sadc3) print "| D4 | SADC2 | {0:^5} |".format(sadc2) print "| D3 | SADC1 | {0:^5} |".format(sadc1) print "| D2 | MODE3 | {0:^5} |".format(mode3) print "| D1 | MODE2 | {0:^5} |".format(mode2) print "| D0 | MODE1 | {0:^5} |".format(mode1) print "--------------------------" def _pretty_print_bus_voltage(bus_voltage_register): ovf = _raw_bus_voltage_ovf(bus_voltage_register) cnvr = _raw_bus_voltage_cnvr(bus_voltage_register) raw_bus_voltage = _raw_bus_voltage(bus_voltage_register) print "------------------------------" print "| Bus Voltage Register |" print "|----------------------------|" print "| Bit | Bit Name | Hex |" print "|----------------------------|" print "| D3-D15 | BD0-BD12 | 0x{0:04X} |".format(raw_bus_voltage) print "| D1 | CNVR | 0x{0:<4X} |".format(cnvr) print "| D0 | OVF | 0x{0:<4X} |".format(ovf) print "------------------------------" class INA219Error(Exception): pass class INA219(object): def __init__(self, master, address, configuration, shunt_resistance, max_expected_current, name=None): # reference designator for identification when assembled on a PCB assembly self.name = name # I2C master object self.master = master # I2C slave address self.address = address # value of the configuration register self.configuration = configuration # value of the shunt resistor in ohms self.shunt_resistance = shunt_resistance # max expected current through the shunt resistor in amps self.max_expected_current = max_expected_current # configure & calibrate the device self.configure() self.calibrate() def configure(self): self._configuration_register = self.configuration def calibrate(self): self._calibration_register = self._calculate_calibration() def _calculate_calibration(self): return math.trunc(0.04096 / float(self._current_register_lsb * self.shunt_resistance)) # --- low level register access --- def _write_register(self, register, value=None): _validate_register(register) data = array('B', [0]) data[0] = register if value: value_msb = (value >> 8) & 0xFF value_lsb = value & 0xFF data.insert(1, value_msb) data.insert(2, value_lsb) self.master.i2c_write(self.address, data) def _read_register(self, register): _validate_register(register) data_out = array('B', [0]) data_out[0] = register data_in = self.master.i2c_write_read(self.address, data_out, _REGISTER_BYTE_WIDTH) return (data_in[0] << 
8) + data_in[1] # --- register properties --- @property def _configuration_register(self): return self._read_register(_CONFIGURATION_REGISTER) @_configuration_register.setter def _configuration_register(self, value): self._write_register(_CONFIGURATION_REGISTER, value) @property def _shunt_voltage_register(self): return self._read_register(_SHUNT_VOLTAGE_REGISTER) @property def _bus_voltage_register(self): return self._read_register(_BUS_VOLTAGE_REGISTER) @property def _power_register(self): return self._read_register(_POWER_REGISTER) @property def _current_register(self): return self._read_register(_CURRENT_REGISTER) @property def _calibration_register(self): return self._read_register(_CALIBRATION_REGISTER) @_calibration_register.setter def _calibration_register(self, value): self._write_register(_CALIBRATION_REGISTER, value) # --- register LSB properties --- @property def _shunt_voltage_register_lsb(self): return _SHUNT_VOLTAGE_REGISTER_LSB @property def _bus_voltage_register_lsb(self): return _BUS_VOLTAGE_REGISTER_LSB @property def _current_register_lsb(self): return self.max_expected_current / float(32767) @property def _power_register_lsb(self): return 20 * self._current_register_lsb # --- normalized results --- def shunt_voltage(self): """Shunt voltage in volts""" return self._shunt_voltage_register * self._shunt_voltage_register_lsb def bus_voltage(self): """Bus voltage in volts""" return _raw_bus_voltage(self._bus_voltage_register) * self._bus_voltage_register_lsb def bus_voltage_ext(self): """Bus voltage in volts, OVF, CNVR""" bus_voltage_register = self._bus_voltage_register return _raw_bus_voltage(bus_voltage_register) * self._bus_voltage_register_lsb, bool(_raw_bus_voltage_ovf(bus_voltage_register)), bool(_raw_bus_voltage_cnvr(bus_voltage_register)) def power(self): """Power in watts""" return self._power_register * self._power_register_lsb def current(self): """Current in amps""" return self._current_register * self._current_register_lsb # --- debugging helper methods --- def _dump_registers(self): _pretty_print_registers(self.address, self._configuration_register, self._shunt_voltage_register, self._bus_voltage_register, self._power_register, self._current_register, self._calibration_register) def _dump_configuration_register(self): _pretty_print_configuration(self._configuration_register) def _dump_bus_voltage_register(self): _pretty_print_bus_voltage(self._bus_voltage_register)
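

# --- usage sketch (illustrative, not part of the original driver) ---
# The calibration arithmetic above can be checked without hardware by handing
# the driver a tiny stub master. The I2C address (0x40), configuration word
# (0x399F), 0.1 ohm shunt and 3.2 A maximum expected current below are assumed
# example values, not anything mandated by this module.
class _StubI2CMaster(object):
    """Minimal stand-in exposing only the two methods INA219 calls."""

    def __init__(self):
        self.registers = {}

    def i2c_write(self, address, data):
        # data[0] is the register pointer, data[1:3] the value MSB/LSB
        if len(data) == 3:
            self.registers[data[0]] = (data[1] << 8) | data[2]

    def i2c_write_read(self, address, data_out, width):
        value = self.registers.get(data_out[0], 0)
        return [(value >> 8) & 0xFF, value & 0xFF]


if __name__ == '__main__':
    ina = INA219(_StubI2CMaster(), 0x40, 0x399F, 0.1, 3.2, name='U1')
    # current LSB = 3.2 A / 32767 = ~97.7 uA/bit
    # calibration  = trunc(0.04096 / (97.7e-6 * 0.1)) = 4194
    print ina._current_register_lsb    # ~9.766e-05
    print ina._power_register_lsb      # ~1.953e-03
    print ina._calibration_register    # 4194 (read back from the stub)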
""" Secure Copy Copies files from local to remote usage: GET scp [user@host:dir/file] [files/dir] PUT scp [file/dir] [file/dir] [user@host:dir] """ import argparse # scp.py # Copyright (C) 2008 James Bardin <j.bardin@gmail.com> """ Created by jbardin https://github.com/jbardin/scp.py Utilities for sending files over ssh using the scp1 protocol. """ __version__ = '0.8.0' import locale import os import sys import re from socket import timeout as SocketTimeout from distutils.version import StrictVersion def install_module_from_github(username, package_name, version): """ Install python module from github zip files """ cmd_string = """ echo Installing {1} {2} ... wget https://github.com/{0}/{1}/archive/v{2}.zip -o $TMPDIR/{1}.zip mkdir $TMPDIR/{1}_src unzip $TMPDIR/{1}.zip -d $TMPDIR/{1}_src rm -f $TMPDIR/{1}.zip mv $TMPDIR/{1}_src/{1} $STASH_ROOT/lib/ rm -rf $TMPDIR/{1}_src echo Done """.format(username, package_name, version ) globals()['_stash'](cmd_string) import paramiko if StrictVersion(paramiko.__version__) < StrictVersion('1.15'): # Install paramiko 1.16.0 to fix a bug with version < 1.15 install_module_from_github('paramiko', 'paramiko', '1.16.0') print 'Please restart Pythonista for changes to take full effect' sys.exit(0) DEBUG = False APP_DIR = os.environ['STASH_ROOT'] # this is quote from the shlex module, added in py3.3 _find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search def _sh_quote(s): """Return a shell-escaped version of the string `s`.""" if not s: return b"" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return b"'" + s.replace(b"'", b"'\"'\"'") + b"'" # Unicode conversion functions; assume UTF-8 def asbytes(s): """Turns unicode into bytes, if needed. Assumes UTF-8. """ if isinstance(s, bytes): return s else: return s.encode('utf-8') def asunicode(s): """Turns bytes into unicode, if needed. Uses UTF-8. """ if isinstance(s, bytes): return s.decode('utf-8', 'replace') else: return s # os.path.sep is unicode on Python 3, no matter the platform bytes_sep = asbytes(os.path.sep) # Unicode conversion function for Windows # Used to convert local paths if the local machine is Windows def asunicode_win(s): """Turns bytes into unicode, if needed. """ if isinstance(s, bytes): return s.decode(locale.getpreferredencoding()) else: return s class SCPClient(object): """ An scp1 implementation, compatible with openssh scp. Raises SCPException for all transport related errors. Local filesystem and OS errors pass through. Main public methods are .put and .get The get method is controlled by the remote scp instance, and behaves accordingly. This means that symlinks are resolved, and the transfer is halted after too many levels of symlinks are detected. The put method uses os.walk for recursion, and sends files accordingly. Since scp doesn't support symlinks, we send file symlinks as the file (matching scp behaviour), but we make no attempt at symlinked directories. """ def __init__(self, transport, buff_size=16384, socket_timeout=5.0, progress=None, sanitize=_sh_quote): """ Create an scp1 client. @param transport: an existing paramiko L{Transport} @type transport: L{Transport} @param buff_size: size of the scp send buffer. @type buff_size: int @param socket_timeout: channel socket timeout in seconds @type socket_timeout: float @param progress: callback - called with (filename, size, sent) during transfers @param sanitize: function - called with filename, should return safe or escaped string. 
Uses _sh_quote by default. @type progress: function(string, int, int) """ self.transport = transport self.buff_size = buff_size self.socket_timeout = socket_timeout self.channel = None self.preserve_times = False self._progress = progress self._recv_dir = b'' self._rename = False self._utime = None self.sanitize = sanitize self._dirtimes = {} def put(self, files, remote_path=b'.', recursive=False, preserve_times=False): """ Transfer files to remote host. @param files: A single path, or a list of paths to be transfered. recursive must be True to transfer directories. @type files: string OR list of strings @param remote_path: path in which to receive the files on the remote host. defaults to '.' @type remote_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. @type preserve_times: bool """ self.preserve_times = preserve_times self.channel = self.transport.open_session() self.channel.settimeout(self.socket_timeout) scp_command = (b'scp -t ', b'scp -r -t ')[recursive] self.channel.exec_command(scp_command + self.sanitize(asbytes(remote_path))) self._recv_confirm() if not isinstance(files, (list, tuple)): files = [files] if recursive: self._send_recursive(files) else: self._send_files(files) if self.channel: self.channel.close() def get(self, remote_path, local_path='', recursive=False, preserve_times=False): """ Transfer files from remote host to localhost @param remote_path: path to retreive from remote host. since this is evaluated by scp on the remote host, shell wildcards and environment variables may be used. @type remote_path: str @param local_path: path in which to receive files locally @type local_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. @type preserve_times: bool """ if not isinstance(remote_path, (list, tuple)): remote_path = [remote_path] remote_path = [self.sanitize(asbytes(r)) for r in remote_path] self._recv_dir = local_path or os.getcwd() self._rename = (len(remote_path) == 1 and not os.path.isdir(os.path.abspath(local_path))) if len(remote_path) > 1: if not os.path.exists(self._recv_dir): raise SCPException("Local path '%s' does not exist" % asunicode(self._recv_dir)) elif not os.path.isdir(self._recv_dir): raise SCPException("Local path '%s' is not a directory" % asunicode(self._recv_dir)) rcsv = (b'', b' -r')[recursive] prsv = (b'', b' -p')[preserve_times] self.channel = self.transport.open_session() self.channel.settimeout(self.socket_timeout) self.channel.exec_command(b"scp" + rcsv + prsv + b" -f " + b' '.join(remote_path)) self._recv_all() if self.channel: self.channel.close() def _read_stats(self, name): """return just the file stats needed for scp""" stats = os.stat(name) mode = oct(stats.st_mode)[-4:] size = stats.st_size atime = int(stats.st_atime) mtime = int(stats.st_mtime) return (mode, size, mtime, atime) def _send_files(self, files): for name in files: basename = asbytes(os.path.basename(name)) (mode, size, mtime, atime) = self._read_stats(name) if self.preserve_times: self._send_time(mtime, atime) file_hdl = open(name, 'rb') # The protocol can't handle \n in the filename. # Quote them as the control sequence \^J for now, # which is how openssh handles it. 
self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') + basename.replace(b'\n', b'\\^J') + b"\n") self._recv_confirm() file_pos = 0 if self._progress: if size == 0: # avoid divide-by-zero self._progress(basename, 1, 1) else: self._progress(basename, size, 0) buff_size = self.buff_size chan = self.channel while file_pos < size: chan.sendall(file_hdl.read(buff_size)) file_pos = file_hdl.tell() if self._progress: self._progress(basename, size, file_pos) chan.sendall('\x00') file_hdl.close() self._recv_confirm() def _chdir(self, from_dir, to_dir): # Pop until we're one level up from our next push. # Push *once* into to_dir. # This is dependent on the depth-first traversal from os.walk # add path.sep to each when checking the prefix, so we can use # path.dirname after common = os.path.commonprefix([from_dir + bytes_sep, to_dir + bytes_sep]) # now take the dirname, since commonprefix is character based, # and we either have a seperator, or a partial name common = os.path.dirname(common) cur_dir = from_dir.rstrip(bytes_sep) while cur_dir != common: cur_dir = os.path.split(cur_dir)[0] self._send_popd() # now we're in our common base directory, so on self._send_pushd(to_dir) def _send_recursive(self, files): for base in files: if not os.path.isdir(base): # filename mixed into the bunch self._send_files([base]) continue last_dir = asbytes(base) for root, dirs, fls in os.walk(base): self._chdir(last_dir, asbytes(root)) self._send_files([os.path.join(root, f) for f in fls]) last_dir = asbytes(root) # back out of the directory for i in range(len(os.path.split(last_dir))): self._send_popd() def _send_pushd(self, directory): (mode, size, mtime, atime) = self._read_stats(directory) basename = asbytes(os.path.basename(directory)) if self.preserve_times: self._send_time(mtime, atime) self.channel.sendall(('D%s 0 ' % mode).encode('ascii') + basename.replace(b'\n', b'\\^J') + b'\n') self._recv_confirm() def _send_popd(self): self.channel.sendall('E\n') self._recv_confirm() def _send_time(self, mtime, atime): self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii')) self._recv_confirm() def _recv_confirm(self): # read scp response msg = b'' try: msg = self.channel.recv(512) except SocketTimeout: raise SCPException('Timout waiting for scp response') # slice off the first byte, so this compare will work in python2 and python3 if msg and msg[0:1] == b'\x00': return elif msg and msg[0:1] == b'\x01': raise SCPException(asunicode(msg[1:])) elif self.channel.recv_stderr_ready(): msg = self.channel.recv_stderr(512) raise SCPException(asunicode(msg)) elif not msg: raise SCPException('No response from server') else: raise SCPException('Invalid response from server', msg) def _recv_all(self): # loop over scp commands, and receive as necessary command = {b'C': self._recv_file, b'T': self._set_time, b'D': self._recv_pushd, b'E': self._recv_popd} while not self.channel.closed: # wait for command as long as we're open self.channel.sendall('\x00') msg = self.channel.recv(1024) if not msg: # chan closed while recving break assert msg[-1:] == b'\n' msg = msg[:-1] code = msg[0:1] try: command[code](msg[1:]) except KeyError: raise SCPException(str(msg).strip()) # directory times can't be set until we're done writing files self._set_dirtimes() def _set_time(self, cmd): try: times = cmd.split(b' ') mtime = int(times[0]) atime = int(times[2]) or mtime except: self.channel.send(b'\x01') raise SCPException('Bad time format') # save for later self._utime = (atime, mtime) def _recv_file(self, cmd): chan = 
self.channel parts = cmd.strip().split(b' ', 2) try: mode = int(parts[0], 8) size = int(parts[1]) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: chan.send('\x01') chan.close() raise SCPException('Bad file format') try: file_hdl = open(path, 'wb') except IOError as e: chan.send(b'\x01' + str(e).encode('utf-8')) chan.close() raise if self._progress: if size == 0: # avoid divide-by-zero self._progress(path, 1, 1) else: self._progress(path, size, 0) buff_size = self.buff_size pos = 0 chan.send(b'\x00') try: while pos < size: # we have to make sure we don't read the final byte if size - pos <= buff_size: buff_size = size - pos file_hdl.write(chan.recv(buff_size)) pos = file_hdl.tell() if self._progress: self._progress(path, size, pos) msg = chan.recv(512) if msg and msg[0:1] != b'\x00': raise SCPException(msg[1:]) except SocketTimeout: chan.close() raise SCPException('Error receiving, socket.timeout') file_hdl.truncate() try: os.utime(path, self._utime) self._utime = None os.chmod(path, mode) # should we notify the other end? finally: file_hdl.close() # '\x00' confirmation sent in _recv_all def _recv_pushd(self, cmd): parts = cmd.split(b' ', 2) try: mode = int(parts[0], 8) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: self.channel.send(b'\x01') raise SCPException('Bad directory format') try: if not os.path.exists(path): os.mkdir(path, mode) elif os.path.isdir(path): os.chmod(path, mode) else: raise SCPException('%s: Not a directory' % path) self._dirtimes[path] = (self._utime) self._utime = None self._recv_dir = path except (OSError, SCPException) as e: self.channel.send(b'\x01' + asbytes(str(e))) raise def _recv_popd(self, *cmd): self._recv_dir = os.path.split(self._recv_dir)[0] def _set_dirtimes(self): try: for d in self._dirtimes: os.utime(d, self._dirtimes[d]) finally: self._dirtimes = {} class SCPException(Exception): """SCP exception class""" pass ############################################ def find_ssh_keys(): #dir = os.path.expanduser('~/Documents/.ssh/') files = [] try: for file in os.listdir(APP_DIR+'/.ssh'): if '.' 
not in file: files.append(APP_DIR+'/.ssh/'+file) except OSError: pass return files def parse_host(arg): user,temp = arg.split('@') host, path = temp.split(':') return host,user,path def scp_callback(filename, size, sent): if size == sent: print filename if __name__ == '__main__': files = [] ap = argparse.ArgumentParser() ap.add_argument('-p', '--password', help='login password') ap.add_argument('files', nargs='*', help='file or module name') args = ap.parse_args() #scp_mode 0 put 1 get if '@' in args.files[0]: scp_mode = 1 else: scp_mode = 0 for file in args.files: if '@' in file: host,user,host_path = parse_host(file) else: files.append(file) ssh = paramiko.SSHClient() #ssh.load_system_host_keys() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) key_filename = find_ssh_keys() if args.password is not None: ssh.connect(host, username=user, password=args.password) else: if len(key_filename) == 0: # no key file found password = raw_input('Enter passsword:') ssh.connect(host, username=user, password=password) else: ssh.connect(host,username=user,key_filename=key_filename) # SCPCLient takes a paramiko transport as its only argument scp = SCPClient(ssh.get_transport(),progress=scp_callback) #scp.put('stash',remote_path='stash/',recursive=True) if scp_mode: print 'Copying from server...' scp.get(host_path, local_path=files[0], recursive=True) else: print 'Copying to server...' scp.put(files, recursive=True, remote_path=host_path) ssh.close()
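

# --- illustrative programmatic use (not part of the original script) ---
# SCPClient can also be driven directly from other Python code, without the
# command-line wrapper above. The host, user, password and paths passed in
# are placeholders supplied by the caller.
def _scp_example(host, user, password):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, password=password)

    scp = SCPClient(ssh.get_transport(), progress=scp_callback)
    scp.put('notes.txt', remote_path='backup/')    # local -> remote
    scp.get('backup/notes.txt', local_path='.')    # remote -> local
    ssh.close()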
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for UnicodeEncode op from ragged_string_ops.""" from absl.testing import parameterized import numpy as np from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors_impl as errors from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_string_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.platform import test class UnicodeEncodeOpTest(test.TestCase, parameterized.TestCase): def assertAllEqual(self, rt, expected): with self.cached_session() as sess: value = sess.run(rt) if isinstance(value, np.ndarray): value = value.tolist() elif isinstance(value, ragged_tensor_value.RaggedTensorValue): value = value.to_list() self.assertEqual(value, expected) def testScalar(self): with self.cached_session(): with self.assertRaises(ValueError): ragged_string_ops.unicode_encode(72, "UTF-8") with self.cached_session(): with self.assertRaises(ValueError): ragged_string_ops.unicode_encode(constant_op.constant(72), "UTF-8") def testRequireParams(self): with self.cached_session(): with self.assertRaises(TypeError): ragged_string_ops.unicode_encode() # pylint: disable=no-value-for-parameter with self.cached_session(): with self.assertRaises(TypeError): ragged_string_ops.unicode_encode(72) # pylint: disable=no-value-for-parameter with self.cached_session(): with self.assertRaises(TypeError): ragged_string_ops.unicode_encode(encoding="UTF-8") # pylint: disable=no-value-for-parameter,unexpected-keyword-arg @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") def testStrictErrors(self, encoding): test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')], np.int32) with self.cached_session() as session: with self.assertRaises(errors.InvalidArgumentError): session.run( ragged_string_ops.unicode_encode(test_value, encoding, "strict")) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def testIgnoreErrors(self, encoding): test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')], np.int32) expected_value = u"Heo".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding, "ignore") self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def testReplaceErrors(self, encoding): test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')], np.int32) expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding, "replace") 
self.assertAllEqual(unicode_encode_op, expected_value) # Test custom replacement character test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')], np.int32) expected_value = u"Heooo".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding, "replace", ord('o')) self.assertAllEqual(unicode_encode_op, expected_value) # Verify "replace" is default test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')], np.int32) expected_value = u"He\U0000fffd\U0000fffdo".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) # Verify non-default replacement with an unpaired surrogate. test_value = np.array([0xD801], np.int32) expected_value = u"A".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding, "replace", 0x41) self.assertAllEqual(unicode_encode_op, expected_value) # Test with a noncharacter code point. test_value = np.array([0x1FFFF], np.int32) expected_value = u"A".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding, "replace", 0x41) self.assertAllEqual(unicode_encode_op, expected_value) # Replacement_char must be within range test_value = np.array([ord('H'), ord('e'), 0x7FFFFFFF, -1, ord('o')], np.int32) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding, "replace", 0x110000) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(unicode_encode_op) # -- regular Tensor tests -- # @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def testVector(self, encoding): test_value = np.array([ord('H'), ord('e'), ord('l'), ord('l'), ord('o')], np.int32) expected_value = u"Hello".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) test_value = np.array([ord('H'), ord('e'), 0xC3, 0xC3, 0x1F604], np.int32) expected_value = u"He\xc3\xc3\U0001f604".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) # Single character string test_value = np.array([ord('H')], np.int32) expected_value = u"H".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) test_value = np.array([0x1F604], np.int32) expected_value = u"\U0001f604".encode(encoding) unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def testMatrix(self, encoding): test_value = np.array( [[72, 0x1F604, 108, 108, 111], [87, 0x1F604, 114, 108, 100]], np.int32) expected_value = [ u"H\U0001f604llo".encode(encoding), u"W\U0001f604rld".encode(encoding) ] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def test3DimMatrix(self, encoding): test_value = constant_op.constant( [[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]], [[102, 105, 120, 101, 100], [119, 111, 114, 100, 115]], [[72, 121, 112, 101, 114], [99, 117, 98, 101, 46]]], np.int32) expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)], 
[u"fixed".encode(encoding), u"words".encode(encoding)], [u"Hyper".encode(encoding), u"cube.".encode(encoding)]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def test4DimMatrix(self, encoding): test_value = constant_op.constant( [[[[72, 101, 108, 108, 111]], [[87, 111, 114, 108, 100]]], [[[102, 105, 120, 101, 100]], [[119, 111, 114, 100, 115]]], [[[72, 121, 112, 101, 114]], [[99, 117, 98, 101, 46]]]], np.int32) expected_value = [[[u"Hello".encode(encoding)], [u"World".encode(encoding)]], [[u"fixed".encode(encoding)], [u"words".encode(encoding)]], [[u"Hyper".encode(encoding)], [u"cube.".encode(encoding)]]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) # -- Ragged Tensor tests -- # @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def testRaggedMatrix(self, encoding): test_value = ragged_factory_ops.constant( [[ord('H'), 0xC3, ord('l'), ord('l'), ord('o')], [ord('W'), 0x1F604, ord('r'), ord('l'), ord('d'), ord('.')]], np.int32) expected_value = [ u"H\xc3llo".encode(encoding), u"W\U0001f604rld.".encode(encoding) ] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def test3DimMatrixWithRagged2ndDim(self, encoding): test_value = ragged_factory_ops.constant( [[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]], [[102, 105, 120, 101, 100]], [[72, 121, 112, 101, 114], [119, 111, 114, 100, 115], [99, 117, 98, 101, 46]]], np.int32) expected_value = [[u"Hello".encode(encoding), u"World".encode(encoding)], [u"fixed".encode(encoding)], [ u"Hyper".encode(encoding), u"words".encode(encoding), u"cube.".encode(encoding) ]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def test3DimMatrixWithRagged3rdDim(self, encoding): test_value = ragged_factory_ops.constant( [[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]], [[68, 111, 110, 39, 116], [119, 195, 114, 114, 121, 44, 32, 98, 101]], [[0x1F604], []]], np.int32) expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)], [ u"Don't".encode(encoding), u"w\xc3rry, be".encode(encoding) ], [u"\U0001f604".encode(encoding), u"".encode(encoding)]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def test3DimMatrixWithRagged2ndAnd3rdDim(self, encoding): test_value = ragged_factory_ops.constant( [[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100, 46]], [], [[0x1F604]]], np.int32) expected_value = [[u"Hello".encode(encoding), u"World.".encode(encoding)], [], [u"\U0001f604".encode(encoding)]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def test4DimRaggedMatrix(self, encoding): test_value = ragged_factory_ops.constant( [[[[72, 
101, 108, 108, 111], [87, 111, 114, 108, 100]]], [[[]], [[72, 121, 112, 101]]]], np.int32) expected_value = [[[u"Hello".encode(encoding), u"World".encode(encoding)]], [[u"".encode(encoding)], [u"Hype".encode(encoding)]]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) @parameterized.parameters("UTF-8", "UTF-16-BE", "UTF-32-BE") @test_util.run_v1_only("b/120545219") def testRaggedMatrixWithMultiDimensionInnerValues(self, encoding): test_flat_values = constant_op.constant([[[72, 101, 108, 108, 111], [87, 111, 114, 108, 100]], [[102, 105, 120, 101, 100], [119, 111, 114, 100, 115]], [[72, 121, 112, 101, 114], [99, 117, 98, 101, 46]]]) test_row_splits = [ constant_op.constant([0, 2, 3], dtype=np.int64), constant_op.constant([0, 1, 1, 3], dtype=np.int64) ] test_value = ragged_tensor.RaggedTensor.from_nested_row_splits( test_flat_values, test_row_splits) expected_value = [[[[u"Hello".encode(encoding), u"World".encode(encoding)]], []], [[[u"fixed".encode(encoding), u"words".encode(encoding)], [u"Hyper".encode(encoding), u"cube.".encode(encoding)]]]] unicode_encode_op = ragged_string_ops.unicode_encode(test_value, encoding) self.assertAllEqual(unicode_encode_op, expected_value) def testUnknownInputRankError(self): # Use a tf.function that erases shape information. @def_function.function(input_signature=[tensor_spec.TensorSpec(None)]) def f(v): return ragged_string_ops.unicode_encode(v, "UTF-8") with self.assertRaisesRegex( ValueError, "Rank of input_tensor must be statically known."): f([72, 101, 108, 108, 111]) if __name__ == "__main__": test.main()
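

# A minimal eager-mode sketch of the op these tests exercise, using the public
# tf.strings.unicode_encode endpoint (assumes a TF 2.x eager environment; the
# code points are arbitrary examples, not taken from the tests above).
def _unicode_encode_example():
  import tensorflow as tf

  codepoints = tf.ragged.constant(
      [[72, 101, 108, 108, 111], [87, 0x1F604, 114, 108, 100]], dtype=tf.int32)
  # One string per row; U+1F604 becomes the 4-byte UTF-8 sequence F0 9F 98 84.
  return tf.strings.unicode_encode(codepoints, "UTF-8")
  # -> [b"Hello", b"W\xf0\x9f\x98\x84rld"]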
# Copyright (c) 2020 DDN. All rights reserved. # Use of this source code is governed by a MIT-style # license that can be found in the LICENSE file. from collections import defaultdict from django.contrib.contenttypes.models import ContentType from tastypie.validation import Validation from chroma_core.lib.storage_plugin.api import attributes from chroma_core.lib.storage_plugin.base_resource import BaseStorageResource from chroma_core.models import StorageResourceRecord from chroma_api.authentication import AnonymousAuthentication, PatchedDjangoAuthorization from tastypie import fields from chroma_core.lib.storage_plugin.query import ResourceQuery from chroma_api.validation_utils import validate from tastypie.exceptions import NotFound, ImmediateHttpResponse from tastypie import http from django.core.exceptions import ObjectDoesNotExist from chroma_api.storage_resource_class import filter_class_ids from chroma_api.chroma_model_resource import ChromaModelResource from chroma_core.services.plugin_runner.scan_daemon_interface import ScanDaemonRpcInterface class StorageResourceValidation(Validation): def is_valid(self, bundle, request=None): from chroma_core.lib.storage_plugin.manager import storage_plugin_manager from chroma_core.lib.storage_plugin.manager import PluginNotFound errors = defaultdict(list) if "alias" in bundle.data and bundle.data["alias"] is not None: alias = bundle.data["alias"] if alias.strip() == "": errors["alias"].append("May not be blank") elif alias != alias.strip(): errors["alias"].append("No trailing whitespace allowed") if "plugin_name" in bundle.data: try: storage_plugin_manager.get_plugin_class(bundle.data["plugin_name"]) except PluginNotFound as e: errors["plugin_name"].append(e.__str__()) else: if "class_name" in bundle.data: try: storage_plugin_manager.get_plugin_resource_class( bundle.data["plugin_name"], bundle.data["class_name"] ) except PluginNotFound as e: errors["class_name"].append(e.__str__()) return errors class StorageResourceResource(ChromaModelResource): """ Storage resources are objects within the storage plugin framework. Note: the term 'resource' is used to refer to REST API resources and also in this context to refer to the separate concept of a storage resource. A storage resource is of a class defined by the ``storage_resource_class`` resource. This resource has a special ``ancestor_of`` filter argument, which may be set to the ID of a storage resource to retrieve all the resources that are its ancestors. """ # FIXME: document this fully when the storage plugin API freezes attributes = fields.DictField(help_text="Dictionary of attributes as defined by the storage plugin") alias = fields.CharField(help_text="The human readable name of the resource (may be set by user)") alerts = fields.ListField(help_text="List of active ``alert`` objects which are associated with this resource") propagated_alerts = fields.ListField( help_text="List of active ``alert`` objects which are associated with " "ancestors of this resource" ) default_alias = fields.CharField(help_text="The default human readable name of the resource") plugin_name = fields.CharField( attribute="resource_class__storage_plugin__module_name", help_text="Name of the storage plugin which defines this resource", ) class_name = fields.CharField( attribute="resource_class__class_name", help_text="Name of a ``storage_resource_class``" ) parent_classes = fields.ListField( blank=True, null=True, help_text="List of strings, parent classes of" "this object's class." 
) deletable = fields.BooleanField(help_text="If ``true``, this object may be removed with a DELETE operation") def dehydrate_parent_classes(self, bundle): def find_bases(klass, bases=set()): for parent in klass.__bases__: if issubclass(parent, BaseStorageResource): bases.add(parent) bases |= find_bases(parent, bases) return bases return [k.__name__ for k in find_bases(bundle.obj.resource_class.get_class())].sort() def obj_get_list(self, bundle, **kwargs): """Override this to do sorting in a way that depends on kwargs (we need to know what kind of object is being listed in order to resolve the ordering attribute to a model, and apply_sorting's arguments don't give you kwargs)""" objs = super(StorageResourceResource, self).obj_get_list(bundle, **kwargs) objs = self._sort_by_attr(objs, bundle.request.GET, **kwargs) return objs def get_list(self, request, **kwargs): if "ancestor_of" in request.GET: record = StorageResourceRecord.objects.get(id=request.GET["ancestor_of"]) ancestor_records = set(ResourceQuery().record_all_ancestors(record)) bundles = [self.build_bundle(obj=obj, request=request) for obj in ancestor_records] dicts = [self.full_dehydrate(bundle) for bundle in bundles] return self.create_response(request, {"meta": None, "objects": dicts}) else: return super(StorageResourceResource, self).get_list(request, **kwargs) def _sort_by_attr(self, obj_list, options=None, **kwargs): options = options or {} order_by = options.get("order_by", None) if not order_by: return obj_list if order_by.find("attr_") == 0: attr_name = order_by[5:] invert = False elif order_by.find("attr_") == 1: attr_name = order_by[6:] invert = True else: raise RuntimeError("Can't sort on %s" % order_by) try: class_name = kwargs["class_name"] plugin_name = kwargs["plugin_name"] except KeyError: return obj_list else: from chroma_core.lib.storage_plugin.manager import storage_plugin_manager klass, klass_id = storage_plugin_manager.get_plugin_resource_class(plugin_name, class_name) model_klass = klass.attr_model_class(attr_name) filter_args = {model_klass.__name__.lower() + "__key": attr_name} order_attr = model_klass.__name__.lower() + "__value" return obj_list.filter(**filter_args).order_by(("-" if invert else "") + order_attr) def apply_sorting(self, obj_list, options=None): """Pass-through in favour of sorting done in obj_get_list""" return obj_list def dehydrate_propagated_alerts(self, bundle): return [a.to_dict() for a in ResourceQuery().resource_get_propagated_alerts(bundle.obj.to_resource())] def dehydrate_deletable(self, bundle): return bundle.obj.resource_class.user_creatable def dehydrate_default_alias(self, bundle): return bundle.obj.to_resource().get_label() def dehydrate_alias(self, bundle): resource = bundle.obj.to_resource() return bundle.obj.alias_or_name(resource) def dehydrate_alerts(self, bundle): return [a.to_dict() for a in ResourceQuery().resource_get_alerts(bundle.obj.to_resource())] def dehydrate_content_type_id(self, bundle): return ContentType.objects.get_for_model(bundle.obj.__class__).pk def dehydrate_attributes(self, bundle): # a list of dicts, one for each attribute. Excludes hidden attributes. 
result = {} resource = bundle.obj.to_resource() attr_props = resource.get_all_attribute_properties() for name, props in attr_props: # Exclude password hashes if isinstance(props, attributes.Password): continue val = getattr(resource, name) if isinstance(val, BaseStorageResource): if val._handle: from chroma_api.urls import api raw = api.get_resource_uri(StorageResourceRecord.objects.get(pk=val._handle)) else: raw = None else: raw = val result[name] = { "raw": raw, "markup": props.to_markup(val), "label": props.get_label(name), "class": props.__class__.__name__, } return result class Meta: queryset = StorageResourceRecord.objects.filter(resource_class__id__in=filter_class_ids()) resource_name = "storage_resource" filtering = {"class_name": ["exact"], "plugin_name": ["exact"]} authorization = PatchedDjangoAuthorization() authentication = AnonymousAuthentication() always_return_data = True validation = StorageResourceValidation() def obj_delete(self, bundle, **kwargs): try: obj = self.obj_get(bundle, **kwargs) except ObjectDoesNotExist: raise NotFound("A model instance matching the provided arguments could not be found.") ScanDaemonRpcInterface().remove_resource(obj.id) raise ImmediateHttpResponse(http.HttpAccepted()) @validate def obj_create(self, bundle, **kwargs): # Note: not importing this at module scope so that this module can # be imported without loading plugins (useful at installation) from chroma_core.lib.storage_plugin.manager import storage_plugin_manager resource_class, resource_class_id = storage_plugin_manager.get_plugin_resource_class( bundle.data["plugin_name"], bundle.data["class_name"] ) attrs = {} input_attrs = bundle.data["attrs"] for name, property in resource_class.get_all_attribute_properties(): if name in input_attrs: attrs[name] = property.encrypt(property.cast(input_attrs[name])) elif property.default: attrs[name] = property.default elif not property.optional: # TODO: proper validation raise RuntimeError("%s not optional" % name) # Construct a record record, created = StorageResourceRecord.get_or_create_root(resource_class, resource_class_id, attrs) # record_dict = self.full_dehydrate(self.build_bundle(obj = record)).data bundle.obj = record return bundle @validate def obj_update(self, bundle, **kwargs): bundle.obj = self.cached_obj_get(bundle, **self.remove_api_resource_names(kwargs)) if "alias" in bundle.data: # FIXME: sanitize input for alias (it gets echoed back as markup) alias = bundle.data["alias"] record = bundle.obj if alias == "": record.alias = None else: record.alias = alias record.save() input_attrs = bundle.data attrs = {} resource_class = record.resource_class.get_class() for name, property in resource_class.get_all_attribute_properties(): if name in bundle.data: attrs[name] = property.encrypt(property.cast(input_attrs[name])) if len(attrs): # NB this operation is done inside the storage daemon, because it is # necessary to tear down any running session (e.g. consider modifying the IP # address of a controller) ScanDaemonRpcInterface().modify_resource(record.id, attrs) # Require that something was set if not "alias" in bundle.data or len(attrs): raise ImmediateHttpResponse(http.HttpBadRequest()) return bundle def prepend_urls(self): from django.conf.urls import url return super(StorageResourceResource, self).prepend_urls() + [ url( r"^(?P<resource_name>%s)/(?P<plugin_name>\D\w+)/(?P<class_name>\D\w+)/$" % self._meta.resource_name, self.wrap_view("dispatch_list"), name="dispatch_list", ) ]
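

# Illustrative client-side sketch (not part of this module): exercising the
# ``ancestor_of`` filter and the ``attr_`` ordering handled above. The base
# URL, plugin name, class name, attribute name and resource id are
# placeholders, and authentication/SSL handling is omitted.
def _example_storage_resource_queries(base_url="https://manager.example/api/storage_resource/"):
    import requests

    # every ancestor of storage resource 42
    ancestors = requests.get(base_url, params={"ancestor_of": 42}).json()

    # resources of one plugin/class, ordered by a plugin-defined attribute
    # (prefix with "-attr_" instead of "attr_" to invert the ordering)
    listing = requests.get(
        base_url + "myplugin/MyController/",
        params={"order_by": "attr_serial_number"},
    ).json()

    return ancestors, listing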
import pickle
import os
import glob
import numpy as np
from experts import *

#This file takes "raw" data as input and computes additional stats such as averaged stats, winrate, fantasy points etc.


#Returns the averaged stats (all, home and away) of a given player between games start and end
#Returns the average of all games but the last by default
def average(player, end = -1, start = 0):
    games_num = len(player['stats'])
    experience = player['experience']
    age = player['age']
    height = 6*int(player['height'].split('-')[0]) + int(player['height'].split('-')[1])
    weight = int(player['weight'])
    if end == 0:
        tmp = [0.]*25
        tmp[21] = experience
        tmp[22] = age
        tmp[23] = height
        tmp[24] = weight
        return tmp, tmp, tmp
        # print "Please choose a strictly positive number of games"
        # exit()
    if end == -1:
        return average(player, games_num - 1)
    elif end > games_num:
        print "not enough games, returned average of all available games (%d)" % games_num
        return average(player, games_num)
    elif start >= end:
        print "start must be smaller than end, returned average of all available games (%d)" % games_num
        return average(player, games_num)
    elif start < 0:
        return average(player, end)
    else:
        averaged = [float(sum(x))/float(len(x)) for x in zip(*[match[4:] for match in player['stats'][start:end]])]
        #Ensuring percentages are correct (using league averages as default values)
        for i, j in zip([3, 6, 9], [0.45, 0.35, 0.75]):
            averaged[i] = j if averaged[i - 1] == 0 else averaged[i - 2]/averaged[i - 1]
        won = float([match[3] for match in player['stats'][start:end]].count('W'))
        winrate = won/end
        averaged.append(winrate)
        averaged.append(experience)
        averaged.append(age)
        averaged.append(height)
        averaged.append(weight)

        home = [match for match in player['stats'][start:end] if match[2][4] == '@']
        away = [match for match in player['stats'][start:end] if match[2][4] != '@']
        #In order to avoid an unreferenced return
        home_avg = []
        away_avg = []
        if len(home) != 0:
            home_avg = [float(sum(x))/float(len(x)) for x in zip(*[match[4:] for match in home])]
            #Ensuring percentages are correct
            for i, j in zip([3, 6, 9], [0.45, 0.35, 0.75]):
                home_avg[i] = j if home_avg[i - 1] == 0 else home_avg[i - 2]/home_avg[i - 1]
            home_won = float([match[3] for match in home].count('W'))
            home_winrate = home_won/len(home)
            home_avg.append(home_winrate)
            home_avg.append(experience)
            home_avg.append(age)
            home_avg.append(height)
            home_avg.append(weight)
        if len(away) != 0:
            away_avg = [float(sum(x))/float(len(x)) for x in zip(*[match[4:] for match in away])]
            #Ensuring percentages are correct
            for i, j in zip([3, 6, 9], [0.45, 0.35, 0.75]):
                away_avg[i] = j if away_avg[i - 1] == 0 else away_avg[i - 2]/away_avg[i - 1]
            away_won = float([match[3] for match in away].count('W'))
            away_winrate = away_won/len(away)
            away_avg.append(away_winrate)
            away_avg.append(experience)
            away_avg.append(age)
            away_avg.append(height)
            away_avg.append(weight)
        return averaged, home_avg, away_avg

#print average('2011-12', '201149')


#Computes fantasy points of a given player on his given ith game (last by default)
#Allows different ways of computing points but uses ESPN values by default
def compute_fantasy(player, game_number = -1, PTS = 1, BLK = 1, STL = 1, AST = 1, REB = 1, FGM = 1, FTM = 1, FGA = -1, FTA = -1, TOV = -1):
    games_num = len(player['stats'])
    if game_number == -1:
        return compute_fantasy(player, games_num - 1, PTS, BLK, STL, AST, REB, FGM, FTM, FGA, FTA, TOV)
    elif game_number >= games_num:
        print "This game does not exist, returned last game played instead"
        return compute_fantasy(player, games_num - 1, PTS, BLK, STL, AST, REB, FGM,
                               FTM, FGA, FTA, TOV)
    else:
        game = player['stats'][game_number]
        score = PTS*game[22] + BLK*game[19] + STL*game[18] + AST*game[17] + REB*game[16] + FGM*game[5] \
            + FTM*game[11] + FGA*game[6] + FTA*game[12] + TOV*game[20]
        return score


#Returns the weighted average with avg1 more important than avg2 by a factor of weight
def weighted_average(avg1, avg2, weight = 2):
    if len(avg1) == 0:
        return avg2
    elif len(avg2) == 0:
        return avg1
    avg = []
    for i, (a, b) in enumerate(zip(avg1, avg2)):
        #to ensure correct percentages
        if i in [3, 6, 9]:
            tmp = avg[i - 2]/avg[i - 1]
        else:
            tmp = (a*weight + b)/(weight + 1)
        avg.append(tmp)
    return avg


def baseline(season, best_players = 0):
    errors = []
    if best_players == 0:
        players = glob.glob('data' + os.sep + season + os.sep + 'player_stats' + os.sep + "*.pkl")
    else:
        best = get_fantasies(season, 'OCT 20, ' + season[:4], 'DEC 15, ' + season[:4])
        players = []
        for player in best[:best_players]:
            players.append(player[0])
    for file in players:
        playerID = file[26:-4] if best_players == 0 else file
        player = pickle.load(open('data' + os.sep + season + os.sep + 'player_stats' + os.sep + playerID + '.pkl', 'rb'))
        games_num = len(player['stats'])
        for i in range(games_num - 1):
            next_points = compute_fantasy(player, i + 1)
            curr_points = compute_fantasy(player, i)
            errors.append(abs(next_points - curr_points))
    error = np.mean(errors), np.max(errors)
    file = open('data' + os.sep + season + os.sep + 'averages' + os.sep + 'baseline.txt', "w")
    file.write("{}".format(error))
    file.close()
    print "Average error and max error for season {} is {}".format(season, error)
    return error


def baselines(seasons, best_players = 0, avg = False):
    avg_error = 0.
    avg_max = 0.
    for season in seasons:
        print "computing for season {}".format(season)
        if not avg:
            error = baseline(season, best_players)
        else:
            error = avg_baseline(season, best_players)
        avg_error += error[0]
        avg_max += error[1]
    result = avg_error/len(seasons), avg_max/len(seasons)
    print "Average error and averaged max error over all seasons is %s" % (result,)
    return result


def avg_baseline(season, best_players = 0):
    errors = []
    if best_players == 0:
        players = glob.glob('data' + os.sep + season + os.sep + 'player_stats' + os.sep + "*.pkl")
    else:
        best = get_fantasies(season, 'OCT 20, ' + season[:4], 'DEC 15, ' + season[:4])
        players = []
        for player in best[:best_players]:
            players.append(player[0])
    for file in players:
        playerID = file[26:-4] if best_players == 0 else file
        player = pickle.load(open('data' + os.sep + season + os.sep + 'player_stats' + os.sep + playerID + '.pkl', 'rb'))
        games_num = len(player['stats'])
        points = []
        for i in range(games_num - 1):
            points.append(compute_fantasy(player, i))
            next_points = compute_fantasy(player, i + 1)
            avg_points = np.mean(points)
            errors.append(abs(next_points - avg_points))
    error = np.mean(errors), np.max(errors)
    print "Average error and max error for season {} is {}".format(season, error)
    return error

#print compute_fantasy('2011-12', '977', 0)

# positions = []
# for file in os.listdir("data/2006-07/player_stats"):
#     player = pickle.load(open("data/2006-07/player_stats/" + file, 'rb'))
#     print player['experience']
#     position = player['position']
#
#     if position not in positions:
#         positions.append(position)
#
# print positions
#
#print average('2005-06', '15')[0]
#player = pickle.load(open('data' + os.sep + 'sample_' + os.sep + 'player_stats' + os.sep + '708' + '.pkl', 'rb'))
#print player['stats']
#print len(player['stats'])
#print average('sample_', player, 47, -2)[0]
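# Illustrative usage sketch (not part of the original pipeline). It assumes the same
# on-disk layout used by baseline() above -- data/<season>/player_stats/<playerID>.pkl --
# and a player dict with the 'stats', 'experience', 'age', 'height' and 'weight' keys
# expected by average(). The season and player id below are placeholders.
#
# if __name__ == '__main__':
#     season = '2011-12'       # placeholder
#     player_id = '977'        # placeholder
#     path = 'data' + os.sep + season + os.sep + 'player_stats' + os.sep + player_id + '.pkl'
#     player = pickle.load(open(path, 'rb'))
#
#     overall, home, away = average(player)      # averages over all games but the last
#     blended = weighted_average(home, away)     # home games weighted twice as heavily
#     print "last-game fantasy points:", compute_fantasy(player)
#     print "blended averages:", blended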
""" Oracle database backend for Django. Requires cx_Oracle: http://cx-oracle.sourceforge.net/ """ from __future__ import unicode_literals import decimal import re import sys import warnings def _setup_environment(environ): import platform # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith('CYGWIN'): try: import ctypes except ImportError as e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e) kernel32 = ctypes.CDLL('kernel32') for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: import os os.environ.update(environ) _setup_environment([ # Oracle takes client-side character set encoding from the environment. ('NLS_LANG', '.UTF8'), # This prevents unicode from getting mangled by getting encoded into the # potentially non-unicode database character set. ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'), ]) try: import cx_Oracle as Database except ImportError as e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) from django.db import utils from django.db.backends import * from django.db.backends.oracle.client import DatabaseClient from django.db.backends.oracle.creation import DatabaseCreation from django.db.backends.oracle.introspection import DatabaseIntrospection from django.utils.encoding import force_bytes, force_text DatabaseError = Database.DatabaseError IntegrityError = Database.IntegrityError # Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will # also be True for cx_Oracle 5.1 and in Python 3.0. See #19606 if int(Database.version.split('.', 1)[0]) >= 5 and \ (int(Database.version.split('.', 2)[1]) >= 1 or not hasattr(Database, 'UNICODE')): convert_unicode = force_text else: convert_unicode = force_bytes class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () needs_datetime_string_cast = False interprets_empty_strings_as_nulls = True uses_savepoints = True has_select_for_update = True has_select_for_update_nowait = True can_return_id_from_insert = True allow_sliced_subqueries = False supports_subqueries_in_group_by = False supports_transactions = True supports_timezones = False supports_bitwise_or = False can_defer_constraint_checks = True ignores_nulls_in_unique_constraints = False has_bulk_insert = True supports_tablespaces = True supports_sequence_reset = False class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.oracle.compiler" def autoinc_sql(self, table, column): # To simulate auto-incrementing primary keys in Oracle, we have to # create a sequence and a trigger. 
sq_name = self._get_sequence_name(table) tr_name = self._get_trigger_name(table) tbl_name = self.quote_name(table) col_name = self.quote_name(column) sequence_sql = """ DECLARE i INTEGER; BEGIN SELECT COUNT(*) INTO i FROM USER_CATALOG WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE'; IF i = 0 THEN EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"'; END IF; END; /""" % locals() trigger_sql = """ CREATE OR REPLACE TRIGGER "%(tr_name)s" BEFORE INSERT ON %(tbl_name)s FOR EACH ROW WHEN (new.%(col_name)s IS NULL) BEGIN SELECT "%(sq_name)s".nextval INTO :new.%(col_name)s FROM dual; END; /""" % locals() return sequence_sql, trigger_sql def cache_key_culling_sql(self): return """ SELECT cache_key FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s) WHERE rank = %%s + 1 """ def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. return "TO_CHAR(%s, 'D')" % field_name else: # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_interval_sql(self, sql, connector, timedelta): """ Implements the interval functionality for expressions format for Oracle: (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)) """ minutes, seconds = divmod(timedelta.seconds, 60) hours, minutes = divmod(minutes, 60) days = str(timedelta.days) day_precision = len(days) fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))" return fmt % (sql, connector, days, hours, minutes, seconds, timedelta.microseconds, day_precision) def date_trunc_sql(self, lookup_type, field_name): # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084 if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. _tzname_re = re.compile(r'^[\w/:+-]+$') def _convert_field_to_tz(self, field_name, tzname): if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE. result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname) # Extracting from a TIMESTAMP WITH TIME ZONE ignore the time zone. # Convert to a DATETIME, which is called DATE by Oracle. There's no # built-in function to do that; the easiest is to go through a string. result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result # Re-convert to a TIMESTAMP because EXTRACT only handles the date part # on DATE values, even though they actually store the time part. return "CAST(%s AS TIMESTAMP)" % result def datetime_extract_sql(self, lookup_type, field_name, tzname): if settings.USE_TZ: field_name = self._convert_field_to_tz(field_name, tzname) if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. 
sql = "TO_CHAR(%s, 'D')" % field_name else: # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) return sql, [] def datetime_trunc_sql(self, lookup_type, field_name, tzname): if settings.USE_TZ: field_name = self._convert_field_to_tz(field_name, tzname) # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084 if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = field_name # Cast to DATE removes sub-second precision. return sql, [] def convert_values(self, value, field): if isinstance(value, Database.LOB): value = value.read() if field and field.get_internal_type() == 'TextField': value = force_text(value) # Oracle stores empty strings as null. We need to undo this in # order to adhere to the Django convention of using the empty # string instead of null, but only if the field accepts the # empty string. if value is None and field and field.empty_strings_allowed: value = '' # Convert 1 or 0 to True or False elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'): value = bool(value) # Force floats to the correct type elif value is not None and field and field.get_internal_type() == 'FloatField': value = float(value) # Convert floats to decimals elif value is not None and field and field.get_internal_type() == 'DecimalField': value = util.typecast_decimal(field.format_number(value)) # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. We use the type # of the Field to determine which to cast to, but it's not # always available. # As a workaround, we cast to date if all the time-related # values are 0, or to time if the date is 1/1/1900. # This could be cleaned a bit by adding a method to the Field # classes to normalize values from the database (the to_python # method is used for validation and isn't what we want here). elif isinstance(value, Database.Timestamp): if field and field.get_internal_type() == 'DateTimeField': pass elif field and field.get_internal_type() == 'DateField': value = value.date() elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1): value = value.time() elif value.hour == value.minute == value.second == value.microsecond == 0: value = value.date() return value def datetime_cast_sql(self): return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def drop_sequence_sql(self, table): return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table)) def fetch_returned_insert_id(self, cursor): return int(cursor._insert_id_var.getvalue()) def field_cast_sql(self, db_type): if db_type and db_type.endswith('LOB'): return "DBMS_LOB.SUBSTR(%s)" else: return "%s" def last_executed_query(self, cursor, sql, params): # http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement # The DB API definition does not define this attribute. 
statement = cursor.statement if not six.PY3 and not isinstance(statement, unicode): statement = statement.decode('utf-8') # Unlike Psycopg's `query` and MySQLdb`'s `_last_executed`, CxOracle's # `statement` doesn't contain the query parameters. refs #20010. return super(DatabaseOperations, self).last_executed_query(cursor, statement, params) def last_insert_id(self, cursor, table_name, pk_name): sq_name = self._get_sequence_name(table_name) cursor.execute('SELECT "%s".currval FROM dual' % sq_name) return cursor.fetchone()[0] def lookup_cast(self, lookup_type): if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): return "UPPER(%s)" return "%s" def max_in_list_size(self): return 1000 def max_name_length(self): return 30 def prep_for_iexact_query(self, x): return x def process_clob(self, value): if value is None: return '' return force_text(value.read()) def quote_name(self, name): # SQL92 requires delimited (quoted) names to be case-sensitive. When # not quoted, Oracle has case-insensitive behavior for identifiers, but # always defaults to uppercase. # We simplify things by making Oracle identifiers always uppercase. if not name.startswith('"') and not name.endswith('"'): name = '"%s"' % util.truncate_name(name.upper(), self.max_name_length()) # Oracle puts the query text into a (query % args) construct, so % signs # in names need to be escaped. The '%%' will be collapsed back to '%' at # that stage so we aren't really making the name longer here. name = name.replace('%','%%') return name.upper() def random_function_sql(self): return "DBMS_RANDOM.RANDOM" def regex_lookup_9(self, lookup_type): raise NotImplementedError("Regexes are not supported in Oracle before version 10g.") def regex_lookup_10(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def regex_lookup(self, lookup_type): # If regex_lookup is called before it's been initialized, then create # a cursor to initialize it and recur. self.connection.cursor() return self.connection.ops.regex_lookup(lookup_type) def return_insert_id(self): return "RETURNING %s INTO %%s", (InsertIdVar(),) def savepoint_create_sql(self, sid): return convert_unicode("SAVEPOINT " + self.quote_name(sid)) def savepoint_rollback_sql(self, sid): return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid)) def sql_flush(self, style, tables, sequences): # Return a list of 'TRUNCATE x;', 'TRUNCATE y;', # 'TRUNCATE z;'... style SQL statements if tables: # Oracle does support TRUNCATE, but it seems to get us into # FK referential trouble, whereas DELETE FROM table works. sql = ['%s %s %s;' % \ (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table))) for table in tables] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. 
sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: sequence_name = self._get_sequence_name(sequence_info['table']) table_name = self.quote_name(sequence_info['table']) column_name = self.quote_name(sequence_info['column'] or 'id') query = _get_sequence_reset_sql() % {'sequence': sequence_name, 'table': table_name, 'column': column_name} sql.append(query) return sql def sequence_reset_sql(self, style, model_list): from django.db import models output = [] query = _get_sequence_reset_sql() for model in model_list: for f in model._meta.local_fields: if isinstance(f, models.AutoField): table_name = self.quote_name(model._meta.db_table) sequence_name = self._get_sequence_name(model._meta.db_table) column_name = self.quote_name(f.column) output.append(query % {'sequence': sequence_name, 'table': table_name, 'column': column_name}) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.rel.through: table_name = self.quote_name(f.m2m_db_table()) sequence_name = self._get_sequence_name(f.m2m_db_table()) column_name = self.quote_name('id') output.append(query % {'sequence': sequence_name, 'table': table_name, 'column': column_name}) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def value_to_db_datetime(self, value): if value is None: return None # Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = value.astimezone(timezone.utc).replace(tzinfo=None) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return six.text_type(value) def value_to_db_time(self, value): if value is None: return None if isinstance(value, six.string_types): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def year_lookup_bounds_for_date_field(self, value): first = '%s-01-01' second = '%s-12-31' return [first % value, second % value] def combine_expression(self, connector, sub_expressions): "Oracle requires special cases for %% and & operators in query expressions" if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': raise NotImplementedError("Bit-wise or is not supported in Oracle.") return super(DatabaseOperations, self).combine_expression(connector, sub_expressions) def _get_sequence_name(self, table): name_length = self.max_name_length() - 3 return '%s_SQ' % util.truncate_name(table, name_length).upper() def _get_trigger_name(self, table): name_length = self.max_name_length() - 3 return '%s_TR' % util.truncate_name(table, name_length).upper() def bulk_insert_sql(self, fields, num_values): items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields)) return " UNION ALL ".join([items_sql] * num_values) class _UninitializedOperatorsDescriptor(object): def __get__(self, instance, owner): # If connection.operators is looked up before a connection has been # created, transparently 
initialize connection.operators to avert an # AttributeError. if instance is None: raise AttributeError("operators not available as class attribute") # Creating a cursor will initialize the operators. instance.cursor().close() return instance.__dict__['operators'] class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'oracle' operators = _UninitializedOperatorsDescriptor() _standard_operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", } _likec_operators = _standard_operators.copy() _likec_operators.update({ 'contains': "LIKEC %s ESCAPE '\\'", 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'", 'startswith': "LIKEC %s ESCAPE '\\'", 'endswith': "LIKEC %s ESCAPE '\\'", 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'", 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'", }) Database = Database def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.features = DatabaseFeatures(self) use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True) self.features.can_return_id_from_insert = use_returning_into self.ops = DatabaseOperations(self) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) def _connect_string(self): settings_dict = self.settings_dict if not settings_dict['HOST'].strip(): settings_dict['HOST'] = 'localhost' if settings_dict['PORT'].strip(): dsn = Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME']) else: dsn = settings_dict['NAME'] return "%s/%s@%s" % (settings_dict['USER'], settings_dict['PASSWORD'], dsn) def get_connection_params(self): conn_params = self.settings_dict['OPTIONS'].copy() if 'use_returning_into' in conn_params: del conn_params['use_returning_into'] return conn_params def get_new_connection(self, conn_params): conn_string = convert_unicode(self._connect_string()) return Database.connect(conn_string, **conn_params) def init_connection_state(self): cursor = self.create_cursor() # Set the territory first. The territory overrides NLS_DATE_FORMAT # and NLS_TIMESTAMP_FORMAT to the territory default. When all of # these are set in single statement it isn't clear what is supposed # to happen. cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'") # Set oracle date to ansi date format. This only needs to execute # once when we create a new connection. We also set the Territory # to 'AMERICA' which forces Sunday to evaluate to a '1' in # TO_CHAR(). cursor.execute( "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'" " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')) cursor.close() if 'operators' not in self.__dict__: # Ticket #14149: Check whether our LIKE implementation will # work for this connection or we need to fall back on LIKEC. 
# This check is performed only once per DatabaseWrapper # instance per thread, since subsequent connections will use # the same settings. cursor = self.create_cursor() try: cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s" % self._standard_operators['contains'], ['X']) except utils.DatabaseError: self.operators = self._likec_operators else: self.operators = self._standard_operators cursor.close() # There's no way for the DatabaseOperations class to know the # currently active Oracle version, so we do some setups here. # TODO: Multi-db support will need a better solution (a way to # communicate the current version). if self.oracle_version is not None and self.oracle_version <= 9: self.ops.regex_lookup = self.ops.regex_lookup_9 else: self.ops.regex_lookup = self.ops.regex_lookup_10 try: self.connection.stmtcachesize = 20 except: # Django docs specify cx_Oracle version 4.3.1 or higher, but # stmtcachesize is available only in 4.3.2 and up. pass def create_cursor(self): return FormatStylePlaceholderCursor(self.connection) def _commit(self): if self.connection is not None: try: return self.connection.commit() except Database.DatabaseError as e: # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception # with the following attributes and values: # code = 2091 # message = 'ORA-02091: transaction rolled back # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS # _C00102056) violated - parent key not found' # We convert that particular case to our IntegrityError exception x = e.args[0] if hasattr(x, 'code') and hasattr(x, 'message') \ and x.code == 2091 and 'ORA-02291' in x.message: six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) raise # Oracle doesn't support savepoint commits. Ignore them. def _savepoint_commit(self, sid): pass def _set_autocommit(self, autocommit): self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they are returned to deferred. """ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') def is_usable(self): try: if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher self.connection.ping() else: # Use a cx_Oracle cursor directly, bypassing Django's utilities. self.connection.cursor().execute("SELECT 1 FROM DUAL") except DatabaseError: return False else: return True @cached_property def oracle_version(self): with self.temporary_connection(): version = self.connection.version try: return int(version.split('.')[0]) except ValueError: return None class OracleParam(object): """ Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an `input_size` attribute, then the value of the `input_size` attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query. """ def __init__(self, param, cursor, strings_only=False): # With raw SQL queries, datetimes can reach this function # without being converted by DateTimeField.get_db_prep_value. if settings.USE_TZ and isinstance(param, datetime.datetime): if timezone.is_naive(param): warnings.warn("Oracle received a naive datetime (%s)" " while time zone support is active." 
% param, RuntimeWarning) default_timezone = timezone.get_default_timezone() param = timezone.make_aware(param, default_timezone) param = param.astimezone(timezone.utc).replace(tzinfo=None) # Oracle doesn't recognize True and False correctly in Python 3. # The conversion done below works both in 2 and 3. if param is True: param = "1" elif param is False: param = "0" if hasattr(param, 'bind_parameter'): self.force_bytes = param.bind_parameter(cursor) elif isinstance(param, six.memoryview): self.force_bytes = param else: self.force_bytes = convert_unicode(param, cursor.charset, strings_only) if hasattr(param, 'input_size'): # If parameter has `input_size` attribute, use that. self.input_size = param.input_size elif isinstance(param, six.string_types) and len(param) > 4000: # Mark any string param greater than 4000 characters as a CLOB. self.input_size = Database.CLOB else: self.input_size = None class VariableWrapper(object): """ An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instanciate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is. """ def __init__(self, var): self.var = var def bind_parameter(self, cursor): return self.var def __getattr__(self, key): return getattr(self.var, key) def __setattr__(self, key, value): if key == 'var': self.__dict__[key] = value else: setattr(self.var, key, value) class InsertIdVar(object): """ A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement. """ def bind_parameter(self, cursor): param = cursor.cursor.var(Database.NUMBER) cursor._insert_id_var = param return param class FormatStylePlaceholderCursor(object): """ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". We also do automatic conversion between Unicode on the Python side and UTF-8 -- for talking to Oracle -- in here. """ charset = 'utf-8' def __init__(self, connection): self.cursor = connection.cursor() # Necessary to retrieve decimal values without rounding error. self.cursor.numbersAsStrings = True # Default arraysize of 1 is highly sub-optimal. self.cursor.arraysize = 100 def _format_params(self, params): return tuple([OracleParam(p, self, True) for p in params]) def _guess_input_sizes(self, params_list): sizes = [None] * len(params_list[0]) for params in params_list: for i, value in enumerate(params): if value.input_size: sizes[i] = value.input_size self.setinputsizes(*sizes) def _param_generator(self, params): return [p.force_bytes for p in params] def execute(self, query, params=None): # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] if params is None: params = [] query = convert_unicode(query, self.charset) else: params = self._format_params(params) args = [(':arg%d' % i) for i in range(len(params))] query = convert_unicode(query % tuple(args), self.charset) self._guess_input_sizes([params]) try: return self.cursor.execute(query, self._param_generator(params)) except Database.DatabaseError as e: # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400. 
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError): six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) raise def executemany(self, query, params=None): # cx_Oracle doesn't support iterators, convert them to lists if params is not None and not isinstance(params, (list, tuple)): params = list(params) try: args = [(':arg%d' % i) for i in range(len(params[0]))] except (IndexError, TypeError): # No params given, nothing to do return None # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] query = convert_unicode(query % tuple(args), self.charset) formatted = [self._format_params(i) for i in params] self._guess_input_sizes(formatted) try: return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) except Database.DatabaseError as e: # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400. if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError): six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) raise def fetchone(self): row = self.cursor.fetchone() if row is None: return row return _rowfactory(row, self.cursor) def fetchmany(self, size=None): if size is None: size = self.arraysize return tuple([_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size)]) def fetchall(self): return tuple([_rowfactory(r, self.cursor) for r in self.cursor.fetchall()]) def var(self, *args): return VariableWrapper(self.cursor.var(*args)) def arrayvar(self, *args): return VariableWrapper(self.cursor.arrayvar(*args)) def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] else: return getattr(self.cursor, attr) def __iter__(self): return CursorIterator(self.cursor) class CursorIterator(six.Iterator): """Cursor iterator wrapper that invokes our custom row factory.""" def __init__(self, cursor): self.cursor = cursor self.iter = iter(cursor) def __iter__(self): return self def __next__(self): return _rowfactory(next(self.iter), self.cursor) def _rowfactory(row, cursor): # Cast numeric values as the appropriate Python type based upon the # cursor description, and convert strings to unicode. casted = [] for value, desc in zip(row, cursor.description): if value is not None and desc[1] is Database.NUMBER: precision, scale = desc[4:6] if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point # This will normally be an integer from a sequence, # but it could be a decimal value. if '.' in value: value = decimal.Decimal(value) else: value = int(value) else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. value = float(value) elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntField and DecimalField columns. if scale == 0: value = int(value) else: value = decimal.Decimal(value) elif '.' in value: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. value = decimal.Decimal(value) else: value = int(value) # datetimes are returned as TIMESTAMP, except the results # of "dates" queries, which are returned as DATETIME. 
elif desc[1] in (Database.TIMESTAMP, Database.DATETIME): # Confirm that dt is naive before overwriting its tzinfo. if settings.USE_TZ and value is not None and timezone.is_naive(value): value = value.replace(tzinfo=timezone.utc) elif desc[1] in (Database.STRING, Database.FIXED_CHAR, Database.LONG_STRING): value = to_unicode(value) casted.append(value) return tuple(casted) def to_unicode(s): """ Convert strings to Unicode objects (and return all other data types unchanged). """ if isinstance(s, six.string_types): return force_text(s) return s def _get_sequence_reset_sql(): # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. return """ DECLARE table_value integer; seq_value integer; BEGIN SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = '%(sequence)s'; WHILE table_value > seq_value LOOP SELECT "%(sequence)s".nextval INTO seq_value FROM dual; END LOOP; END; /"""
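# Illustrative only (not part of the backend): a standalone sketch of the placeholder
# rewriting performed in FormatStylePlaceholderCursor.execute() above. Django-style
# "%s" markers are substituted with the ":arg0", ":arg1", ... bind-variable names that
# cx_Oracle expects; the parameters themselves are passed to the cursor separately.

def _demo_placeholder_rewrite(query, params):
    # Mirrors: args = [(':arg%d' % i) for i in range(len(params))]
    args = [(':arg%d' % i) for i in range(len(params))]
    return query % tuple(args)

# _demo_placeholder_rewrite("SELECT * FROM t WHERE a = %s AND b = %s", (1, 2))
# -> 'SELECT * FROM t WHERE a = :arg0 AND b = :arg1'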
from fixtures import * import cim def test_mapping_type_guess_xp(): ''' test automatic detection of winxp repositories. ''' repodir = os.path.join(os.path.dirname(__file__), 'repos') xpdir = os.path.join(repodir, 'xp') repopath = os.path.join(xpdir, 'mapping-only') assert cim.CIM.guess_cim_type(repopath) == cim.CIM_TYPE_XP def test_mapping_type_guess_win7(): ''' test automatic detection of win7 repositories. ''' repodir = os.path.join(os.path.dirname(__file__), 'repos') win7dir = os.path.join(repodir, 'win7') repopath = os.path.join(win7dir, 'deleted-instance') assert cim.CIM.guess_cim_type(repopath) == cim.CIM_TYPE_WIN7 ############ INDEX MAPPING ############################################### def test_index_mapping(repo): """ demonstrate extraction of basic information from the mapping header. Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.index_mapping # collected empirically. assert len(mapping.map.entries) == 7824 assert mapping.map.free_dword_count == 241 assert mapping.map.header.physical_page_count == 547 assert mapping.map.header.mapping_entry_count == 326 assert mapping.get_physical_page_number(logical_page_number=0) == 13 assert mapping.get_logical_page_number(physical_page_number=13) == 0 def test_index_mapping_inconsistencies(repo): """ find logical pages where the physical page does not map back to it. this is probably where there are two logical pages that point to the same physical page. Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.index_mapping # logical pages where the physical page does not map back to it. # that is, there must be two logical pages that point here. inconsistencies = [] for i in range(mapping.map.header.mapping_entry_count): try: pnum = mapping.get_physical_page_number(logical_page_number=i) if i != mapping.get_logical_page_number(physical_page_number=pnum): inconsistencies.append(i) except cim.UnmappedPage: continue # collected empirically. assert inconsistencies == [] def test_unmapped_index_logical_pages(repo): """ find logical pages that have no physical page. presumably you can't fetch these pages. Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.index_mapping unmapped_pages = [] for i in range(mapping.map.header.mapping_entry_count): if not mapping.is_logical_page_mapped(i): unmapped_pages.append(i) continue # collected empirically. assert unmapped_pages == [91, 160, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 227, 228, 230] def test_unallocated_index_physical_pages(repo): """ find physical pages that have no logical page. to do this, need to actually reference the size of the index. this should contain unallocated data. Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.index_mapping index = repo.logical_index_store unmapped_pages = [] for i in range(index.page_count): if not mapping.is_physical_page_mapped(i): unmapped_pages.append(i) continue # collected empirically. 
assert unmapped_pages == [4, 8, 40, 48, 62, 70, 74, 84, 116, 117, 118, 119, 122, 126, 131, 132, 134, 142, 153, 156, 159, 161, 165, 167, 169, 179, 181, 182, 184, 185, 186, 188, 190, 192, 195, 199, 203, 205, 207, 209, 210, 212, 213, 214, 216, 217, 218, 225, 230, 232, 234, 238, 239, 241, 244, 245, 253, 254, 258, 260, 262, 264, 265, 266, 268, 269, 273, 274, 275, 277, 279, 283, 284, 286, 292, 293, 294, 295, 296, 301, 309, 311, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 325, 330, 331, 334, 341, 347, 349, 352, 354, 355, 357, 358, 365, 366, 367, 372, 373, 375, 379, 380, 381, 383, 384, 386, 387, 388, 390, 391, 392, 393, 394, 395, 396, 398, 401, 403, 404, 406, 407, 408, 409, 410, 414, 415, 417, 419, 420, 422, 424, 425, 426, 430, 432, 433, 434, 435, 436, 437, 438, 439, 440, 442, 443, 447, 448, 449, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 470, 471, 474, 475, 476, 477, 478, 479, 480, 481, 486, 487, 489, 490, 491, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546] ############ DATA MAPPING ############################################### def test_data_mapping(repo): """ Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.data_mapping # collected empirically. assert len(mapping.map.entries) == 41448 assert mapping.map.free_dword_count == 159 assert mapping.map.header.physical_page_count == 1886 assert mapping.map.header.mapping_entry_count == 1727 assert mapping.get_physical_page_number(logical_page_number=0) == 0 assert mapping.get_logical_page_number(physical_page_number=0) == 0 def test_data_mapping_inconsistencies(repo): """ find logical pages where the physical page does not map back to it. this is probably where there are two logical pages that point to the same physical page. Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.data_mapping # logical pages where the physical page does not map back to it. # that is, there must be two logical pages that point here. inconsistencies = [] for i in range(mapping.map.header.mapping_entry_count): try: pnum = mapping.get_physical_page_number(logical_page_number=i) if i != mapping.get_logical_page_number(physical_page_number=pnum): inconsistencies.append(i) except cim.UnmappedPage: continue # collected empirically. assert inconsistencies == [] def test_unmapped_data_logical_pages(repo): """ find logical pages that have no physical page. presumably you can't fetch these pages. Args: repo (cim.CIM): the deleted-instance repo Returns: None """ mapping = repo.index_mapping unmapped_pages = [] for i in range(mapping.map.header.mapping_entry_count): if not mapping.is_logical_page_mapped(i): unmapped_pages.append(i) continue # collected empirically. assert unmapped_pages == [91, 160, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 227, 228, 230]
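# Illustrative refactoring sketch (not part of the test suite): the loop that collects
# unmapped logical pages is repeated in test_unmapped_index_logical_pages and
# test_unmapped_data_logical_pages above. A shared helper such as the one below could
# serve both; it relies only on the mapping attributes already exercised in the tests.

def _unmapped_logical_pages(mapping):
    """Return the logical page numbers that have no physical page backing them."""
    return [i
            for i in range(mapping.map.header.mapping_entry_count)
            if not mapping.is_logical_page_mapped(i)]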
# Remote Control - As The Remote Device # # This script configures your OpenMV Cam as a co-processor that can be remotely controlled by # another microcontroller or computer such as an Arduino, ESP8266/ESP32, RaspberryPi, and # even another OpenMV Cam. # # This script is designed to pair with "popular_features_as_the_controller_device.py". import image, network, math, rpc, sensor, struct, tf sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time = 2000) # The RPC library above is installed on your OpenMV Cam and provides mutliple classes for # allowing your OpenMV Cam to be controlled over CAN, I2C, SPI, UART, USB VCP, or LAN/WLAN. ################################################################ # Choose the interface you wish to control your OpenMV Cam over. ################################################################ # Uncomment the below line to setup your OpenMV Cam for control over CAN. # # * message_id - CAN message to use for data transport on the can bus (11-bit). # * bit_rate - CAN bit rate. # * sample_point - Tseg1/Tseg2 ratio. Typically 75%. (50.0, 62.5, 75, 87.5, etc.) # # NOTE: Master and slave message ids and can bit rates must match. Connect master can high to slave # can high and master can low to slave can lo. The can bus must be terminated with 120 ohms. # # interface = rpc.rpc_can_slave(message_id=0x7FF, bit_rate=250000, sample_point=75) # Uncomment the below line to setup your OpenMV Cam for control over I2C. # # * slave_addr - I2C address. # # NOTE: Master and slave addresses must match. Connect master scl to slave scl and master sda # to slave sda. You must use external pull ups. Finally, both devices must share a ground. # # interface = rpc.rpc_i2c_slave(slave_addr=0x12) # Uncomment the below line to setup your OpenMV Cam for control over SPI. # # * cs_pin - Slave Select Pin. # * clk_polarity - Idle clock level (0 or 1). # * clk_phase - Sample data on the first (0) or second edge (1) of the clock. # # NOTE: Master and slave settings much match. Connect CS, SCLK, MOSI, MISO to CS, SCLK, MOSI, MISO. # Finally, both devices must share a common ground. # # interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0) # Uncomment the below line to setup your OpenMV Cam for control over UART. # # * baudrate - Serial Baudrate. # # NOTE: Master and slave baud rates must match. Connect master tx to slave rx and master rx to # slave tx. Finally, both devices must share a common ground. # interface = rpc.rpc_uart_slave(baudrate=115200) # Uncomment the below line to setup your OpenMV Cam for control over a USB VCP. # # interface = rpc.rpc_usb_vcp_slave() # Uncomment the below line to setup your OpenMV Cam for control over the lan. # # network_if = network.LAN() # network_if.active(True) # network_if.ifconfig('dhcp') # # interface = rpc.rpc_network_slave(network_if) # Uncomment the below line to setup your OpenMV Cam for control over the wlan. # # network_if = network.WLAN(network.STA_IF) # network_if.active(True) # network_if.connect('your-ssid', 'your-password') # # interface = rpc.rpc_network_slave(network_if) ################################################################ # Call Backs ################################################################ # Helper methods used by the call backs below. 
def draw_detections(img, dects): for d in dects: c = d.corners() l = len(c) for i in range(l): img.draw_line(c[(i+0)%l] + c[(i+1)%l], color = (0, 255, 0)) img.draw_rectangle(d.rect(), color = (255, 0, 0)) # Remote control works via call back methods that the controller # device calls via the rpc module on this device. Call backs # are functions which take a bytes() object as their argument # and return a bytes() object as their result. The rpc module # takes care of moving the bytes() objects across the link. # bytes() may be the micropython int max in size. # When called returns x, y, w, and h of the largest face within view. # # data is unused def face_detection(data): sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QVGA) faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface")) if not faces: return bytes() # No detections. for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255)) out_face = max(faces, key = lambda f: f[2] * f[3]) return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3]) # When called returns if there's a "person" or "no_person" within view. # # data is unused labels, net = tf.load_builtin_model('person_detection') def person_detection(data): global net sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) scores = net.classify(sensor.snapshot())[0].output() return labels[scores.index(max(scores))].encode() # When called returns the payload string for the largest qrcode # within the OpenMV Cam's field-of-view. # # data is unused def qrcode_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) codes = sensor.snapshot().find_qrcodes() if not codes: return bytes() # No detections. draw_detections(sensor.get_fb(), codes) return max(codes, key = lambda c: c.w() * c.h()).payload().encode() # When called returns a json list of json qrcode objects for all qrcodes in view. # # data is unused def all_qrcode_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) codes = sensor.snapshot().find_qrcodes() if not codes: return bytes() # No detections. draw_detections(sensor.get_fb(), codes) return str(codes).encode() # When called returns the x/y centroid, id number, and rotation of the largest # AprilTag within the OpenMV Cam's field-of-view. # # data is unused def apriltag_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QQVGA) tags = sensor.snapshot().find_apriltags() if not tags: return bytes() # No detections. draw_detections(sensor.get_fb(), tags) output_tag = max(tags, key = lambda t: t.w() * t.h()) return struct.pack("<HHHH", output_tag.cx(), output_tag.cy(), output_tag.id(), int(math.degrees(output_tag.rotation()))) # When called returns a json list of json apriltag objects for all apriltags in view. # # data is unused def all_apriltag_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QQVGA) tags = sensor.snapshot().find_apriltags() if not tags: return bytes() # No detections. draw_detections(sensor.get_fb(), tags) return str(tags).encode() # When called returns the payload string for the largest datamatrix # within the OpenMV Cam's field-of-view. 
# # data is unused def datamatrix_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) codes = sensor.snapshot().find_datamatrices() if not codes: return bytes() # No detections. draw_detections(sensor.get_fb(), codes) return max(codes, key = lambda c: c.w() * c.h()).payload().encode() # When called returns a json list of json datamatrix objects for all datamatrices in view. # # data is unused def all_datamatrix_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) codes = sensor.snapshot().find_datamatrices() if not codes: return bytes() # No detections. draw_detections(sensor.get_fb(), codes) return str(codes).encode() # When called returns the payload string for the largest barcode # within the OpenMV Cam's field-of-view. # # data is unused def barcode_detection(data): sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) sensor.set_windowing((sensor.width(), sensor.height()//8)) codes = sensor.snapshot().find_barcodes() if not codes: return bytes() # No detections. return max(codes, key = lambda c: c.w() * c.h()).payload().encode() # When called returns a json list of json barcode objects for all barcodes in view. # # data is unused def all_barcode_detection(data): sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) sensor.set_windowing((sensor.width(), sensor.height()//8)) codes = sensor.snapshot().find_barcodes() if not codes: return bytes() # No detections. return str(codes).encode() # When called returns the x/y centroid of the largest blob # within the OpenMV Cam's field-of-view. # # data is the 6-byte color tracking threshold tuple of L_MIN, L_MAX, A_MIN, A_MAX, B_MIN, B_MAX. def color_detection(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) thresholds = struct.unpack("<bbbbbb", data) blobs = sensor.snapshot().find_blobs([thresholds], pixels_threshold=500, area_threshold=500, merge=True, margin=20) if not blobs: return bytes() # No detections. for b in blobs: sensor.get_fb().draw_rectangle(b.rect(), color = (255, 0, 0)) sensor.get_fb().draw_cross(b.cx(), b.cy(), color = (0, 255, 0)) out_blob = max(blobs, key = lambda b: b.density()) return struct.pack("<HH", out_blob.cx(), out_blob.cy()) # When called returns a jpeg compressed image from the OpenMV # Cam in one RPC call. # # data is unused def jpeg_snapshot(data): sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) return sensor.snapshot().compress(quality=90).bytearray() # Register call backs. interface.register_callback(face_detection) interface.register_callback(person_detection) interface.register_callback(qrcode_detection) interface.register_callback(all_qrcode_detection) interface.register_callback(apriltag_detection) interface.register_callback(all_apriltag_detection) interface.register_callback(datamatrix_detection) interface.register_callback(all_datamatrix_detection) interface.register_callback(barcode_detection) interface.register_callback(all_barcode_detection) interface.register_callback(color_detection) interface.register_callback(jpeg_snapshot) # Once all call backs have been registered we can start # processing remote events. interface.loop() does not return. interface.loop()
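# Illustrative controller-side sketch (this belongs in the companion
# "popular_features_as_the_controller_device.py" script, not in this slave script).
# It assumes the rpc master classes mirror the slave classes used above
# (e.g. rpc.rpc_uart_master) and that interface.call() returns the bytes() produced
# by the call back, or None on failure. The struct layout matches what
# face_detection() above packs.
#
# import rpc, struct
#
# interface = rpc.rpc_uart_master(baudrate=115200)   # must match the slave's baudrate
#
# result = interface.call("face_detection")
# if result is not None and len(result):
#     x, y, w, h = struct.unpack("<HHHH", result)
#     print("largest face: x=%d y=%d w=%d h=%d" % (x, y, w, h))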
########################################################################## # # Copyright (c) 2011, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import unittest import os import sys import threading import IECore import IECoreRI class SXRendererTest( unittest.TestCase ) : def __loadImage( self, fileName ) : i = IECore.Reader.create( fileName ).read() r = i["R"].data g = i["G"].data b = i["B"].data result = IECore.V3fVectorData() v = IECore.V3f for i in range( 0, len( r ) ) : result.append( v( r[i], g[i], b[i] ) ) return result def __saveImage( self, data, dataWindow, fileName ) : image = IECore.ImagePrimitive( dataWindow, dataWindow ) if isinstance( data, IECore.FloatVectorData ) : image["R"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data ) image["G"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data ) image["B"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data ) else : r = IECore.FloatVectorData() g = IECore.FloatVectorData() b = IECore.FloatVectorData() for c in data : r.append( c[0] ) g.append( c[1] ) b.append( c[2] ) image["R"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, r ) image["G"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, g ) image["B"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, b ) IECore.Writer.create( image, fileName ).write() def __rectanglePoints( self, box ) : p = IECore.V3fVectorData() n = IECore.V3fVectorData() i = IECore.V3fVectorData() dPdu = IECore.V3fVectorData() dPdv = IECore.V3fVectorData() s = IECore.FloatVectorData() t = IECore.FloatVectorData() for y in range( box.min.y, box.max.y + 1 ) : for x in range( box.min.x, box.max.x + 1 ) : p.append( IECore.V3f( x, y, 0 ) ) n.append( IECore.V3f( 0, 0, 1 ) ) i.append( IECore.V3f( 0, 0, -1 ) ) dPdu.append( IECore.V3f( 2, 0, 0 ) ) dPdv.append( IECore.V3f( 
0, 2, 0 ) ) s.append( float( x ) / box.size().x ) t.append( float( y ) / box.size().y ) return IECore.CompoundData( { "P" : p, "N" : n, "Ng" : n, "I" : i, "dPdu" : dPdu, "dPdv" : dPdv, "s" : s, "t" : t, } ) def __assertVectorDataAlmostEqual( self, data1, data2 ) : self.assertEqual( len( data1 ), len( data2 ) ) self.assertEqual( data1.typeName(), data2.typeName() ) if isinstance( data1, IECore.Color3fVectorData ) : for i in range( 0, len( data1 ) ) : self.failUnless( data1[i].equalWithAbsError( data2[i], 0.000001 ) ) else : for i in range( 0, len( data1 ) ) : self.assertAlmostEqual( data1[i], data2[i], 6 ) def test( self ) : r = IECoreRI.SXRenderer() points = IECore.CompoundData( { "N" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ), "Ng" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ), "P" : self.__loadImage( "test/IECoreRI/data/sxInput/cowP.exr" ), "I" : self.__loadImage( "test/IECoreRI/data/sxInput/cowI.exr" ), } ) self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 ) r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", { "noiseFrequency" : 1.0, "tint" : IECore.Color3f( 1 ) } ) s = r.shade( points ) self.assertEqual( len( s ), 6 ) self.failUnless( "outputFloat" in s ) self.failUnless( "outputColor" in s ) self.failUnless( "Ci" in s ) self.failUnless( "Oi" in s ) self.failUnless( "P" in s ) self.failUnless( "N" in s ) self.__assertVectorDataAlmostEqual( s["outputFloat"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowFloat.cob" ).read() ) self.__assertVectorDataAlmostEqual( s["outputColor"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowColor.cob" ).read() ) self.__assertVectorDataAlmostEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowCI.cob" ).read() ) self.__assertVectorDataAlmostEqual( s["Oi"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowOI.cob" ).read() ) def testSplineParameter( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", { "spl" : IECore.SplinefColor3fData( IECore.SplinefColor3f( IECore.CubicBasisf.catmullRom(), ( ( 0, IECore.Color3f( 1, 0, 0 ) ), ( 0, IECore.Color3f( 1, 0, 0 ) ), ( 1, IECore.Color3f( 0, 0, 1 ) ), ( 1, IECore.Color3f( 0, 0, 1 ) ), ) ) ) } ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) s = r.shade( self.__rectanglePoints( b ) ) self.__assertVectorDataAlmostEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/spline.cob" ).read() ) # make sure that users don't have to provide values for every varying shader parameter if # they don't want to. this used to crash. 
def testMissingPredefinedVariables( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) points = self.__rectanglePoints( b ) del points["t"] # remove information the shader requires s = r.shade( points ) def testParameterTypes( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", { "mustBeOne" : 1.0, "mustBeRed" : IECore.Color3f( 1, 0, 0 ), "mustBeTwo" : IECore.V3f( 2 ), "mustBeThree" : IECore.V3f( 3 ), "mustBeFour" : IECore.V3f( 4 ), "mustBeHelloWorld" : "helloWorld", "mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ), } ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) s = r.shade( self.__rectanglePoints( b ) ) self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) ) def testFloat3PrimitiveVariable( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", { "mustBeOne" : 1.0, "mustBeRed" : IECore.Color3f( 1, 0, 0 ), "mustBeTwo" : IECore.V3f( 2 ), "mustBeThree" : IECore.V3f( 3 ), "mustBeFour" : IECore.V3f( 4 ), "mustBeHelloWorld" : "helloWorld", } ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) points = self.__rectanglePoints( b ) points["mustBeOneTwoThree"] = IECore.V3fVectorData( [ IECore.V3f( 1, 2, 3 ) ] * len( points["P"] ) ) s = r.shade( points ) for c in s["Ci"] : self.assertEqual( c, IECore.Color3f( 0, 1, 0 ) ) def testIntParameterSupport( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", { "mustBeOne" : IECore.IntData( 1 ), "mustBeRed" : IECore.Color3f( 1, 0, 0 ), "mustBeTwo" : IECore.V3f( 2 ), "mustBeThree" : IECore.V3f( 3 ), "mustBeFour" : IECore.V3f( 4 ), "mustBeHelloWorld" : "helloWorld", "mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ), } ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) s = r.shade( self.__rectanglePoints( b ) ) self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) ) def testBoolParameterSupport( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", { "mustBeOne" : IECore.BoolData( True ), "mustBeRed" : IECore.Color3f( 1, 0, 0 ), "mustBeTwo" : IECore.V3f( 2 ), "mustBeThree" : IECore.V3f( 3 ), "mustBeFour" : IECore.V3f( 4 ), "mustBeHelloWorld" : "helloWorld", "mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ), } ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) s = r.shade( self.__rectanglePoints( b ) ) self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) ) def testStack( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxStackTest.sdl test/IECoreRI/shaders/sxStackTest.sl" ), 0 ) r = IECoreRI.SXRenderer() b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) points = self.__rectanglePoints( b ) self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1 ) ) ) 
self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( IECore.Color3f( 1 ) ) ) with IECore.WorldBlock( r ) : r.setAttribute( "color", IECore.Color3f( 1, 0, 0 ) ) self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) ) r.shader( "surface", "test/IECoreRI/shaders/sxStackTest.sdl", { "blue" : 1.0 } ) with IECore.AttributeBlock( r ) : r.setAttribute( "color", IECore.Color3f( 0, 1, 0 ) ) self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 0, 1, 0 ) ) ) r.shader( "surface", "test/IECoreRI/shaders/sxStackTest.sdl", { "blue" : 0.5 } ) s = r.shade( points ) for c in s["Ci"] : self.assertEqual( c, IECore.Color3f( 0, 0, 0.5 ) ) self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) ) s = r.shade( points ) for c in s["Ci"] : self.assertEqual( c, IECore.Color3f( 0, 0, 1 ) ) self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1 ) ) ) self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( IECore.Color3f( 1 ) ) ) def testNoShader( self ) : r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : self.assertRaises( RuntimeError, r.shade, self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) ) ) def testCoshaders( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 ) r = IECoreRI.SXRenderer() b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) points = self.__rectanglePoints( b ) with IECore.WorldBlock( r ) : r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 0, 0 ), "__handle" : "cs1" } ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 0, 1, 0 ), "__handle" : "cs2" } ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0, 0, 1 ), "__handle" : "cs3" } ) r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1", "cs2", "cs3" ] ) } ) s = r.shade( points ) self.assertEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/coshaders.cob" ).read() ) def testCoshadersWithGetVar( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 ) r = IECoreRI.SXRenderer() b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 4 ) ) points = self.__rectanglePoints( b ) points["forGetVar"] = IECore.Color3fVectorData( [ IECore.Color3f( x[0], x[1], x[2] ) for x in points["P"] ] ) with IECore.WorldBlock( r ) : r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "primVarName" : "forGetVar", "__handle" : "cs1" } ) r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1" ] ) } ) s = r.shade( points ) self.assertEqual( s["Ci"], points["forGetVar"] ) def testGrids( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxGridTest.sdl test/IECoreRI/shaders/sxGridTest.sl" ), 0 ) r = IECoreRI.SXRenderer() b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) with IECore.WorldBlock( r ) : r.shader( "surface", 
"test/IECoreRI/shaders/sxGridTest", {} ) # not providing enough points for the grid should raise self.assertRaises( RuntimeError, r.shade, points, IECore.V2i( 100, 500 ) ) s = r.shade( points ) del s["P"] # test data on disk was created before we supported P as an output del s["N"] # test data on disk was created before we supported N as an output self.assertEqual( s, IECore.ObjectReader( "test/IECoreRI/data/sxOutput/noGrid.cob" ).read() ) s = r.shade( points, IECore.V2i( 21, 11 ) ) del s["P"] # test data on disk was created before we supported P as an output del s["N"] # test data on disk was created before we supported N as an output self.assertEqual( s, IECore.ObjectReader( "test/IECoreRI/data/sxOutput/grid.cob" ).read() ) def testMultipleGrids( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxGridTest.sdl test/IECoreRI/shaders/sxGridTest.sl" ), 0 ) r = IECoreRI.SXRenderer() b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 19, 9 ) ) points = self.__rectanglePoints( b ) with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxGridTest", {} ) # there are 20 x 10 points in the input, so this call should shade four 10 x 5 grids: r.shade( points, IECore.V2i( 10, 5 ) ) def testPlaneShade( self ) : r = IECoreRI.SXRenderer() self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxStTest.sdl test/IECoreRI/shaders/sxStTest.sl" ), 0 ) r.shader( "surface", "test/IECoreRI/shaders/sxStTest.sdl", {} ) data = r.shadePlane( IECore.V2i( 64, 64 ) ) del data["P"] del data["N"] self.assertEqual( data, IECore.Reader.create( "test/IECoreRI/data/sxOutput/shadePlaneCompoundData.cob" ).read() ) image = r.shadePlaneToImage( IECore.V2i( 64, 64 ) ) expectedImage = IECore.Reader.create( "test/IECoreRI/data/sxOutput/shadePlaneImage.exr" ).read() self.assertEqual( IECore.ImageDiffOp()( imageA=image, imageB=expectedImage, maxError=0 ), IECore.BoolData( False ) ) def testWrongType( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} ) p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) ) p["t"] = p["P"] self.assertRaises( RuntimeError, r.shade, p ) def testWrongSize( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} ) p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) ) del p["t"][-10:] self.assertRaises( RuntimeError, r.shade, p ) def testDisplacementShader( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxDisplacementTest.sdl test/IECoreRI/shaders/sxDisplacementTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "displacement", "test/IECoreRI/shaders/sxDisplacementTest.sdl", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) ## need to use a grid topology if we want calculatenormal() to work s = r.shade( points, IECore.V2i( 21, 11 ) ) self.assertEqual( len( s ), 2 ) self.failUnless( "P" in s ) self.failUnless( "N" in s ) for i in range( 0, len( points["P"] ) ) : self.failUnless( s["P"][i].equalWithAbsError( points["P"][i] + points["N"][i], 0.001 ) ) self.failUnless( s["N"][i].equalWithAbsError( IECore.V3f( 0, 0, 1 ), 0.001 ) ) def 
testDisplacementAndSurfaceShaders( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxDisplacementTest.sdl test/IECoreRI/shaders/sxDisplacementTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "displacement", "test/IECoreRI/shaders/sxDisplacementTest.sdl", {} ) r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) ## need to use a grid topology if we want calculatenormal() to work s = r.shade( points, IECore.V2i( 21, 11 ) ) self.assertEqual( len( s ), 6 ) self.failUnless( "P" in s ) self.failUnless( "N" in s ) self.failUnless( "Ci" in s ) self.failUnless( "Oi" in s ) self.failUnless( "outputFloat" in s ) self.failUnless( "outputColor" in s ) for i in range( 0, len( points["P"] ) ) : self.failUnless( s["P"][i].equalWithAbsError( points["P"][i] + points["N"][i], 0.001 ) ) self.failUnless( s["N"][i].equalWithAbsError( IECore.V3f( 0, 0, 1 ), 0.001 ) ) def testLights( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxLightTest.sdl test/IECoreRI/shaders/sxLightTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxIlluminanceTest.sdl test/IECoreRI/shaders/sxIlluminanceTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxIlluminanceTest", {} ) r.light( "test/IECoreRI/shaders/sxLightTest", "light0", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : c = s["Ci"][i] self.assertEqual( points["P"][i], IECore.V3f( c[0], c[1], c[2] ) ) def testPredefinedPrimitiveVariables( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : self.assertEqual( s["Ci"][i], IECore.Color3f( 1, 1, 1 ) ) def testNonPredefinedPrimitiveVariables( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) points["colorPrimVar"] = IECore.Color3fVectorData( [ IECore.Color3f( v[0], v[1], v[2] ) for v in points["P"] ] ) points["floatPrimVar"] = points["s"] s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : c = points["colorPrimVar"][i] c[0] = points["s"][i] self.assertEqual( s["Ci"][i], c ) def testNonPredefinedPrimitiveVariablesForCoshaders( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -Irsl -o 
test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) points["colorPrimVar"] = IECore.Color3fVectorData( [ IECore.Color3f( v[0], v[1], v[2] ) for v in points["P"] ] ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "__handle" : "cs1" } ) r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1" ] ) } ) s = r.shade( points, IECore.V2i( 21, 11 ) ) self.assertEqual( s["Ci"], points["colorPrimVar"] ) def testUniformPrimitiveVariables( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxUniformPrimitiveVariableTest", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) points["colorPrimVar"] = IECore.Color3fData( IECore.Color3f( 0, 0.5, 1 ) ) points["floatPrimVar"] = IECore.FloatData( 16.0 ) points["vectorPrimVar"] = IECore.V3fData( IECore.V3f( 0.25, 0.5, 2 ) ) points["stringPrimVar"] = IECore.StringData( "hello shader!" ) points["stringVectorPrimVar"] = IECore.StringVectorData( ["who's", "a", "good", "boy" ] ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : self.assertEqual( s["Ci"][i], IECore.Color3f( 0.125, 0.25, 0.75 ) ) def testUniformPrimitiveVariableShaderParameters( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sdl test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) points["colorPrimVar"] = IECore.Color3fData( IECore.Color3f( 0, 0.5, 1 ) ) points["floatPrimVar"] = IECore.FloatData( 16.0 ) points["vectorPrimVar"] = IECore.V3fData( IECore.V3f( 0.25, 0.5, 2 ) ) points["stringPrimVar"] = IECore.StringData( "hello shader!" 
) points["stringVectorPrimVar"] = IECore.StringVectorData( ["who's", "a", "good", "boy" ] ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : self.assertEqual( s["Ci"][i], IECore.Color3f( 0.125, 0.25, 0.5 ) ) def testThreading( self ) : # set up a renderer with a shader in it self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", { "noiseFrequency" : 1.0, "tint" : IECore.Color3f( 1 ) } ) # and get some points to shade points = IECore.CompoundData( { "N" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ), "Ng" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ), "P" : self.__loadImage( "test/IECoreRI/data/sxInput/cowP.exr" ), "I" : self.__loadImage( "test/IECoreRI/data/sxInput/cowI.exr" ), } ) # shade in lots of different threads at the same time def s( i ) : results[i] = r.shade( points ) threads = [] results = [] for i in range( 0, 300 ) : threads.append( threading.Thread( target = IECore.curry( s, i ) ) ) results.append( None ) for t in threads : t.start() for t in threads : t.join() # and check that it all worked cowFloat = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowFloat.cob" ).read() cowColor = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowColor.cob" ).read() cowCI = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowCI.cob" ).read() cowOI = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowOI.cob" ).read() # check that the first set of results is close enough to the expected results. # we allow some small variation as 3delight's noise routines seem to yield # veryvery small differences between some versions. self.__assertVectorDataAlmostEqual( results[0]["outputFloat"], cowFloat ) self.__assertVectorDataAlmostEqual( results[0]["outputColor"], cowColor ) self.__assertVectorDataAlmostEqual( results[0]["Ci"], cowCI ) self.__assertVectorDataAlmostEqual( results[0]["Oi"], cowOI ) # check that all results are exactly equal to the first set. even if we # accept small variations between different 3delight versions we don't accept # variation within one version. 
for s in results : self.assertEqual( len( s ), 6 ) self.failUnless( "outputFloat" in s ) self.failUnless( "outputColor" in s ) self.failUnless( "Ci" in s ) self.failUnless( "Oi" in s ) self.failUnless( "P" in s ) self.failUnless( "N" in s ) self.assertEqual( s["outputFloat"], results[0]["outputFloat"] ) self.assertEqual( s["outputColor"], results[0]["outputColor"] ) self.assertEqual( s["Ci"], results[0]["Ci"] ) self.assertEqual( s["Oi"], results[0]["Oi"] ) def testGetVar( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxGetVarTest.sdl test/IECoreRI/shaders/sxGetVarTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) points["floatValue1"] = points["s"] points["floatValue2"] = points["t"] r.shader( "surface", "test/IECoreRI/shaders/sxGetVarTest", { } ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( s["Ci"] ) ) : self.assertEqual( s["Ci"][i], IECore.Color3f( 0, points["floatValue1"][i], points["floatValue2"][i] ) ) def testGetShaderInConstruct( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxGetShaderTest.sdl test/IECoreRI/shaders/sxGetShaderTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "__handle" : "cs1", "sColor" : IECore.Color3f( 0, 1, 0 ), } ) r.shader( "surface", "test/IECoreRI/shaders/sxGetShaderTest", { "coshader" : IECore.StringData( "cs1" ) } ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : self.assertEqual( s["Ci"][i], IECore.Color3f( 0, points["s"][i], 0 ) ) def testCoshadersStack( self ) : self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 ) r = IECoreRI.SXRenderer() b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) points = self.__rectanglePoints( b ) with IECore.WorldBlock( r ) : r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 0, 0 ), "__handle" : "cs1" } ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 0, 1, 0 ), "__handle" : "cs2" } ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0, 0, 1 ), "__handle" : "cs3" } ) with IECore.AttributeBlock( r ) : # these guys should be popped and therefore not affect the result r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 1, 1 ), "__handle" : "cs1" } ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 1, 1, 0 ), "__handle" : "cs2" } ) r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0.5, 0, 0.25 ), "__handle" : "cs3" } ) r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1", "cs2", "cs3" ] ) } ) s = r.shade( points ) self.assertEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/coshaders.cob" ).read() ) def testLightsStack( self ) : 
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxLightTest.sdl test/IECoreRI/shaders/sxLightTest.sl" ), 0 ) self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxIlluminanceTest.sdl test/IECoreRI/shaders/sxIlluminanceTest.sl" ), 0 ) r = IECoreRI.SXRenderer() with IECore.WorldBlock( r ) : r.shader( "surface", "test/IECoreRI/shaders/sxIlluminanceTest", {} ) r.light( "test/IECoreRI/shaders/sxLightTest", "light0", {} ) with IECore.AttributeBlock( r ) : # this guy should be popped and therefore not affect the result r.light( "test/IECoreRI/shaders/sxLightTest", "light1", {} ) b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) ) points = self.__rectanglePoints( b ) s = r.shade( points, IECore.V2i( 21, 11 ) ) for i in range( 0, len( points["P"] ) ) : c = s["Ci"][i] self.assertEqual( points["P"][i], IECore.V3f( c[0], c[1], c[2] ) ) def testZeroLength( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} ) p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) ) for k, v in p.items() : del v[:] self.assertRaises( RuntimeError, r.shade, p ) def testThreadedTextureLookups( self ) : self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxTextureTest.sdl test/IECoreRI/shaders/sxTextureTest.sl" ), 0 ) points = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 255 ) ) ) # by default you should be able to run as many threads as the hardware will support # concurrently. for i in range( 0, 10 ) : r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxTextureTest.sdl", { "fileName" : os.path.realpath( "./test/IECoreRI/data/textures/uvMap.256x256.tdl" ), } ) # note the -1 when determining the number of threads. 3delight behaviour changed around # 10.0.35, such that render:nthreads (which defaults to hardwareConcurrency()) is the # number of threads that will be making Sx calls of any sort, whereas prior to that it # was the number of threads that would actually call SxCallShader. because we've set up # the renderer on this thread, it's taken one off the count for the number of threads we # can spawn to do the shading. threads = [] for i in range( 0, IECore.hardwareConcurrency() - 1 ) : threads.append( threading.Thread( target = IECore.curry( r.shade, points ) ) ) for t in threads : t.start() for t in threads : t.join() # but if you want to use more then you need to let the library know about it # by calling setOption( "ri:render:nthreads" ) for i in range( 0, 10 ) : r = IECoreRI.SXRenderer() # see above - we're adding one to number of threads we'll be using to do the shading, # because we've also used a thread (the current thread) to perform the setup. 
r.setOption( "ri:render:nthreads", IECore.IntData( IECore.hardwareConcurrency() * 2 + 1 ) ) r.shader( "surface", "test/IECoreRI/shaders/sxTextureTest.sdl", { "fileName" : os.path.realpath( "./test/IECoreRI/data/textures/uvMap.256x256.tdl" ), } ) threads = [] for i in range( 0, IECore.hardwareConcurrency() * 2 ) : threads.append( threading.Thread( target = IECore.curry( r.shade, points ) ) ) for t in threads : t.start() for t in threads : t.join() def testUserOptions( self ): self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUserOptionTest.sdl test/IECoreRI/shaders/sxUserOptionTest.sl" ), 0 ) points = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxUserOptionTest.sdl", {} ) s = r.shade( points ) self.assertEqual( s["Ci"][0], IECore.Color3f( 0,0,0 ) ) r.setOption( "user:outputColor", IECore.FloatData( 1 ) ) s = r.shade( points ) self.assertEqual( s["Ci"][0], IECore.Color3f( 1,1,1 ) ) def testStringArrayOptions( self ): self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxStringArrayOptionTest.sdl test/IECoreRI/shaders/sxStringArrayOptionTest.sl" ), 0 ) points = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxStringArrayOptionTest.sdl", {} ) s = r.shade( points ) self.assertEqual( s["Ci"][0], IECore.Color3f( 0,0,0 ) ) r.setOption( "user:stringArray", IECore.StringVectorData( ["this","should","work"] ) ) s = r.shade( points ) self.assertEqual( s["Ci"][0], IECore.Color3f( 1,1,1 ) ) def testCoordinateSystems( self ): self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoordSystemTest.sdl test/IECoreRI/shaders/sxCoordSystemTest.sl" ), 0 ) points = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) ) r = IECoreRI.SXRenderer() r.shader( "surface", "test/IECoreRI/shaders/sxCoordSystemTest.sdl", { "coordSysName" : IECore.StringData( "test1" ) } ) r.transformBegin() r.setTransform( IECore.M44f.createTranslated( IECore.V3f(0,0,2) ) ) r.coordinateSystem( "test1" ) r.transformEnd() r.transformBegin() r.setTransform( IECore.M44f.createRotated( IECore.V3f(1,0,0) ) ) r.concatTransform( IECore.M44f.createTranslated( IECore.V3f(0,0,2) ) ) r.coordinateSystem( "test2" ) r.transformEnd() s1 = r.shade( points ) for i in range( len( s1["Ci"] ) ): self.assertEqual( points["P"][i] + IECore.V3f(0,0,2), IECore.V3f( s1["Ci"][i][0], s1["Ci"][i][1], s1["Ci"][i][2] ) ) r.shader( "surface", "test/IECoreRI/shaders/sxCoordSystemTest.sdl", { "coordSysName" : IECore.StringData( "test2" ) } ) s2 = r.shade( points ) for i in range( len( s2["Ci"] ) ): shaderP = IECore.V3f( s2["Ci"][i][0], s2["Ci"][i][1], s2["Ci"][i][2] ) transP = points["P"][i] * IECore.M44f.createTranslated( IECore.V3f(0,0,2) ) * IECore.M44f.createRotated( IECore.V3f(1,0,0) ) self.failUnless( ( shaderP - transP ).length() < 1.e-5 ) def tearDown( self ) : files = [ "test/IECoreRI/shaders/sxTest.sdl", "test/IECoreRI/shaders/splineTest.sdl", "test/IECoreRI/shaders/sxParameterTest.sdl", "test/IECoreRI/shaders/sxStackTest.sdl", "test/IECoreRI/shaders/sxCoshaderTest.sdl", "test/IECoreRI/shaders/sxCoshaderTestMain.sdl", "test/IECoreRI/shaders/sxGridTest.sdl", "test/IECoreRI/shaders/sxDisplacementTest.sdl", "test/IECoreRI/shaders/sxIlluminanceTest.sdl", "test/IECoreRI/shaders/sxLightTest.sdl", "test/IECoreRI/shaders/sxStTest.sdl", "test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sdl", 
"test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sdl", "test/IECoreRI/shaders/sxGetVarTest.sdl", "test/IECoreRI/shaders/sxGetShaderTest.sdl", "test/IECoreRI/shaders/sxTextureTest.sdl", "test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sdl", "test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sdl", "test/IECoreRI/shaders/sxUserOptionTest.sdl", "test/IECoreRI/shaders/sxCoordSystemTest.sdl", "test/IECoreRI/shaders/sxStringArrayOptionTest.sdl", ] for f in files : if os.path.exists( f ) : os.remove( f ) if __name__ == "__main__": unittest.main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Dataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.core.framework import graph_pb2 from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import optional_ops from tensorflow.python.data.ops import readers from tensorflow.python.data.util import nest from tensorflow.python.data.util import structure from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase): def testAsSerializedGraph(self): dataset = dataset_ops.Dataset.range(10) graph = graph_pb2.GraphDef().FromString( self.evaluate(dataset._as_serialized_graph())) self.assertTrue(any([node.op != "RangeDataset" for node in graph.node])) @staticmethod def make_apply_fn(dataset): def apply_fn(dataset): def _apply_fn(dataset): return dataset.cache() return dataset.apply(_apply_fn) return apply_fn @staticmethod def make_gen(): def gen(): yield 42 return gen @staticmethod def make_interleave_fn(dataset, num_parallel_calls=None): def interleave_fn(dataset): return dataset.interleave( lambda x: dataset_ops.Dataset.range(0), cycle_length=2, num_parallel_calls=num_parallel_calls) return interleave_fn @parameterized.named_parameters( ("FixedLengthRecord", lambda: readers.FixedLengthRecordDataset("", 42)), ("FromGenerator", lambda: dataset_ops.Dataset.from_generator( DatasetTest.make_gen(), dtypes.int32), 1), ("FromTensors", lambda: dataset_ops.Dataset.from_tensors([42])), ("FromTensorSlices", lambda: dataset_ops.Dataset.from_tensors([42])), ("Range", lambda: dataset_ops.Dataset.range(10)), ("TextLine", lambda: readers.TextLineDataset("")), ("TFRecord", lambda: readers.TFRecordDataset(""), 1), ) def testDatasetSimpleSourceInputs(self, dataset_fn, num_inputs=0): self.assertEqual(num_inputs, len(dataset_fn()._inputs())) def testDatasetComplexSourceInputs(self): dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices( sparse_tensor.SparseTensor( indices=np.array([[0, 0], [1, 0], [2, 0]]), values=np.array([0, 0, 0]), dense_shape=np.array([3, 1]))) self.assertEqual(0, len(dataset_fn._inputs())) @parameterized.named_parameters( ("Batch", lambda x: x.batch(10), lambda: dataset_ops.Dataset.range(0)), ("Cache", lambda x: x.cache(), lambda: dataset_ops.Dataset.range(0)), ("Filter", lambda x: x.filter(lambda x: True), lambda: dataset_ops.Dataset.range(0)), ("FlatMap", lambda x: 
x.flat_map(lambda x: dataset_ops.Dataset.range(0)), lambda: dataset_ops.Dataset.range(0)), ("Map", lambda x: x.map(lambda x: x), lambda: dataset_ops.Dataset.range(0)), ("PaddedBatch", lambda x: x.padded_batch(10, []), lambda: dataset_ops.Dataset.range(0)), ("ParallelMap", lambda x: x.map(lambda x: x, num_parallel_calls=2), lambda: dataset_ops.Dataset.range(0)), ("Repeat", lambda x: x.repeat(), lambda: dataset_ops.Dataset.range(0)), ("Shuffle", lambda x: x.shuffle(10), lambda: dataset_ops.Dataset.range(0)), ("Skip", lambda x: x.skip(1), lambda: dataset_ops.Dataset.range(0)), ("Take", lambda x: x.take(1), lambda: dataset_ops.Dataset.range(0)), ("Window", lambda x: x.window(10), lambda: dataset_ops.Dataset.range(0)), ) def testUnaryTransformationInputs(self, dataset_fn, input_dataset_fn): input_dataset = input_dataset_fn() self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs()) def testUnaryTransformationInputsApply(self): input_dataset = dataset_ops.Dataset.range(0) dataset_fn = self.make_apply_fn(dataset_ops.Dataset.range(0)) self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs()) @parameterized.named_parameters( ("ParallelInterleave", [lambda: dataset_ops.Dataset.range(0), 2], lambda: dataset_ops.Dataset.range(0)), ("Interleave", [lambda: dataset_ops.Dataset.range(0), None], lambda: dataset_ops.Dataset.range(0)), ) def testUnaryTransformationInputsWithInterleaveFn( self, interleave_fn_args, input_dataset_fn): input_dataset = input_dataset_fn() dataset_fn = self.make_interleave_fn(*interleave_fn_args) self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs()) @parameterized.named_parameters( ("Concatenate", lambda x, y: x.concatenate(y), lambda: dataset_ops.Dataset.range(0), lambda: dataset_ops.Dataset.range(1))) def testBinaryTransformationInputs(self, dataset_fn, input1_fn, input2_fn): input1 = input1_fn() input2 = input2_fn() self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs()) @parameterized.named_parameters( ("ZipOne", dataset_ops.Dataset.zip, lambda: (dataset_ops.Dataset.range(0))), ("ZipNest", dataset_ops.Dataset.zip, lambda: (dataset_ops.Dataset.range(0), (dataset_ops.Dataset.range(1), dataset_ops.Dataset.range(2)))), ("ZipTuple", dataset_ops.Dataset.zip, lambda: (dataset_ops.Dataset.range(0), dataset_ops.Dataset.range(1))), ) def testVariadicTransformationInputs(self, dataset_fn, input_datasets_fn): input_datasets = input_datasets_fn() self.assertEqual( nest.flatten(input_datasets), dataset_fn(input_datasets)._inputs()) def testCollectInputs(self): ds1 = dataset_ops.Dataset.range(0) ds2 = ds1.concatenate(ds1) ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2)) inputs = [] queue = [ds3] while queue: ds = queue[0] queue = queue[1:] queue.extend(ds._inputs()) inputs.append(ds) self.assertEqual(5, inputs.count(ds1)) self.assertEqual(2, inputs.count(ds2)) self.assertEqual(1, inputs.count(ds3)) # TODO(b/119882922): use-after-free bug in eager mode. 
# pylint: disable=g-long-lambda @parameterized.named_parameters( ("Tensor", lambda: constant_op.constant(37.0), structure.TensorStructure(dtypes.float32, [])), ("SparseTensor", lambda: sparse_tensor.SparseTensor( indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32), dense_shape=[1]), structure.SparseTensorStructure(dtypes.int32, [1])), ("Nest", lambda: { "a": constant_op.constant(37.0), "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))}, structure.NestedStructure({ "a": structure.TensorStructure(dtypes.float32, []), "b": (structure.TensorStructure(dtypes.string, [1]), structure.TensorStructure(dtypes.string, []))})), ("Dataset", lambda: dataset_ops.Dataset.from_tensor_slices( constant_op.constant([1, 2, 3])), dataset_ops.DatasetStructure( structure.TensorStructure(dtypes.int32, []))), ("Optional", lambda: optional_ops.Optional.from_value(37.0), optional_ops.OptionalStructure( structure.TensorStructure(dtypes.float32, []))), ) def testSkipEagerDatasetStructure(self, tf_value_fn, expected_element_structure): dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value_fn()) dataset_structure = structure.Structure.from_value(dataset) self.assertIsInstance(dataset_structure, dataset_ops.DatasetStructure) # TODO(b/110122868): Add a public API to `tf.data.Dataset` for accessing # the element structure. self.assertTrue(expected_element_structure.is_compatible_with( dataset_structure._element_structure)) self.assertTrue(dataset_structure._element_structure.is_compatible_with( expected_element_structure)) self.assertEqual([dtypes.variant], dataset_structure._flat_types) self.assertEqual([tensor_shape.scalar()], dataset_structure._flat_shapes) # Assert that the `Dataset` survives a round-trip via _from_tensor_list() # and _to_tensor_list(). round_trip_dataset = dataset_structure._from_tensor_list( dataset_structure._to_tensor_list(dataset)) value = tf_value_fn() if isinstance(value, dataset_ops.Dataset): self.assertDatasetsEqual(value, dataset.flat_map(lambda x: x)) elif isinstance(value, optional_ops.Optional): self.assertDatasetProduces( round_trip_dataset.map(lambda opt: opt.get_value()), [self.evaluate(value.get_value())], requires_initialization=True) else: self.assertDatasetProduces( round_trip_dataset, [self.evaluate(tf_value_fn())], requires_initialization=True) if __name__ == "__main__": test.main()
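# A minimal sketch (not part of the test file above) of the breadth-first traversal
# that testCollectInputs performs inline: walk a dataset's private `_inputs()` graph
# and collect every reachable dataset, duplicates included. Only the `_inputs()`
# hook already exercised by the tests above is assumed.

from tensorflow.python.data.ops import dataset_ops


def collect_transitive_inputs(dataset):
  """Returns every dataset reachable via `_inputs()`, including duplicates."""
  collected = []
  queue = [dataset]
  while queue:
    ds = queue.pop(0)
    queue.extend(ds._inputs())  # pylint: disable=protected-access
    collected.append(ds)
  return collected


# Mirrors testCollectInputs: zip((concatenate(ds1, ds1), ds1, concatenate(ds1, ds1)))
# yields five references to ds1 once the input graph is flattened.
ds1 = dataset_ops.Dataset.range(0)
ds2 = ds1.concatenate(ds1)
ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
assert collect_transitive_inputs(ds3).count(ds1) == 5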
import glob
import os
import logging
import re

from datetime import datetime

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import BallTree

import src.config.filepaths as fp
import src.features.fre_to_tpm.viirs.ftt_utils as ut


def make_balltree_subset(array_lats_flat, array_lons_flat):
    array_lat_lon = np.dstack([np.deg2rad(array_lats_flat), np.deg2rad(array_lons_flat)])[0]
    return BallTree(array_lat_lon, metric='haversine')


def spatial_intersection_subset(array_balltree, point_lat, point_lon, x_positions_flat, y_positions_flat):

    # express the query point in radians for the haversine ball tree
    point_location = np.array([np.deg2rad(point_lat), np.deg2rad(point_lon)]).reshape(1, -1)

    # compare the station location to the potential locations in the orbit
    distance, index = array_balltree.query(point_location, k=1)

    # get indexes
    x = x_positions_flat[index][0][0]
    y = y_positions_flat[index][0][0]
    d = distance[0][0]

    # haversine distances come back in radians; return them in degrees
    return x, y, np.rad2deg(d)


def read_aeronet(filename):
    """Read a given AERONET AOT data file, and return it as a dataframe.

    This returns a DataFrame containing the AERONET data, with the index
    set to the timestamp of the AERONET observations. Rows or columns
    consisting entirely of missing data are removed. All other columns
    are left as-is.
    """
    dateparse = lambda x: pd.datetime.strptime(x, "%d:%m:%Y %H:%M:%S")
    aeronet = pd.read_csv(filename, skiprows=6, na_values=[-999.0],
                          parse_dates={'times': [0, 1]},
                          date_parser=dateparse)

    aeronet = aeronet.set_index('times')

    # Drop any rows that are all NaN and any cols that are all NaN
    # & then sort by the index
    an = (aeronet.dropna(axis=1, how='all')
          .dropna(axis=0, how='all')
          .rename(columns={'Last_Processing_Date(dd/mm/yyyy)': 'Last_Processing_Date'})
          .sort_index())

    return an


def load_aeronet():
    aeronet_files = glob.glob(fp.path_to_aeronet + '/*/*.lev15')
    aeronet_dict = {}
    for af in aeronet_files:
        place = af.split('_')[-1].split('.')[0]
        ds = read_aeronet(af)
        aeronet_dict[place] = ds
    return aeronet_dict


def aeronet_intersections(timestamp, aeronet_station_data):
    for station in aeronet_station_data:
        time_deltas = np.abs((aeronet_station_data[station].index - timestamp).total_seconds())
        if min(time_deltas) <= 3600:  # less than an hour difference in obs time
            return True
    return False


def interpolate_aod550(angstrom, aod):
    # Angstrom power law: AOD(550) = AOD(675) * (550/675) ** (-angstrom)
    return aod * (550. / 675) ** (-angstrom)


def collocate_station(station, balltree, cols, rows, timestamp):

    # first check if there are any datapoints within the time window
    max_t_delta = 1800  # 30 minutes each side
    temporal_df = station[np.abs((station.index - timestamp).total_seconds()) <= max_t_delta]
    n_points_in_mean = temporal_df.shape[0]

    if temporal_df.empty:
        return 0, 0, 0, 0, 0, 0, 0

    # get station location
    station_lat = temporal_df.iloc[0]['Site_Latitude(Degrees)']
    station_lon = temporal_df.iloc[0]['Site_Longitude(Degrees)']

    # check if scene intersects
    x, y, d = spatial_intersection_subset(balltree, station_lat, station_lon, cols, rows)

    # only consider stations less than 1 arcminute distant from a pixel (~2 km)
    if d > 1 / 60.0:
        return 0, 0, 0, 0, 0, 0, 0

    print 'station lat', station_lat
    print 'station lon', station_lon

    # interpolate aod
    aod500 = temporal_df['AOD_500nm'].mean()
    aod675 = temporal_df['AOD_675nm'].mean()
    angstrom = temporal_df['440-675_Angstrom_Exponent'].mean()
    aod550 = interpolate_aod550(angstrom, aod675)

    return x, y, d, n_points_in_mean, aod550, aod500, aod675


def get_fname(path, timestamp):
    file_list = os.listdir(path)

    min_diff = 999999
    min_diff_ind = 0
    for i, f in enumerate(file_list):
        viirs_timestamp = re.search("[d][0-9]{8}[_][t][0-9]{4}", f).group()
        viirs_timestamp = datetime.strptime(viirs_timestamp, 'd%Y%m%d_t%H%M')

        diff = abs((timestamp - viirs_timestamp).total_seconds())
        if diff < min_diff:
            min_diff = diff
            min_diff_ind = i

    if min_diff <= 60:
        return file_list[min_diff_ind]
    else:
        return ''


def get_aod(sat_data_utm, x, y):

    sample_size = 10
    half_sample_size = sample_size / 2.0

    min_y = int(y-half_sample_size) if y-half_sample_size > 0 else 0
    min_x = int(x-half_sample_size) if x-half_sample_size > 0 else 0
    max_y = int(y+half_sample_size) if y+half_sample_size < sat_data_utm['orac_aod_utm'].shape[0] else \
        sat_data_utm['orac_aod_utm'].shape[0]
    max_x = int(x+half_sample_size) if x+half_sample_size < sat_data_utm['orac_aod_utm'].shape[1] else \
        sat_data_utm['orac_aod_utm'].shape[1]

    # do orac proc
    orac_aod_subset = sat_data_utm['orac_aod_utm'][min_y:max_y, min_x:max_x]
    orac_cost_subset = sat_data_utm['orac_cost_utm'][min_y:max_y, min_x:max_x]

    mask = orac_cost_subset <= 100
    n_orac = np.sum(mask)
    print 'n_orac within cost threshold', n_orac
    if n_orac:
        mean_orac_aod = np.mean(orac_aod_subset[mask])
        mean_orac_cost = np.mean(orac_cost_subset[mask])
    else:
        mean_orac_aod = -999
        mean_orac_cost = -999

    # do viirs
    viirs_aod_subset = sat_data_utm['viirs_aod_utm'][min_y:max_y, min_x:max_x]
    viirs_flag_subset = sat_data_utm['viirs_flag_utm'][min_y:max_y, min_x:max_x]

    mask = viirs_flag_subset <= 1
    n_viirs = np.sum(mask)
    if n_viirs:
        mean_viirs_aod = np.mean(viirs_aod_subset[mask])
        mean_viirs_flag = np.mean(viirs_flag_subset[mask])
    else:
        mean_viirs_aod = -999
        mean_viirs_flag = -999

    return mean_orac_aod, mean_orac_cost, mean_viirs_aod, mean_viirs_flag, n_orac, n_viirs


def image_histogram_equalization(image, number_bins=256):
    # from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html

    # get image histogram
    image_histogram, bins = np.histogram(image[image > 0].flatten(), number_bins, normed=True)
    cdf = image_histogram.cumsum()  # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize

    # use linear interpolation of cdf to find new pixel values
    image_equalized = np.interp(image.flatten(), bins[:-1], cdf)

    return image_equalized.reshape(image.shape)


def create_png(viirs_data, image_id, x, y, station):

    im_size = 200
    half_im_size = im_size/2

    # # m1_params = viirs_data['All_Data']['VIIRS-M1-SDR_All']['RadianceFactors']
    # m1 = viirs_data['All_Data']['VIIRS-M1-SDR_All']['Radiance'][:]
    # # m4_params = viirs_data['All_Data']['VIIRS-M1-SDR_All']['RadianceFactors']
    # m4 = viirs_data['All_Data']['VIIRS-M4-SDR_All']['Radiance'][:]
    # # m5_params = viirs_data['All_Data']['VIIRS-M1-SDR_All']['RadianceFactors']
    # m5 = viirs_data['All_Data']['VIIRS-M5-SDR_All']['Radiance'][:]
    #
    # resampled_m1 = utm_rs.resample_image(m1, masked_lats, masked_lons, fill_value=0)
    # resampled_m4 = utm_rs.resample_image(m4, masked_lats, masked_lons, fill_value=0)
    # resampled_m5 = utm_rs.resample_image(m5, masked_lats, masked_lons, fill_value=0)

    resampled_m3 = viirs_data['m3']
    resampled_m4 = viirs_data['m4']
    resampled_m5 = viirs_data['m5']

    min_y = int(y-half_im_size) if y-half_im_size > 0 else 0
    min_x = int(x-half_im_size) if x-half_im_size > 0 else 0
    max_y = int(y+half_im_size) if y+half_im_size < resampled_m3.shape[0] else resampled_m3.shape[0]
    max_x = int(x+half_im_size) if x+half_im_size < resampled_m3.shape[1] else resampled_m3.shape[1]

    # subset to roi
    resampled_m3 = resampled_m3[min_y:max_y, min_x:max_x]
    resampled_m4 = resampled_m4[min_y:max_y, min_x:max_x]
    resampled_m5 = resampled_m5[min_y:max_y, min_x:max_x]

    # get station loc in scene
    diff_y = max_y - min_y
    diff_x = max_x - min_x

    if diff_y < im_size:
        if min_y == 0:
            pos_y = half_im_size - 1 - (im_size - diff_y)
        else:
            pos_y = half_im_size  # if greater we still use same position
    else:
        pos_y = half_im_size - 1

    if diff_x < im_size:
        if min_x == 0:
            pos_x = half_im_size - 1 - (im_size - diff_x)
        else:
            pos_x = half_im_size
    else:
        pos_x = half_im_size - 1

    # r = image_histogram_equalization(resampled_m5)
    # g = image_histogram_equalization(resampled_m4)
    # b = image_histogram_equalization(resampled_m1)

    r = resampled_m5
    g = resampled_m4
    b = resampled_m3

    r[r < 0] = 0
    g[g < 0] = 0
    b[b < 0] = 0

    r = np.round((r * (255 / np.max(r))) * 1).astype('uint8')
    g = np.round((g * (255 / np.max(g))) * 1).astype('uint8')
    b = np.round((b * (255 / np.max(b))) * 1).astype('uint8')

    rgb = np.dstack((r, g, b))

    plt.imshow(rgb)
    plt.plot(pos_x, pos_y, 'rx')

    # plot the area over which mean is computed
    plt.plot(pos_x-5, pos_y-5, 'bx')
    plt.plot(pos_x+5, pos_y-5, 'bx')
    plt.plot(pos_x-5, pos_y+5, 'bx')
    plt.plot(pos_x+5, pos_y+5, 'bx')

    plt.savefig(os.path.join(fp.path_to_aeronet_visuals, 'images_v2',
                             'id_' + str(image_id) + '_station_' + station + '.png'),
                bbox_inches='tight')


def main():

    aeronet_station_data = load_aeronet()
    viirs_orac_filepaths = glob.glob(fp.path_to_viirs_orac + '/*')

    optical_prop_suffix = 'AR2'

    image_id = 0
    data_dict = dict(x=[], y=[], dist=[], aod550=[], aod500=[], aod675=[],
                     orac_aod=[], orac_cost=[], viirs_aod=[], viirs_flag=[],
                     image_id=[], orac_file=[], station=[],
                     n_points_in_aeronet_mean=[], n_orac=[], n_viirs=[])

    # iterate over VIIRS AOD files
    for o_f in viirs_orac_filepaths:

        if not '.nc' in o_f:
            continue

        if optical_prop_suffix not in o_f:
            continue

        print o_f

        #i_f = '/Volumes/INTENSO/kcl-fire-aot/ODA/raw/viirs/orac/KCL-NCEO-L2-CLOUD-CLD-VIIRS_ORAC_NPP_201508070603_R4591AR2.primary.nc'
        #if o_f != i_f:
        #    continue

        timestamp = ut.get_timestamp(o_f, 'orac')
        dt = datetime.strptime(timestamp, '_%Y%m%d%H%M_')

        # check if station has intersection for given time stamp, if not continue
        if not aeronet_intersections(dt, aeronet_station_data):
            continue

        try:
            sat_data = ut.setup_sat_data(timestamp)
        except Exception, e:
            logger.info('Could not load all datasets for: ' + str(timestamp) + '. Failed with error: ' + str(e))
            continue

        try:
            sat_data_utm = ut.resample_satellite_datasets(sat_data, fill_value=-999)
        except:
            logger.warning('Could not resample sat data, continuing')
            continue

        # generate coordinate array from resampled grid
        rows = np.arange(sat_data_utm['lats'].shape[0])
        cols = np.arange(sat_data_utm['lats'].shape[1])
        cols, rows = np.meshgrid(cols, rows)

        # mask all points to valid
        mask = sat_data_utm['lats'] != -999
        resampled_lats_sub = sat_data_utm['lats'][mask]
        resampled_lons_sub = sat_data_utm['lons'][mask]
        cols = cols[mask]
        rows = rows[mask]

        balltree = make_balltree_subset(resampled_lats_sub, resampled_lons_sub)

        for station in aeronet_station_data:
            print
            print station

            station_df = aeronet_station_data[station]

            # locate aeronet station in scene
            x, y, dist, n_aeronet, aod550, aod500, aod675 = collocate_station(station_df, balltree,
                                                                              cols, rows, dt)

            # if nothing in scene continue
            if not x:
                print 'no station points within 30 minutes of overpass or within 2 arcminutes of image'
                continue

            print 'image lat', sat_data_utm['lats'][y, x]
            print 'image lon', sat_data_utm['lons'][y, x]

            # take valid mean AOD within 10km
            mean_orac_aod, mean_orac_cost, \
                mean_viirs_aod, mean_viirs_flag, \
                n_orac, n_viirs = get_aod(sat_data_utm, x, y)

            # append to dict
            data_dict['x'].append(x)
            data_dict['y'].append(y)
            data_dict['dist'].append(dist)
            data_dict['n_points_in_aeronet_mean'].append(n_aeronet)
            data_dict['n_orac'].append(n_orac)
            data_dict['n_viirs'].append(n_viirs)
            data_dict['aod550'].append(aod550)
            data_dict['aod500'].append(aod500)
            data_dict['aod675'].append(aod675)
            data_dict['orac_aod'].append(mean_orac_aod)
            data_dict['orac_cost'].append(mean_orac_cost)
            data_dict['viirs_aod'].append(mean_viirs_aod)
            data_dict['viirs_flag'].append(mean_viirs_flag)
            data_dict['image_id'].append(image_id)
            data_dict['orac_file'].append(o_f)
            data_dict['station'].append(station)

            create_png(sat_data_utm, image_id, x, y, station)

            # update image
            image_id += 1

    # convert dict to dataframe
    df = pd.DataFrame.from_dict(data_dict)

    # dump to csv
    df.to_csv(os.path.join(fp.path_to_dataframes, 'aeronet_comp_' + optical_prop_suffix + '.csv'))


if __name__ == "__main__":
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    logger = logging.getLogger(__name__)

    main()
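# A short illustrative sketch (not part of the script above) of its two numerical
# building blocks: the haversine BallTree nearest-pixel lookup and the Angstrom-exponent
# interpolation of AOD to 550 nm. The coordinates and AOD values below are made up.

import numpy as np
from sklearn.neighbors import BallTree

EARTH_RADIUS_KM = 6371.0

# 1. Nearest-pixel lookup: BallTree with the haversine metric expects [lat, lon]
#    pairs in radians and returns great-circle distances in radians.
lats = np.array([50.0, 50.1, 50.2])
lons = np.array([0.0, 0.1, 0.2])
tree = BallTree(np.deg2rad(np.column_stack([lats, lons])), metric='haversine')
dist_rad, idx = tree.query(np.deg2rad([[50.05, 0.05]]), k=1)
dist_km = dist_rad[0][0] * EARTH_RADIUS_KM  # radians -> kilometres

# 2. Spectral interpolation, as in interpolate_aod550() above:
#    AOD(550) = AOD(675) * (550/675) ** (-alpha)
aod_675 = 0.30
alpha = 1.2
aod_550 = aod_675 * (550. / 675) ** (-alpha)  # ~0.38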
#!/usr/bin/env vpython # Copyright 2020 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. # pylint: disable=no-value-for-parameter import logging import os import sys import unittest import test_env test_env.setup_test_env() import mock import parameterized from test_support import test_case from components.auth import model from components.config import fs from realms import config from realms import permissions def fake_db(rev, perms=None): b = permissions.Builder(rev) for p in (perms or []): b.permission(p) return b.finish() def fake_realms_rev(project_id, config_digest, perms_rev): return config.RealmsCfgRev( project_id, 'config-rev', config_digest, 'config-body', perms_rev) class CheckConfigChangesTest(test_case.TestCase): @mock.patch('realms.config.update_realms', autospec=True) @mock.patch('realms.config.delete_realms', autospec=True) def call(self, db, latest, stored, delete_realms_mock, update_realms_mock): updated = set() deleted = set() batches = [] def do_update(_db, revs, _comment): batches.append(len(revs)) for r in revs: self.assertNotIn(r.project_id, updated) self.assertNotIn(r.project_id, deleted) updated.add(r.project_id) update_realms_mock.side_effect = do_update def do_delete(project_id): self.assertNotIn(project_id, updated) self.assertNotIn(project_id, deleted) deleted.add(project_id) delete_realms_mock.side_effect = do_delete jobs = config.check_config_changes(db, latest, stored) self.assertTrue(config.execute_jobs(jobs, 0.0)) return updated, deleted, batches def test_noop_when_up_to_date(self): updated, deleted, _ = self.call( fake_db('db-rev'), [ fake_realms_rev('proj1', 'digest1', 'db-rev'), fake_realms_rev('proj2', 'digest1', 'db-rev'), ], [ fake_realms_rev('proj1', 'digest1', 'db-rev'), fake_realms_rev('proj2', 'digest1', 'db-rev'), ]) self.assertEqual(updated, set()) self.assertEqual(deleted, set()) def test_new_projects(self): updated, deleted, _ = self.call( fake_db('db-rev'), [ fake_realms_rev('proj1', 'digest1', 'db-rev'), fake_realms_rev('proj2', 'digest1', 'db-rev'), ], [ fake_realms_rev('proj1', 'digest1', 'db-rev'), ]) self.assertEqual(updated, {'proj2'}) self.assertEqual(deleted, set()) def test_updated_projects(self): updated, deleted, _ = self.call( fake_db('db-rev'), [ fake_realms_rev('proj1', 'digest1', 'db-rev'), fake_realms_rev('proj2', 'digest1', 'db-rev'), ], [ fake_realms_rev('proj1', 'digest1', 'db-rev'), fake_realms_rev('proj2', 'digest2', 'db-rev'), ]) self.assertEqual(updated, {'proj2'}) self.assertEqual(deleted, set()) def test_deleted_projects(self): updated, deleted, _ = self.call( fake_db('db-rev'), [ fake_realms_rev('proj1', 'digest1', 'db-rev'), ], [ fake_realms_rev('proj1', 'digest1', 'db-rev'), fake_realms_rev('proj2', 'digest2', 'db-rev'), ]) self.assertEqual(updated, set()) self.assertEqual(deleted, {'proj2'}) def test_perms_revision_change(self): revs = [ fake_realms_rev('proj%d' % i, 'digest1', 'db-rev1') for i in range(20) ] updated, deleted, batches = self.call(fake_db('db-rev2'), revs, revs) self.assertEqual(updated, {p.project_id for p in revs}) # all of them self.assertEqual(deleted, set()) self.assertEqual(len(batches), config.DB_REEVAL_REVISIONS) class CheckPermissionChangesTest(test_case.TestCase): def call(self, db): jobs = config.check_permission_changes(db) self.assertTrue(config.execute_jobs(jobs, 0.0)) def test_works(self): def perms_from_authdb(): e = model.realms_globals_key().get() return [p.name for p in 
e.permissions] if e else [] # The initial state. self.assertEqual(model.get_auth_db_revision(), 0) self.assertEqual(perms_from_authdb(), []) # Create the initial copy of AuthRealmsGlobals. self.call(fake_db('rev1', ['luci.dev.p1', 'luci.dev.p2'])) self.assertEqual(model.get_auth_db_revision(), 1) self.assertEqual(perms_from_authdb(), ['luci.dev.p1', 'luci.dev.p2']) # Noop change. self.call(fake_db('rev1', ['luci.dev.p1', 'luci.dev.p2'])) self.assertEqual(model.get_auth_db_revision(), 1) self.assertEqual(perms_from_authdb(), ['luci.dev.p1', 'luci.dev.p2']) # Real change. self.call(fake_db('rev2', ['luci.dev.p3'])) self.assertEqual(model.get_auth_db_revision(), 2) self.assertEqual(perms_from_authdb(), ['luci.dev.p3']) class ProjectConfigFetchTest(test_case.TestCase): @mock.patch('components.config.common.self_config_set', autospec=True) @mock.patch('components.config.fs.get_provider', autospec=True) def test_works(self, get_provider_mock, self_config_set_mock): TESTS_DIR = os.path.dirname(os.path.abspath(__file__)) get_provider_mock.return_value = fs.Provider( os.path.join(TESTS_DIR, 'test_data')) # See test_data/... layout. self_config_set_mock.return_value = 'services/auth-service-app-id' revs = config.get_latest_revs_async().get_result() self.assertEqual(sorted(revs, key=lambda r: r.project_id), [ config.RealmsCfgRev( project_id='@internal', config_rev='unknown', config_digest='90549bf56e8be6c0ff6001d2376db' + 'def519b97cc89e65b2813237b252300dea8', config_body='realms {\n name: "internal-realm"\n}\n', perms_rev=None, ), config.RealmsCfgRev( project_id='proj1', config_rev='unknown', config_digest='05105846cbabf80e1ab2979b7787' + 'f1df1aca9751661fe4b4d28494e0b442459b', config_body='realms {\n name: "realm1"\n}\n', perms_rev=None, ), config.RealmsCfgRev( project_id='proj2', config_rev='unknown', config_digest='fe0857c4fe4282083c0295ee835e7' + '96403027d13c652f4959a0c6a41957dbc18', config_body='realms {\n name: "realm2"\n}\n', perms_rev=None, ), ]) class RealmsUpdateTest(test_case.TestCase): @parameterized.parameterized.expand([ ('some-proj',), ('@internal',), ]) def test_realms_config_lifecycle(self, project_id): self.assertEqual(model.get_auth_db_revision(), 0) # A new config appears. rev = config.RealmsCfgRev( project_id=project_id, config_rev='cfg_rev1', config_digest='digest1', config_body='realms{ name: "realm1" }', perms_rev=None) config.update_realms(fake_db('db-rev1'), [rev], 'New config') # Generated new AuthDB revisions. self.assertEqual(model.get_auth_db_revision(), 1) # Stored now in the expanded form. ent = model.project_realms_key(project_id).get() self.assertEqual( [r.name for r in ent.realms.realms], ['%s:@root' % project_id, '%s:realm1' % project_id]) self.assertEqual(ent.config_rev, 'cfg_rev1') self.assertEqual(ent.perms_rev, 'db-rev1') # Permissions DB changes in a way that doesn't affect the expanded form. config.update_realms(fake_db('db-rev2'), [rev], 'Reeval') # Seeing the same AuthDB version. self.assertEqual(model.get_auth_db_revision(), 1) # The config body changes in a way that doesn't affect the expanded form. rev = config.RealmsCfgRev( project_id=project_id, config_rev='cfg_rev2', config_digest='digest2', config_body='realms{ name: "realm1" } # blah blah', perms_rev=None) config.update_realms(fake_db('db-rev2'), [rev], 'Updated config') # Still the same AuthDB version. self.assertEqual(model.get_auth_db_revision(), 1) # The config change significantly now. 
rev = config.RealmsCfgRev( project_id=project_id, config_rev='cfg_rev3', config_digest='digest3', config_body='realms{ name: "realm2" }', perms_rev=None) config.update_realms(fake_db('db-rev2'), [rev], 'Updated config') # New revision. self.assertEqual(model.get_auth_db_revision(), 2) # And new body. ent = model.project_realms_key(project_id).get() self.assertEqual( [r.name for r in ent.realms.realms], ['%s:@root' % project_id, '%s:realm2' % project_id]) self.assertEqual(ent.config_rev, 'cfg_rev3') self.assertEqual(ent.perms_rev, 'db-rev2') # The config is gone. config.delete_realms(project_id) # This generated a new revision. self.assertEqual(model.get_auth_db_revision(), 3) # And it is indeed gone. ent = model.project_realms_key(project_id).get() self.assertIsNone(ent) # The second deletion is noop. config.delete_realms(project_id) self.assertEqual(model.get_auth_db_revision(), 3) def test_update_many_projects(self): self.assertEqual(model.get_auth_db_revision(), 0) cfg_rev = lambda proj, realm, rev_sfx: config.RealmsCfgRev( project_id=proj, config_rev='cfg-rev-'+rev_sfx, config_digest='digest-'+rev_sfx, config_body='realms{ name: "%s" }' % realm, perms_rev=None) # Create a bunch of project configs at once. config.update_realms( fake_db('db-rev1'), [ cfg_rev('proj1', 'realm1', 'p1s1'), cfg_rev('proj2', 'realm1', 'p2s1'), ], 'New config') # Produced a single revision. self.assertEqual(model.get_auth_db_revision(), 1) # Present now. revs = config.get_stored_revs_async().get_result() self.assertEqual(revs, [ config.RealmsCfgRev( project_id='proj1', config_rev=u'cfg-rev-p1s1', config_digest=u'digest-p1s1', config_body=None, perms_rev=u'db-rev1', ), config.RealmsCfgRev( project_id='proj2', config_rev=u'cfg-rev-p2s1', config_digest=u'digest-p2s1', config_body=None, perms_rev=u'db-rev1', ), ]) self.assertEqual( model.project_realms_key('proj1').get().config_rev, 'cfg-rev-p1s1') self.assertEqual( model.project_realms_key('proj2').get().config_rev, 'cfg-rev-p2s1') # One is modified significantly, another not. config.update_realms( fake_db('db-rev1'), [ cfg_rev('proj1', 'realm1', 'p1s2'), # noop change cfg_rev('proj2', 'realm2', 'p2s2'), # significant change ], 'New config') revs = config.get_stored_revs_async().get_result() self.assertEqual( model.project_realms_key('proj1').get().config_rev, 'cfg-rev-p1s1') self.assertEqual( model.project_realms_key('proj2').get().config_rev, 'cfg-rev-p2s2') # One config is broken. config.update_realms( fake_db('db-rev1'), [ cfg_rev('proj1', 'realm3', 'p1s3'), cfg_rev('proj2', '@@@@@@', 'p2s3'), ], 'New config') revs = config.get_stored_revs_async().get_result() self.assertEqual( model.project_realms_key('proj1').get().config_rev, 'cfg-rev-p1s3') self.assertEqual( model.project_realms_key('proj2').get().config_rev, 'cfg-rev-p2s2') if __name__ == '__main__': if '-v' in sys.argv: unittest.TestCase.maxDiff = None logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.FATAL) unittest.main()
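# A simplified, standalone sketch (not the real realms.config implementation) of the
# decision rules asserted by CheckConfigChangesTest above: new or re-digested projects
# are (re)evaluated, projects missing from the latest configs are deleted, and a
# permissions-DB revision change forces re-evaluation of everything.

import collections

Rev = collections.namedtuple('Rev', 'project_id config_digest perms_rev')


def plan_changes(db_rev, latest, stored):
  stored_by_id = {r.project_id: r for r in stored}
  to_update, to_delete = set(), set()
  for rev in latest:
    old = stored_by_id.get(rev.project_id)
    if (old is None or old.config_digest != rev.config_digest
        or old.perms_rev != db_rev):
      to_update.add(rev.project_id)
  latest_ids = {r.project_id for r in latest}
  for rev in stored:
    if rev.project_id not in latest_ids:
      to_delete.add(rev.project_id)
  return to_update, to_delete


# Mirrors test_updated_projects: only proj2's stored digest differs, so only proj2
# needs re-evaluation and nothing is deleted.
latest = [Rev('proj1', 'digest1', 'db-rev'), Rev('proj2', 'digest1', 'db-rev')]
stored = [Rev('proj1', 'digest1', 'db-rev'), Rev('proj2', 'digest2', 'db-rev')]
assert plan_changes('db-rev', latest, stored) == ({'proj2'}, set())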
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Vincent Michel <vincent.michel@inria.fr> # Gilles Louppe <g.louppe@gmail.com> # # License: BSD 3 clause """Recursive feature elimination for feature ranking""" import warnings import numpy as np from ..utils import check_X_y, safe_sqr from ..utils.metaestimators import if_delegate_has_method from ..base import BaseEstimator from ..base import MetaEstimatorMixin from ..base import clone from ..base import is_classifier from ..cross_validation import _check_cv as check_cv from ..cross_validation import _safe_split, _score from ..metrics.scorer import check_scoring from .base import SelectorMixin class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin): """Feature ranking with recursive feature elimination. Given an external estimator that assigns weights to features (e.g., the coefficients of a linear model), the goal of recursive feature elimination (RFE) is to select features by recursively considering smaller and smaller sets of features. First, the estimator is trained on the initial set of features and weights are assigned to each one of them. Then, features whose absolute weights are the smallest are pruned from the current set features. That procedure is recursively repeated on the pruned set until the desired number of features to select is eventually reached. Parameters ---------- estimator : object A supervised learning estimator with a `fit` method that updates a `coef_` attribute that holds the fitted parameters. Important features must correspond to high absolute values in the `coef_` array. For instance, this is the case for most supervised learning algorithms such as Support Vector Classifiers and Generalized Linear Models from the `svm` and `linear_model` modules. n_features_to_select : int or None (default=None) The number of features to select. If `None`, half of the features are selected. step : int or float, optional (default=1) If greater than or equal to 1, then `step` corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then `step` corresponds to the percentage (rounded down) of features to remove at each iteration. estimator_params : dict Parameters for the external estimator. This attribute is deprecated as of version 0.16 and will be removed in 0.18. Use estimator initialisation or set_params method instead. verbose : int, default=0 Controls verbosity of output. Attributes ---------- n_features_ : int The number of selected features. support_ : array of shape [n_features] The mask of selected features. ranking_ : array of shape [n_features] The feature ranking, such that ``ranking_[i]`` corresponds to the ranking position of the i-th feature. Selected (i.e., estimated best) features are assigned rank 1. estimator_ : object The external estimator fit on the reduced dataset. Examples -------- The following example shows how to retrieve the 5 right informative features in the Friedman #1 dataset. >>> from sklearn.datasets import make_friedman1 >>> from sklearn.feature_selection import RFE >>> from sklearn.svm import SVR >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) >>> estimator = SVR(kernel="linear") >>> selector = RFE(estimator, 5, step=1) >>> selector = selector.fit(X, y) >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE array([ True, True, True, True, True, False, False, False, False, False], dtype=bool) >>> selector.ranking_ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) References ---------- .. 
[1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection for cancer classification using support vector machines", Mach. Learn., 46(1-3), 389--422, 2002. """ def __init__(self, estimator, n_features_to_select=None, step=1, estimator_params=None, verbose=0): self.estimator = estimator self.n_features_to_select = n_features_to_select self.step = step self.estimator_params = estimator_params self.verbose = verbose def fit(self, X, y): """Fit the RFE model and then the underlying estimator on the selected features. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] The training input samples. y : array-like, shape = [n_samples] The target values. """ X, y = check_X_y(X, y, "csc") # Initialization n_features = X.shape[1] if self.n_features_to_select is None: n_features_to_select = n_features / 2 else: n_features_to_select = self.n_features_to_select if 0.0 < self.step < 1.0: step = int(max(1, self.step * n_features)) else: step = int(self.step) if step <= 0: raise ValueError("Step must be >0") if self.estimator_params is not None: warnings.warn("The parameter 'estimator_params' is deprecated as " "of version 0.16 and will be removed in 0.18. The " "parameter is no longer necessary because the value " "is set via the estimator initialisation or " "set_params method.", DeprecationWarning) support_ = np.ones(n_features, dtype=np.bool) ranking_ = np.ones(n_features, dtype=np.int) # Elimination while np.sum(support_) > n_features_to_select: # Remaining features features = np.arange(n_features)[support_] # Rank the remaining features estimator = clone(self.estimator) if self.estimator_params: estimator.set_params(**self.estimator_params) if self.verbose > 0: print("Fitting estimator with %d features." % np.sum(support_)) estimator.fit(X[:, features], y) # Get coefs if hasattr(estimator, 'coef_'): coefs = estimator.coef_ elif hasattr(estimator, 'feature_importances_'): coefs = estimator.feature_importances_ else: raise RuntimeError('The classifier does not expose ' '"coef_" or "feature_importances_" ' 'attributes') # Get ranks if coefs.ndim > 1: ranks = np.argsort(safe_sqr(coefs).sum(axis=0)) else: ranks = np.argsort(safe_sqr(coefs)) # for sparse case ranks is matrix ranks = np.ravel(ranks) # Eliminate the worse features threshold = min(step, np.sum(support_) - n_features_to_select) support_[features[ranks][:threshold]] = False ranking_[np.logical_not(support_)] += 1 # Set final attributes self.estimator_ = clone(self.estimator) if self.estimator_params: self.estimator_.set_params(**self.estimator_params) self.estimator_.fit(X[:, support_], y) self.n_features_ = support_.sum() self.support_ = support_ self.ranking_ = ranking_ return self @if_delegate_has_method(delegate='estimator') def predict(self, X): """Reduce X to the selected features and then predict using the underlying estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- y : array of shape [n_samples] The predicted target values. """ return self.estimator_.predict(self.transform(X)) @if_delegate_has_method(delegate='estimator') def score(self, X, y): """Reduce X to the selected features and then return the score of the underlying estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. 
""" return self.estimator_.score(self.transform(X), y) def _get_support_mask(self): return self.support_ @if_delegate_has_method(delegate='estimator') def decision_function(self, X): return self.estimator_.decision_function(self.transform(X)) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): return self.estimator_.predict_proba(self.transform(X)) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): return self.estimator_.predict_log_proba(self.transform(X)) class RFECV(RFE, MetaEstimatorMixin): """Feature ranking with recursive feature elimination and cross-validated selection of the best number of features. Parameters ---------- estimator : object A supervised learning estimator with a `fit` method that updates a `coef_` attribute that holds the fitted parameters. Important features must correspond to high absolute values in the `coef_` array. For instance, this is the case for most supervised learning algorithms such as Support Vector Classifiers and Generalized Linear Models from the `svm` and `linear_model` modules. step : int or float, optional (default=1) If greater than or equal to 1, then `step` corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then `step` corresponds to the percentage (rounded down) of features to remove at each iteration. cv : int or cross-validation generator, optional (default=None) If int, it is the number of folds. If None, 3-fold cross-validation is performed by default. Specific cross-validation objects can also be passed, see `sklearn.cross_validation module` for details. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. estimator_params : dict Parameters for the external estimator. This attribute is deprecated as of version 0.16 and will be removed in 0.18. Use estimator initialisation or set_params method instead. verbose : int, default=0 Controls verbosity of output. Attributes ---------- n_features_ : int The number of selected features with cross-validation. support_ : array of shape [n_features] The mask of selected features. ranking_ : array of shape [n_features] The feature ranking, such that `ranking_[i]` corresponds to the ranking position of the i-th feature. Selected (i.e., estimated best) features are assigned rank 1. grid_scores_ : array of shape [n_subsets_of_features] The cross-validation scores such that ``grid_scores_[i]`` corresponds to the CV score of the i-th subset of features. estimator_ : object The external estimator fit on the reduced dataset. Notes ----- The size of ``grid_scores_`` is equal to (n_features + step - 2) // step + 1, where step is the number of features removed at each iteration. Examples -------- The following example shows how to retrieve the a-priori not known 5 informative features in the Friedman #1 dataset. >>> from sklearn.datasets import make_friedman1 >>> from sklearn.feature_selection import RFECV >>> from sklearn.svm import SVR >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) >>> estimator = SVR(kernel="linear") >>> selector = RFECV(estimator, step=1, cv=5) >>> selector = selector.fit(X, y) >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE array([ True, True, True, True, True, False, False, False, False, False], dtype=bool) >>> selector.ranking_ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) References ---------- .. 
[1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection for cancer classification using support vector machines", Mach. Learn., 46(1-3), 389--422, 2002. """ def __init__(self, estimator, step=1, cv=None, scoring=None, estimator_params=None, verbose=0): self.estimator = estimator self.step = step self.cv = cv self.scoring = scoring self.estimator_params = estimator_params self.verbose = verbose def fit(self, X, y): """Fit the RFE model and automatically tune the number of selected features. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where `n_samples` is the number of samples and `n_features` is the total number of features. y : array-like, shape = [n_samples] Target values (integers for classification, real numbers for regression). """ X, y = check_X_y(X, y, "csr") if self.estimator_params is not None: warnings.warn("The parameter 'estimator_params' is deprecated as " "of version 0.16 and will be removed in 0.18. " "The parameter is no longer necessary because the " "value is set via the estimator initialisation or " "set_params method.", DeprecationWarning) # Initialization rfe = RFE(estimator=self.estimator, n_features_to_select=1, step=self.step, estimator_params=self.estimator_params, verbose=self.verbose - 1) cv = check_cv(self.cv, X, y, is_classifier(self.estimator)) scorer = check_scoring(self.estimator, scoring=self.scoring) scores = np.zeros(X.shape[1]) n_features_to_select_by_rank = np.zeros(X.shape[1]) # Cross-validation for n, (train, test) in enumerate(cv): X_train, y_train = _safe_split(self.estimator, X, y, train) X_test, y_test = _safe_split(self.estimator, X, y, test, train) # Compute a full ranking of the features # ranking_ contains the same set of values for all CV folds, # but perhaps reordered ranking_ = rfe.fit(X_train, y_train).ranking_ # Score each subset of features for k in range(0, np.max(ranking_)): indices = np.where(ranking_ <= k + 1)[0] estimator = clone(self.estimator) estimator.fit(X_train[:, indices], y_train) score = _score(estimator, X_test[:, indices], y_test, scorer) if self.verbose > 0: print("Finished fold with %d / %d feature ranks, score=%f" % (k + 1, np.max(ranking_), score)) scores[k] += score # n_features_to_select_by_rank[k] is being overwritten # multiple times, but by the same value n_features_to_select_by_rank[k] = indices.size # Select the best upper bound for feature rank. It's OK to use the # last ranking_, as np.max(ranking_) is the same over all CV folds. scores = scores[:np.max(ranking_)] k = np.argmax(scores) # Re-execute an elimination with best_k over the whole set rfe = RFE(estimator=self.estimator, n_features_to_select=n_features_to_select_by_rank[k], step=self.step, estimator_params=self.estimator_params) rfe.fit(X, y) # Set final attributes self.support_ = rfe.support_ self.n_features_ = rfe.n_features_ self.ranking_ = rfe.ranking_ self.estimator_ = clone(self.estimator) if self.estimator_params: self.estimator_.set_params(**self.estimator_params) self.estimator_.fit(self.transform(X), y) # Fixing a normalization error, n is equal to len(cv) - 1 # here, the scores are normalized by len(cv) self.grid_scores_ = scores / len(cv) return self
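# A minimal, self-contained sketch (not part of scikit-learn) of the
# elimination loop that RFE.fit implements above: fit the estimator on the
# surviving features, rank them by squared weight, drop the `step` weakest,
# and repeat until `n_features_to_select` remain. It reuses the module-level
# `np` and `clone` imports, assumes a dense `coef_`, and skips the
# feature_importances_/fractional-step/ranking bookkeeping handled above.
def _simple_rfe_sketch(estimator, X, y, n_features_to_select, step=1):
    support = np.ones(X.shape[1], dtype=bool)
    while support.sum() > n_features_to_select:
        features = np.flatnonzero(support)
        est = clone(estimator).fit(X[:, features], y)
        coefs = np.square(est.coef_)
        if coefs.ndim > 1:
            # multi-output/multi-class: aggregate weights over outputs
            coefs = coefs.sum(axis=0)
        order = np.argsort(np.ravel(coefs))        # weakest features first
        n_drop = min(step, support.sum() - n_features_to_select)
        support[features[order[:n_drop]]] = False  # prune the weakest
    return support

# Hypothetical usage, mirroring the RFE doctest above:
#   mask = _simple_rfe_sketch(SVR(kernel="linear"), X, y,
#                             n_features_to_select=5)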
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TPUStrategy.""" from absl import logging from absl.testing import parameterized from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import strategy_test_lib from tensorflow.python.distribute import tpu_strategy as tpu_lib from tensorflow.python.distribute import tpu_values from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.eager import remote from tensorflow.python.eager import test from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as tf_device from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import flags from tensorflow.python.platform import tf_logging as logging from tensorflow.python.tpu import device_assignment as device_assignment_lib from tensorflow.python.tpu import tpu from tensorflow.python.tpu import tpu_strategy_util from tensorflow.python.training import server_lib from tensorflow.python.util import nest FLAGS = flags.FLAGS flags.DEFINE_string("tpu", "", "Name of TPU to connect to.") flags.DEFINE_string("project", None, "Name of GCP project with TPU.") flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.") def get_tpu_cluster_resolver(): resolver = tpu_cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project, ) return resolver def get_tpu_strategy(enable_packed_var=False): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) tpu_strategy_util.initialize_tpu_system(resolver) strategy = tpu_lib.TPUStrategyV2(resolver) strategy._enable_packed_variable_in_eager_mode = enable_packed_var return strategy # TPU tests which don't use TPUStrategy. 
@test_util.with_eager_op_as_function class TPUTest(test.TestCase): # In this case, the entire computation in foo is compiled using JIT # compilation. def test_single_tpu_jit_compile(self): if FLAGS.tpu_use_tfrt: self.skipTest( "This test triggers _XlaCompile and XlaLaunch which are not " "supported in tfrt yet. We should avoid using these kernels on TPU. " "However, it is a workaround to support b/129842431. We need more " "discussion about how to support it in the long term.") with ops.device("/device:TPU:0"): a = variables.Variable(1) def get_a_plus_one(): return a + 1 @def_function.function( input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def foo(x): b = x + get_a_plus_one() b = b + get_a_plus_one() return b + 1 with ops.device("/device:TPU:0"): result = foo(a) self.assertAllEqual(6, result) # In this case, each of the ops in the TPU device scope are compiled and run # individually. def test_single_tpu_on_demand(self): with ops.device("/device:TPU:0"): a = variables.Variable(1) def get_a_plus_one(): return a + 1 x = 1 with ops.device("/device:TPU:0"): b = x + get_a_plus_one() b = b + get_a_plus_one() result = b + 1 self.assertAllEqual(6, result) # In this case, each of the ops in the tf.function and TPU device scope are # compiled and run individually. def test_single_tpu_on_demand_tf_function(self): with ops.device("/device:TPU:0"): a = variables.Variable(1) def get_a_plus_one(): return a + 1 @def_function.function( input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def foo(x): with ops.device("/device:TPU:0"): b = x + get_a_plus_one() b = b + get_a_plus_one() return b + 1 result = foo(a) self.assertAllEqual(6, result) def test_multiple_initialize_system(self): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) tpu_strategy_util.initialize_tpu_system(resolver) with test.mock.patch.object(logging, "warning") as mock_log: tpu_strategy_util.initialize_tpu_system(resolver) self.assertRegex(str(mock_log.call_args), "already been initialized") def test_tpu_tf_function_same_device(self): with ops.device("/device:TPU:0"): a = variables.Variable(1) @function.defun_with_attributes(attributes={"_noinline": True}) def get_a_plus_one(): return a + 1 @def_function.function( input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def foo(x): with ops.device("/device:TPU:0"): b = x + get_a_plus_one() return b + 1 result = foo(a) self.assertAllEqual(4, result) def test_tpu_return_int32(self): with ops.device("/device:TPU:0"): a = variables.Variable(0) @def_function.function def foo(): return a + 1 @def_function.function def bar(): with ops.device("/device:TPU:1"): return foo() with ops.device("/device:CPU:0"): result = bar() + 1 self.assertAllEqual(result, 2) def test_tpu_output_device(self): def foo(): return 1 + 1 func1 = function.defun_with_attributes( foo, attributes={"_XlaMustCompile": False}) func2 = function.defun_with_attributes( foo, attributes={ "_OutputsOnOpDevice": True, "_XlaMustCompile": False }) with ops.device("/device:TPU:0"): ret1 = func1() ret2 = func2() self.assertAllEqual(ret1.backing_device, "/job:localhost/replica:0/task:0/device:CPU:0") self.assertAllEqual(ret2.backing_device, "/job:localhost/replica:0/task:0/device:TPU:0") def test_on_demand_op_with_dynamic_output(self): with ops.device("/device:TPU:0"): where_output = array_ops.where([True, False, True]) self.assertAllEqual(where_output, [[0], [2]]) with ops.device("/device:TPU:0"): repeat_output = array_ops.repeat(math_ops.range(2), [1, 4]) self.assertAllEqual(repeat_output, [0, 
1, 1, 1, 1]) @parameterized.named_parameters([("PackedVar", True), ("", False)]) @test_util.with_eager_op_as_function class TPUStrategyTest(test.TestCase, parameterized.TestCase): def test_handle_in_cross_replica_context(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): v = variables.Variable(1.0) @def_function.function def func(): self.assertEndsWith(v.handle.device, "device:TPU:0") return v + 1.0 ret = func() self.assertAllEqual(ret, 2.0) def testStaticHashTableDatasetFnHostTrainingLoop(self, enable_packed_var): self._dataset_fn_tracing_count = 0 strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): vals = [0, 1, 2] keys_tensor = constant_op.constant( list(range(len(vals))), dtype=dtypes.int64) vals_tensor = constant_op.constant(vals) initializer = lookup_ops.KeyValueTensorInitializer( keys_tensor, vals_tensor) per_worker_table = lookup_ops.StaticHashTable( initializer, default_value=-1) @def_function.function def dataset_fn(input_context): tensor = constant_op.constant([0, 1, 3], dtype=dtypes.int64) global_batch_size = 2 batch_size = input_context.get_per_replica_batch_size(global_batch_size) dataset = dataset_ops.Dataset.from_tensors(tensor).repeat().batch( batch_size, drop_remainder=True) dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) dataset = dataset.prefetch(2) # This prefetches 2 batches per device. dataset = dataset.map(per_worker_table.lookup) self._dataset_fn_tracing_count += 1 return dataset dist_iterator = iter( strategy.experimental_distribute_datasets_from_function(dataset_fn)) @def_function.function def step_fn(inputs): # inputs should be [0, 1, -1] return math_ops.reduce_sum(inputs) def train_steps(iterator, steps): for _ in math_ops.range(steps): strategy.run(step_fn, args=(next(iterator),)) train_steps(dist_iterator, steps=5) self.assertEqual(self._dataset_fn_tracing_count, 1) def test_function_compile_with_xla(self, enable_packed_var): if FLAGS.tpu_use_tfrt: self.skipTest( "This test triggers _XlaCompile and XlaLaunch which are not " "supported in tfrt yet. We should avoid using these kernels on TPU. " "However, it is a workaround to support b/129842431. We need more " "discussion about how to support it in the long term.") strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): v = variables.Variable(1.0) @def_function.function def func(): return v.read_value() + 1.0 with ops.device("/device:TPU:0"): self.assertAllEqual(func(), 2.0) def test_sequential_runs(self, enable_packed_var): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) # Computation replicated to all cores. device_assignment = device_assignment_lib.DeviceAssignment.build( topology, num_replicas=2) strategy = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment) strategy._enable_packed_variable_in_eager_mode = enable_packed_var # Computation on the 1st core. 
device_assignment2 = device_assignment_lib.DeviceAssignment.build( topology, num_replicas=1) strategy2 = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment2) def computation(x): return math_ops.square(x) @def_function.function def train_step(): outputs = strategy.experimental_local_results( strategy.run(computation, args=([2., 2.],))) outputs2 = strategy2.run( computation, args=([outputs[0]],)) return outputs2 self.assertAllEqual([[16., 16.]], train_step()) def test_device_switch_case(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): a = variables.Variable(1) inference_iteration = variables.Variable(-1) def inference_fn(x, i): return a + x + i @def_function.function def run_inference(x): def do_inference(device, inference_fn, i): with ops.device(device): return inference_fn(x, i) branch_fns = { 0: (lambda: do_inference("/device:TPU:0", inference_fn, 0)), 1: (lambda: do_inference("/device:TPU:1", inference_fn, 1)), } branch_index = inference_iteration.assign_add(1, use_locking=True) % 2 return control_flow_ops.switch_case(branch_index, branch_fns) self.assertAllEqual(2., run_inference(1)) # Use TPU core 0. self.assertAllEqual(3., run_inference(1)) # Use TPU core 1. def test_recover_from_compilation_failures(self, enable_packed_var): # TODO(b/148150981): Stop skipping this test once recovery works # for non-local TPU. if FLAGS.tpu: self.skipTest("Recovery fails for non-local TPU, see b/148150981") # Disable automatic outside compilation. config.set_soft_device_placement(False) strategy = get_tpu_strategy(enable_packed_var) @def_function.function def compilation_failure_run(): def computation(): return random_ops.random_gamma([10], [0.5, 1.5]) return strategy.run(computation) with self.assertRaises(errors.OpError): compilation_failure_run() @def_function.function def good_run(): def computation(): return random_ops.random_normal([10]) return strategy.run(computation) good_run() def test_dynamic_shape_with_outside_compilation_failure( self, enable_packed_var): # Enable automatic outside compilation. config.set_soft_device_placement(True) strategy = get_tpu_strategy(enable_packed_var) dataset = dataset_ops.Dataset.from_tensors(("string", 1.0)).repeat().batch( 2, drop_remainder=False) dataset = strategy.experimental_distribute_dataset(dataset) iterator = iter(dataset) @def_function.function def train_fn(iterator): def step_fn(inputs): input0, input1 = inputs return array_ops.size(input0), math_ops.reduce_sum(input1) return strategy.experimental_local_results( strategy.run(step_fn, args=(next(iterator),))) with self.assertRaises(errors.InvalidArgumentError): logging.info(train_fn(iterator)) def test_computation_on_subset_cores(self, enable_packed_var): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) all_core_strategy = tpu_lib.TPUStrategyV2(resolver) all_core_strategy._enable_packed_variable_in_eager_mode = enable_packed_var with all_core_strategy.scope(): v = variables.Variable(0.0, aggregation=variables.VariableAggregation.MEAN) # Computation on the 1st core. device_assignment = device_assignment_lib.DeviceAssignment.build( topology, num_replicas=1) first_core_strategy = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment) first_core_strategy._enable_packed_variable_in_eager_mode = ( enable_packed_var) # Computation on the 2nd core. 
device_assignment2 = device_assignment_lib.DeviceAssignment( topology, [[[0, 0, 0, 1]]]) second_core_strategy = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment2) second_core_strategy._enable_packed_variable_in_eager_mode = ( enable_packed_var) @def_function.function def train_step(): def step_fn(): return v + 1.0 all_core_strategy.run(step_fn) r1 = first_core_strategy.run(step_fn) r2 = second_core_strategy.run(step_fn) return r1 + r2 train_step() self.assertAllEqual(2., train_step()) def test_worker_devices_on_subset_cores(self, enable_packed_var): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) # Strategy for the 1st core. device_assignment = device_assignment_lib.DeviceAssignment.build( topology, num_replicas=1) first_core_strategy = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment) first_core_strategy._enable_packed_variable_in_eager_mode = ( enable_packed_var) # Strategy for the 2nd core. device_assignment2 = device_assignment_lib.DeviceAssignment( topology, [[[0, 0, 0, 1]]]) second_core_strategy = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment2) second_core_strategy._enable_packed_variable_in_eager_mode = ( enable_packed_var) self.assertLen(first_core_strategy.extended.worker_devices, 1) self.assertEndsWith(first_core_strategy.extended.worker_devices[0], "device:TPU:0") self.assertLen(second_core_strategy.extended.worker_devices, 1) self.assertEndsWith(second_core_strategy.extended.worker_devices[0], "device:TPU:1") def test_control_output_in_while_body_fn(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): v = variables.Variable( 0.0, aggregation=variables.VariableAggregation.MEAN) @def_function.function def train_step(): def step_fn(): v.assign_add(1) for _ in math_ops.range(2): strategy.run(step_fn) train_step() self.assertEqual(2.0, v.numpy()) def test_cluster_conditional_with_dynamic_shape(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) @def_function.function def train_step(): def shape_list(tensor): shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dynamic_shape = array_ops.shape(input=tensor) for index in non_static_indexes: shape[index] = dynamic_shape[index] return shape def step_fn(condition): where = array_ops.where(condition) if array_ops.shape(where)[0] > 0: tensor_shape = shape_list(where) d1 = tensor_shape[0] d2 = tensor_shape[1] where = array_ops.reshape(where, [d1, d2]) return where return strategy.run(step_fn, args=([True, False, True],)) outputs = strategy.experimental_local_results(train_step()) self.assertAllEqual(outputs[0].numpy(), [[0], [2]]) def test_cluster_in_graph_and_while_body_fn(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) @def_function.function def train_step(): def step_fn(prev): s = prev + 1 return s def init_fn(): return array_ops.zeros(shape=()) prev = strategy.run(init_fn) for _ in math_ops.range(10): prev = strategy.run(step_fn, args=(prev,)) return strategy.reduce(reduce_util.ReduceOp.SUM, prev, axis=None) sum_val = train_step().numpy().astype(float) self.assertEqual(sum_val, strategy.num_replicas_in_sync * 10) def test_two_clusters_with_same_fn(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) 
@def_function.function def foo(x): return strategy.run(lambda x: x + 1, (x,)) @def_function.function def bar(x): foo(x) return foo(x) bar(1) def test_tpu_variable_run_argument(self, enable_packed_var): # TPUStrategy.run() casts inputs to Tensor, but has logic to preserve # variables to avoid unintuitive errors. # Here we test that a TPUDistributedVariable passed to TPUStrategy.run() # remains a variable. strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): tpu_variable = variables.Variable(1) def replica_step(first_arg, variable): del first_arg # Just here to make sure we're not relying on arg position. if variable is not None: self.assertIsInstance(variable, tpu_values.TPUDistributedVariable) @def_function.function def step(): strategy.run( replica_step, args=( 2, tpu_variable, )) step() def test_tpu_run_arg_parsing(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): tpu_vars = [variables.Variable(1)] def only_star_args(*args): del args def pos_and_star_args(first_arg, *args): del first_arg del args def named_args(first_arg, second_arg): del first_arg del second_arg def star_args_and_kw_only(*args, kw): del args del kw # pylint:disable=function-redefined @def_function.function def step(): strategy.run(only_star_args, args=(2,)) step() @def_function.function def step(): strategy.run(named_args, kwargs={"first_arg": 2, "second_arg": 3}) step() with self.assertRaisesRegex(TypeError, r"got multiple values for argument"): @def_function.function def step(): strategy.run( named_args, args=(1,), kwargs={ "first_arg": 2, "second_arg": 3 }) step() with self.assertRaisesRegex(ValueError, r"cannot handle Variables passed to \*args"): @def_function.function def step(): strategy.run( only_star_args, args=( 2, tpu_vars, )) step() @def_function.function def step(): strategy.run(pos_and_star_args, args=(2, 3, 4)) step() @def_function.function def step(): strategy.run(star_args_and_kw_only, args=(2, 3), kwargs={"kw": tpu_vars}) step() with self.assertRaisesRegex(ValueError, r"mix of positional args and \*args"): @def_function.function def step(): strategy.run(pos_and_star_args, args=(tpu_vars, 3, 4)) step() with self.assertRaisesRegex(ValueError, r"Too many positional arguments"): @def_function.function def step(): strategy.run(named_args, args=(2, 3, 4)) step() class DummyClass: @def_function.function def method(self, arg_1): del arg_1 def step(self): strategy.run(self.method, args=(tpu_vars,)) DummyClass().step() # pylint:enable=function-redefined def test_using_external_variable_inside_tf_function(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) dataset = dataset_ops.Dataset.range( strategy.num_replicas_in_sync * 2, output_type=dtypes.float32).batch(strategy.num_replicas_in_sync) input_iterator = iter(strategy.experimental_distribute_dataset(dataset)) v = variables.Variable(2.0) @def_function.function def train_step(data): def computation(inputs): return inputs + v return strategy.run(computation, args=(data,)) expected_result = [[x + 2.] for x in range(0, strategy.num_replicas_in_sync) ] self.assertAllEqual( expected_result, strategy.experimental_local_results(train_step(next(input_iterator)))) # TODO(b/145574622): Remove this test once it is re-enabled in values_test.py. 
def test_all_reduce_on_sync_on_read_variable(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) dataset = dataset_ops.Dataset.range( strategy.num_replicas_in_sync, output_type=dtypes.float32).batch( strategy.num_replicas_in_sync, drop_remainder=True) input_iterator = iter(strategy.experimental_distribute_dataset(dataset)) with strategy.scope(): w = variables.Variable( (0.,), shape=(1,), trainable=False, synchronization=variables.VariableSynchronization.ON_READ, aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA) @def_function.function def run(iterator): def computation(x): w.assign(x + w) return w def all_reduce(x): ctx = distribution_strategy_context.get_replica_context() return ctx.all_reduce("SUM", w) + x outputs = strategy.run(computation, args=(next(iterator),)) outputs2 = strategy.experimental_local_results( strategy.run(all_reduce, args=(outputs,))) return outputs2 data = range(0, strategy.num_replicas_in_sync) data_sum = sum(data) expected_result = [ [x + data_sum] for x in range(0, strategy.num_replicas_in_sync) ] self.assertAllEqual(expected_result, run(input_iterator)) self.assertAllEqual((0.,), w.read_value()) def test_run_output_on_device(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) def computation(x): return math_ops.square(x) @def_function.function def train_step(): outputs = strategy.experimental_local_results( strategy.run(computation, args=(2,))) return outputs results = train_step() self.assertAllEqual([4., 4.], results) self.assertAllEqual("/job:localhost/replica:0/task:0/device:TPU:0", results[0].backing_device) self.assertAllEqual("/job:localhost/replica:0/task:0/device:TPU:1", results[1].backing_device) def test_run_passing_and_returning_nones(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) @def_function.function def train_step(): def computation(x): return x # Note that this input None is nested. 
outputs = strategy.experimental_local_results( strategy.run(computation, args=([1, [2, None]],))) return outputs results = train_step() self.assertAllEqual(1, results[0][0]) self.assertAllEqual(2, results[0][1][0]) self.assertIsNone(results[0][1][1]) def test_run_passing_and_returning_empty_list(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) @def_function.function def train_step(): def computation(x): return x outputs = strategy.experimental_local_results( strategy.run(computation, args=([],))) return outputs self.assertEqual([], train_step()[0]) def test_run_passing_and_returning_empty_dict(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) @def_function.function def train_step(): def computation(x): return x outputs = strategy.experimental_local_results( strategy.run(computation, args=({},))) return outputs self.assertEqual({}, train_step()[0]) def test_composite_input_output(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) if strategy.num_replicas_in_sync != 2: self.skipTest("Test assumes two replicas.") with strategy.scope(): table = variables.Variable( initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32) @def_function.function def sparse_lookup(iterator): def tpu_function(sparse): # Assumes dense_shape is (2, *) looked_up = array_ops.gather(table, sparse.values) segment_sum = math_ops.unsorted_segment_sum( looked_up, sparse.indices[:, 0], 2) return sparse, segment_sum return nest.map_structure( strategy.experimental_local_results, strategy.run(tpu_function, args=(next(iterator),))) def dataset_fn(_): dataset = dataset_ops.Dataset.range(2) def make_sparse(_): return sparse_tensor.SparseTensor( indices=array_ops.constant([[0, 0], [1, 0], [1, 1]], dtype=dtypes.int64), values=array_ops.constant([0, 0, 1], dtype=dtypes.int32), dense_shape=array_ops.constant([2, 2], dtype=dtypes.int64)) return dataset.map(make_sparse) dataset = iter( strategy.distribute_datasets_from_function( dataset_fn, distribute_lib.InputOptions(experimental_fetch_to_device=False))) sparse, result = sparse_lookup(dataset) # All replicas return identical reults. 
for replica in range(strategy.num_replicas_in_sync): self.assertIsInstance(sparse[replica], sparse_tensor.SparseTensor) self.assertAllEqual(sparse[replica].indices, [[0, 0], [1, 0], [1, 1]]) self.assertAllEqual(sparse[replica].values, [0, 0, 1]) self.assertAllEqual(sparse[replica].dense_shape, [2, 2]) self.assertAllEqual(result[replica], [[0.0, 1.0], [3.0, 8.0]]) def test_composite_input_non_flat_output(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) if strategy.num_replicas_in_sync != 2: self.skipTest("Test assumes two replicas.") with strategy.scope(): table = variables.Variable( initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32) @def_function.function def sparse_lookup(iterator): def tpu_function(sparse): # Assumes dense_shape is (2, *) looked_up = array_ops.gather(table, sparse.values) segment_sum = math_ops.unsorted_segment_sum( looked_up, sparse.indices[:, 0], 2) return {"sparse": sparse, "segment_sum": segment_sum} return nest.map_structure( strategy.experimental_local_results, strategy.run(tpu_function, args=(next(iterator),))) def dataset_fn(_): dataset = dataset_ops.Dataset.range(2) def make_sparse(_): return sparse_tensor.SparseTensor( indices=array_ops.constant([[0, 0], [1, 0], [1, 1]], dtype=dtypes.int64), values=array_ops.constant([0, 0, 1], dtype=dtypes.int32), dense_shape=array_ops.constant([2, 2], dtype=dtypes.int64)) return dataset.map(make_sparse) dataset = iter( strategy.distribute_datasets_from_function( dataset_fn, distribute_lib.InputOptions(experimental_fetch_to_device=False))) output = sparse_lookup(dataset) # All replicas return identical reults. for replica in range(strategy.num_replicas_in_sync): self.assertIsInstance(output["sparse"][replica], sparse_tensor.SparseTensor) self.assertAllEqual(output["sparse"][replica].indices, [[0, 0], [1, 0], [1, 1]]) self.assertAllEqual(output["sparse"][replica].values, [0, 0, 1]) self.assertAllEqual(output["sparse"][replica].dense_shape, [2, 2]) self.assertAllEqual(output["segment_sum"][replica], [[0.0, 1.0], [3.0, 8.0]]) def test_composite_input_dynamic_shapes_outside_compilation( self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) if strategy.num_replicas_in_sync != 2: self.skipTest("Test assumes two replicas.") table = variables.Variable( initial_value=[[0.0, 1.0], [3.0, 7.0]], dtype=dtypes.float32) @def_function.function def sparse_lookup(iterator): def tpu_function(sparse): lookup = tpu.outside_compilation( embedding_ops.safe_embedding_lookup_sparse, table, sparse) return math_ops.reduce_sum(lookup, axis=0) return strategy.experimental_local_results( strategy.run(tpu_function, args=(next(iterator),))) def dataset_fn(_): dataset = dataset_ops.Dataset.range(2) def make_sparse(i): indices = array_ops.constant([[0, 0], [1, 0], [1, 1]], dtype=dtypes.int64)[0:2 + i] values = array_ops.constant([0, 0, 1], dtype=dtypes.int32)[0:2 + i] shape = [ array_ops.constant([2], dtype=dtypes.int64), array_ops.expand_dims(1 + i, axis=0) ] dense_shape = array_ops.concat(shape, axis=0) return sparse_tensor.SparseTensor( indices=indices, values=values, dense_shape=dense_shape) return dataset.map(make_sparse) dataset = iter( strategy.distribute_datasets_from_function( dataset_fn, options=distribute_lib.InputOptions( experimental_fetch_to_device=False))) result = sparse_lookup(dataset) self.assertAllEqual(result, [[0.0, 2.0], [1.5, 5.0]]) def test_composite_input_with_non_flat_components(self, enable_packed_var): strategy = get_tpu_strategy(enable_packed_var) class 
TestCompositeTypeSpec(type_spec.TypeSpec): def __init__(self, component_type_spec): self._component_type_spec = component_type_spec @property def value_type(self): return TestComposite def _to_components(self, value): return value.values def _from_components(self, components): return TestComposite(components[0], components[1][0], components[1][1]) @property def _component_specs(self): return [self._component_type_spec, [self._component_type_spec, self._component_type_spec]] def _serialize(self): return (self._component_type_spec,) class TestComposite(composite_tensor.CompositeTensor): def __init__(self, value1, value2, value3): self.values = [value1, [value2, value3]] @property def _type_spec(self): return TestCompositeTypeSpec( tensor_spec.TensorSpec.from_tensor(self.values[0])) def _shape_invariant_to_type_spec(self, shape): return [shape, [shape, shape]] @def_function.function def test_fn(test_composite): def tpu_function(composite): return (composite, composite.values[0] + ( composite.values[1][0] + composite.values[1][1])/2) return nest.map_structure( strategy.experimental_local_results, strategy.run(tpu_function, args=(test_composite,))) a = array_ops.constant([0.1]) b = array_ops.constant([1.2]) c = array_ops.constant([-0.4]) test_composite = TestComposite(a, b, c) composite, result = test_fn(test_composite) # All replicas return identical reults. for replica in range(strategy.num_replicas_in_sync): self.assertIsInstance(composite[replica], TestComposite) self.assertAllEqual(composite[replica].values[0], a) self.assertAllEqual(composite[replica].values[1][0], b) self.assertAllEqual(composite[replica].values[1][1], c) self.assertAllEqual(result[replica], array_ops.constant([0.50000006])) def test_per_device_tracing_of_mirrored_variables(self, enable_packed_var): # Define trace_count as a list to avoid python scoping error trace_count = [0] strategy = get_tpu_strategy(enable_packed_var) with strategy.scope(): variable = variables.Variable(0.0) @def_function.function def add_one(): trace_count[0] = trace_count[0] + 1 return math_ops.add(variable, constant_op.constant(1.0)) @def_function.function def update_variable(): for device in set(strategy.extended.worker_devices): with ops.device(device): add_one() with strategy.scope(): update_variable.get_concrete_function() self.assertLen(strategy.extended.worker_devices, trace_count[0]) def test_tpu_cancellation_does_not_close_chips(self, enable_packed_var): if not FLAGS.tpu_use_tfrt: self.skipTest( "`tpu_cancellation_closes_chip only applies to TFRT TPU Runtime.") strategy = get_tpu_strategy(enable_packed_var) num_replicas = strategy.num_replicas_in_sync with strategy.scope(): x = random_ops.random_normal((10240, 10240)) y = random_ops.random_normal((10240, 10240)) v = variables.Variable(array_ops.identity(x)) dist_dataset = strategy.experimental_distribute_dataset( dataset_ops.Dataset.from_tensors(y).repeat(num_replicas).batch( num_replicas)) dist_iterator = iter(dist_dataset) @def_function.function def train_steps(v, iterator, steps): def step_fn(inputs): for val in inputs: v.assign(math_ops.matmul(v, val)) for _ in math_ops.range(steps): strategy.run(step_fn, args=(next(iterator),)) with self.assertRaises(errors.OutOfRangeError): # The iterator has num_replicas/num_replicas = 1 step only. train_steps(v, dist_iterator, 2) # If TPU chips are not closed we can run the function on TPU again. 
w = variables.Variable(array_ops.identity(x)) dist_dataset = strategy.experimental_distribute_dataset( dataset_ops.Dataset.from_tensors(y).repeat(num_replicas).batch( num_replicas)) dist_iterator = iter(dist_dataset) train_steps(w, dist_iterator, 1) @test_util.with_eager_op_as_function class TPUStrategyDataPrefetchTest(test.TestCase): def test_prefetch_to_device_default(self): strategy = get_tpu_strategy() dataset = dataset_ops.Dataset.range( strategy.num_replicas_in_sync * 2, output_type=dtypes.float32).batch(strategy.num_replicas_in_sync) # Check default, should prefetch to TPU. dataset_item = next(iter(strategy.experimental_distribute_dataset(dataset))) dataset_location = tf_device.DeviceSpec.from_string( dataset_item.values[0].device) self.assertEqual(dataset_location.device_type, "TPU") def test_prefetch_to_device_tpu(self): strategy = get_tpu_strategy() dataset = dataset_ops.Dataset.range( strategy.num_replicas_in_sync * 2, output_type=dtypes.float32).batch(strategy.num_replicas_in_sync) input_options = distribute_lib.InputOptions( experimental_fetch_to_device=True) dataset_item = next(iter(strategy.experimental_distribute_dataset( dataset, options=input_options))) dataset_location = tf_device.DeviceSpec.from_string( dataset_item.values[0].device) self.assertEqual(dataset_location.device_type, "TPU") def test_prefetch_to_device_cpu(self): strategy = get_tpu_strategy() dataset = dataset_ops.Dataset.range( strategy.num_replicas_in_sync * 2, output_type=dtypes.float32).batch(strategy.num_replicas_in_sync) # Should be CPU when prefetch_to_device is False. input_options = distribute_lib.InputOptions( experimental_fetch_to_device=False) dataset_item = next(iter(strategy.experimental_distribute_dataset( dataset, options=input_options))) dataset_location = tf_device.DeviceSpec.from_string( dataset_item.values[0].device) self.assertEqual(dataset_location.device_type, "CPU") def test_prefetch_to_device_sparse_dataset(self): strategy = get_tpu_strategy() # Values here aren't important. dataset = dataset_ops.Dataset.from_tensors( sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]], values=[1, 2, 3], dense_shape=[2, 2])) dataset = dataset.repeat() dataset = dataset.batch(strategy.num_replicas_in_sync) with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"): iter(strategy.experimental_distribute_dataset(dataset)) def test_prefetch_to_device_ragged_dataset(self): strategy = get_tpu_strategy() # Values here aren't important. dataset = dataset_ops.Dataset.from_tensors( ragged_tensor.RaggedTensor.from_row_splits( values=[1, 2, 3], row_splits=[0, 2, 3])) dataset = dataset.repeat() dataset = dataset.batch(strategy.num_replicas_in_sync) with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"): iter(strategy.experimental_distribute_dataset(dataset)) def test_prefetch_to_device_sparse_dataset_fn(self): strategy = get_tpu_strategy() def dataset_fn(ctx): del ctx # Values here aren't important. dataset = dataset_ops.Dataset.from_tensors( sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]], values=[1, 2, 3], dense_shape=[2, 2])) dataset = dataset.repeat() return dataset.batch(strategy.num_replicas_in_sync) with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"): iter(strategy.distribute_datasets_from_function(dataset_fn)) def test_prefetch_to_device_ragged_dataset_fn(self): strategy = get_tpu_strategy() def dataset_fn(ctx): del ctx # Values here aren't important. 
dataset = dataset_ops.Dataset.from_tensors( ragged_tensor.RaggedTensor.from_row_splits( values=[1, 2, 3], row_splits=[0, 2, 3])) dataset = dataset.repeat() return dataset.batch(strategy.num_replicas_in_sync) with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"): iter(strategy.distribute_datasets_from_function(dataset_fn)) def test_create_iterator_on_device(self): @def_function.function def create_iter(): with ops.device("/device:TPU:0"): return gen_dataset_ops.anonymous_iterator_v3( output_types=[dtypes.float32], output_shapes=[[]]) create_iter() @test_util.with_eager_op_as_function class TPUStrategyDistributionTest( strategy_test_lib.DistributionTestBase, strategy_test_lib.TwoDeviceDistributionTestBase): def test_update_config_proto(self): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) tpu_strategy_util.initialize_tpu_system(resolver) strategy = tpu_lib.TPUStrategyV2(resolver) config_proto = config_pb2.ConfigProto() cluster_spec = server_lib.ClusterSpec({"worker": ["fake1", "fake2"]}) with test.mock.patch.object( resolver, "cluster_spec", return_value=cluster_spec): new_config = strategy.update_config_proto(config_proto) # Verify cluster_def. self.assertProtoEquals(cluster_spec.as_cluster_def(), new_config.cluster_def) # Verify isolate_session_state self.assertTrue(new_config.isolate_session_state) def test_make_input_fn_iterable(self): dataset_fn = lambda: dataset_ops.Dataset.range(10) expected_values = [[i, i+1] for i in range(0, 10, 2)] distribution = get_tpu_strategy() input_fn = self._input_fn_to_test_input_context( dataset_fn, expected_num_replicas_in_sync=2, expected_num_input_pipelines=1, expected_input_pipeline_id=0) self._test_input_fn_iterable(distribution, input_fn, expected_values) def test_make_input_fn_iterator(self): dataset_fn = lambda: dataset_ops.Dataset.range(10) expected_values = [[i, i+1] for i in range(0, 10, 2)] distribution = get_tpu_strategy() input_fn = self._input_fn_to_test_input_context( dataset_fn, expected_num_replicas_in_sync=2, expected_num_input_pipelines=1, expected_input_pipeline_id=0) iterator = distribution.make_input_fn_iterator(input_fn) self._test_input_fn_iterator( iterator, distribution.extended.worker_devices, expected_values) def test_num_replicas_in_sync(self): strategy = get_tpu_strategy() self.assertEqual(2, strategy.num_replicas_in_sync) def test_call_and_merge_exceptions(self): strategy = get_tpu_strategy() self._test_call_and_merge_exceptions(strategy) def test_numpy_dataset(self): strategy = get_tpu_strategy() self._test_numpy_dataset(strategy, run_in_function=True) def test_global_step_update(self): strategy = get_tpu_strategy() self._test_global_step_update(strategy) def test_run(self): strategy = get_tpu_strategy() self._test_run(strategy, run_in_function=True) def test_summary_for_replica_zero_only(self): strategy = get_tpu_strategy() self._test_summary_for_replica_zero_only(strategy) def test_all_reduce_sum(self): strategy = get_tpu_strategy() self._test_all_reduce_sum(strategy, run_in_function=True) def test_all_reduce_sum_gradients(self): strategy = get_tpu_strategy() self._test_all_reduce_sum_gradients(strategy, run_in_function=True) def test_all_reduce_sum_gradient_tape(self): strategy = get_tpu_strategy() self._test_all_reduce_sum_gradient_tape(strategy, run_in_function=True) def test_all_reduce_mean(self): strategy = get_tpu_strategy() self._test_all_reduce_mean(strategy, run_in_function=True) def test_all_reduce_mean_gradients(self): strategy = get_tpu_strategy() 
self._test_all_reduce_mean_gradients(strategy, run_in_function=True) def test_all_reduce_mean_gradient_tape(self): strategy = get_tpu_strategy() self._test_all_reduce_mean_gradient_tape(strategy, run_in_function=True) def test_reduce(self): strategy = get_tpu_strategy() inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensor_slices([2., 3.])) self.evaluate(inputs.initialize()) per_replica_outputs = strategy.run( def_function.function(math_ops.square), args=(next(inputs),)) with strategy.scope(): mean = strategy.reduce(reduce_util.ReduceOp.MEAN, per_replica_outputs, axis=None) self.assertEqual(6.5, self.evaluate(mean)) def test_constraint(self): strategy = get_tpu_strategy() with strategy.scope(): variable = variables.Variable(initial_value=2., constraint=lambda x: 0. * x + 1.) self.assertEqual(variable.value().numpy(), 2) @def_function.function def update_variable(): variable.assign_add(1) variable.assign(variable.constraint(variable)) update_variable() self.assertEqual(variable.value().numpy(), 1) def test_trainable_variables(self): strategy = get_tpu_strategy() self._test_trainable_variable(strategy) @test_util.with_eager_op_as_function class DeviceAssignmentTest(test.TestCase): def test_core_assignment(self): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) device_assignment = device_assignment_lib.DeviceAssignment( topology, core_assignment=[[[0, 0, 0, 0]]]) self.assertAllEqual([[[0, 0, 0, 0]]], device_assignment.core_assignment) self.assertEqual(1, device_assignment.num_cores_per_replica) self.assertEqual(1, device_assignment.num_replicas) self.assertEqual("/task:0/device:TPU:0", device_assignment.tpu_device()) self.assertEqual("/task:0/device:CPU:0", device_assignment.host_device()) def test_device_assignment_strategy_properties(self): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) device_assignment = device_assignment_lib.DeviceAssignment( topology, core_assignment=[[[0, 0, 0, 0]]]) strategy = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment) self.assertEqual(strategy.extended.num_hosts, 1) self.assertEqual(strategy.num_replicas_in_sync, 1) self.assertEqual(strategy.extended.num_replicas_per_host, 1) # pylint: disable=protected-access def test_device_assignment_constants(self): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) device_assignment = device_assignment_lib.DeviceAssignment( topology, core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT) self.assertAllEqual([[[0, 0, 0, 0]]], device_assignment.core_assignment) self.assertEqual(1, device_assignment.num_cores_per_replica) self.assertEqual(1, device_assignment.num_replicas) self.assertEqual("/task:0/device:TPU:0", device_assignment.tpu_device()) self.assertEqual("/task:0/device:CPU:0", device_assignment.host_device()) def test_variables_mismatched_device_assignment(self): resolver = get_tpu_cluster_resolver() remote.connect_to_cluster(resolver) topology = tpu_strategy_util.initialize_tpu_system(resolver) strategy0 = tpu_lib.TPUStrategyV2(resolver) self.assertEqual( ("/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1"), strategy0.extended.worker_devices) with strategy0.scope(): v = variables.Variable(1.) 
v1_assign_op = strategy0.experimental_local_results(v)[1].assign(42.) with self.cached_session(): self.evaluate(variables.global_variables_initializer()) self.evaluate(v1_assign_op) self.assertAllEqual([1., 42.], self.evaluate( strategy0.experimental_local_results(v))) # Second strategy has devices reversed relative to the first. device_assignment = device_assignment_lib.DeviceAssignment( topology, core_assignment=[[[0, 0, 0, 1]], [[0, 0, 0, 0]]]) strategy1 = tpu_lib.TPUStrategyV2( resolver, experimental_device_assignment=device_assignment) self.assertEqual( ("/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU:0"), strategy1.extended.worker_devices) v_read = strategy1.run(def_function.function(v.read_value)) with self.cached_session(): self.assertAllEqual([42., 1.], self.evaluate( strategy0.experimental_local_results(v_read))) if __name__ == "__main__": test.main()
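# A minimal sketch (not one of the tests above) of the training-loop pattern
# these tests exercise repeatedly: build a strategy, distribute a dataset,
# run a per-replica step inside a tf.function, and reduce the per-replica
# results. It only reuses helpers and modules already imported in this file;
# the dataset contents and the step computation are arbitrary placeholders.
def _example_train_loop(steps=2):
  strategy = get_tpu_strategy()
  dataset = dataset_ops.Dataset.range(
      strategy.num_replicas_in_sync * steps,
      output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
  iterator = iter(strategy.experimental_distribute_dataset(dataset))

  @def_function.function
  def train_step(inputs):

    def step_fn(x):
      return x * 2.0  # stand-in for a real forward/backward pass

    per_replica = strategy.run(step_fn, args=(inputs,))
    return strategy.reduce(reduce_util.ReduceOp.SUM, per_replica, axis=None)

  return [train_step(next(iterator)).numpy() for _ in range(steps)]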
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django import forms from django.contrib.admin import widgets from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.http import HttpResponseRedirect, Http404 from django.shortcuts import render, redirect, get_object_or_404 from django.utils.translation import ugettext_lazy as _ from .models import Folder, File, Image, Clipboard, tools, FolderRoot from . import settings as filer_settings class NewFolderForm(forms.ModelForm): class Meta: model = Folder fields = ('name',) widgets = { 'name': widgets.AdminTextInputWidget, } def popup_status(request): return ('_popup' in request.GET or 'pop' in request.GET or '_popup' in request.POST or 'pop' in request.POST) def selectfolder_status(request): return 'select_folder' in request.GET or 'select_folder' in request.POST def popup_param(request, separator="?"): if popup_status(request): return "%s_popup=1" % separator else: return "" def selectfolder_param(request, separator="&"): if selectfolder_status(request): return "%sselect_folder=1" % separator else: return "" def _userperms(item, request): r = [] ps = ['read', 'edit', 'add_children'] for p in ps: attr = "has_%s_permission" % p if hasattr(item, attr): x = getattr(item, attr)(request) if x: r.append(p) return r def canonical(request, uploaded_at, file_id): """ Redirect to the current url of a public file """ filer_file = get_object_or_404(File, pk=file_id, is_public=True) if (uploaded_at != filer_file.uploaded_at.strftime('%s') or not filer_file.file): raise Http404('No %s matches the given query.' % File._meta.object_name) return redirect(filer_file.url) @login_required def edit_folder(request, folder_id): # TODO: implement edit_folder view folder = None return render(request, 'admin/filer/folder/folder_edit.html', { 'folder': folder, 'is_popup': popup_status(request), 'select_folder': selectfolder_status(request), }) @login_required def edit_image(request, folder_id): # TODO: implement edit_image view folder = None return render(request, 'filer/image_edit.html', { 'folder': folder, 'is_popup': popup_status(request), 'select_folder': selectfolder_status(request), }) @login_required def make_folder(request, folder_id=None): if not folder_id: folder_id = request.GET.get('parent_id', None) if not folder_id: folder_id = request.POST.get('parent_id', None) if folder_id: folder = Folder.objects.get(id=folder_id) else: folder = None if request.user.is_superuser: pass elif folder is None: # regular users may not add root folders unless configured otherwise if not filer_settings.FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS: raise PermissionDenied elif not folder.has_add_children_permission(request): # the user does not have the permission to add subfolders raise PermissionDenied if request.method == 'POST': new_folder_form = NewFolderForm(request.POST) if new_folder_form.is_valid(): new_folder = new_folder_form.save(commit=False) if (folder or FolderRoot()).contains_folder(new_folder.name): new_folder_form._errors['name'] = new_folder_form.error_class( [_('Folder with this name already exists.')]) else: new_folder.parent = folder new_folder.owner = request.user new_folder.save() return render(request, 'admin/filer/dismiss_popup.html') else: new_folder_form = NewFolderForm() return render(request, 'admin/filer/folder/new_folder_form.html', { 'opts': Folder._meta, 'new_folder_form': new_folder_form, 'is_popup': popup_status(request), 'select_folder': 
selectfolder_status(request), }) class UploadFileForm(forms.ModelForm): class Meta: model = Image exclude = () @login_required def upload(request): return render(request, 'filer/upload.html', { 'title': 'Upload files', 'is_popup': popup_status(request), 'select_folder': selectfolder_status(request), }) @login_required def paste_clipboard_to_folder(request): if request.method == 'POST': folder = Folder.objects.get(id=request.POST.get('folder_id')) clipboard = Clipboard.objects.get(id=request.POST.get('clipboard_id')) if folder.has_add_children_permission(request): tools.move_files_from_clipboard_to_folder(clipboard, folder) tools.discard_clipboard(clipboard) else: raise PermissionDenied redirect = request.GET.get('redirect_to', '') if not redirect: redirect = request.POST.get('redirect_to', '') return HttpResponseRedirect('%s?order_by=-modified_at%s%s' % ( redirect, popup_param(request, separator='&'), selectfolder_param(request))) @login_required def discard_clipboard(request): if request.method == 'POST': clipboard = Clipboard.objects.get(id=request.POST.get('clipboard_id')) tools.discard_clipboard(clipboard) return HttpResponseRedirect('%s%s%s' % ( request.POST.get('redirect_to', ''), popup_param(request), selectfolder_param(request))) @login_required def delete_clipboard(request): if request.method == 'POST': clipboard = Clipboard.objects.get(id=request.POST.get('clipboard_id')) tools.delete_clipboard(clipboard) return HttpResponseRedirect('%s%s%s' % ( request.POST.get('redirect_to', ''), popup_param(request), selectfolder_param(request))) @login_required def clone_files_from_clipboard_to_folder(request): if request.method == 'POST': clipboard = Clipboard.objects.get(id=request.POST.get('clipboard_id')) folder = Folder.objects.get(id=request.POST.get('folder_id')) tools.clone_files_from_clipboard_to_folder(clipboard, folder) return HttpResponseRedirect('%s%s%s' % ( request.POST.get('redirect_to', ''), popup_param(request), selectfolder_param(request)))
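# A small, self-contained sketch (not part of filer) showing how the
# popup_param()/selectfolder_param() helpers above compose into redirect
# URLs: the first appended parameter needs a '?' separator, every following
# one needs '&'. `_FakeRequest` is a hypothetical stand-in exposing only the
# attributes those helpers inspect.
class _FakeRequest(object):
    def __init__(self, **params):
        self.GET = params
        self.POST = {}


def _example_redirect_urls():
    request = _FakeRequest(_popup='1', select_folder='1')
    # Base URL without a query string: let popup_param add the '?'.
    plain = '%s%s%s' % (
        '/admin/filer/folder/',
        popup_param(request),                 # -> '?_popup=1'
        selectfolder_param(request))          # -> '&select_folder=1'
    # Base URL that already carries a query string (as in
    # paste_clipboard_to_folder above): switch popup_param to '&'.
    with_query = '%s?order_by=-modified_at%s%s' % (
        '/admin/filer/folder/',
        popup_param(request, separator='&'),  # -> '&_popup=1'
        selectfolder_param(request))
    return plain, with_query
    # ('/admin/filer/folder/?_popup=1&select_folder=1',
    #  '/admin/filer/folder/?order_by=-modified_at&_popup=1&select_folder=1')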
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import json import os from pants.base.exceptions import TaskError from pants.base.workunit import WorkUnitLabel from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import pushd from pants.util.dirutil import relative_symlink from pants.util.fileutil import safe_temp_edit from pants.contrib.node.subsystems.package_managers import ( PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, ) from pants.contrib.node.subsystems.resolvers.node_resolver_base import NodeResolverBase from pants.contrib.node.targets.node_module import NodeModule from pants.contrib.node.tasks.node_resolve import NodeResolve class NpmResolver(Subsystem, NodeResolverBase): options_scope = "npm-resolver" @classmethod def register_options(cls, register): super().register_options(register) register( "--install-optional", type=bool, default=False, fingerprint=True, help="If enabled, install optional dependencies.", ) register( "--install-production", type=bool, default=False, fingerprint=True, help="If enabled, do not install devDependencies.", ) register( "--force", type=bool, default=False, fingerprint=True, help="If enabled, refetch and resolve dependencies even if they are already built.", ) register( "--frozen-lockfile", type=bool, default=True, fingerprint=True, help="If enabled, disallow automatic update of lock files.", ) # There are cases where passed through options does not override hard-coded options. # One example is for node-install, --frozen-lockfile=False is the dominate configuration # as it allows the user to modify dependencies and generate a new lockfile. # By turning on --force-option-override, the user accepts full responsibilities. register( "--force-option-override", type=bool, default=False, fingerprint=True, advanced=True, help="If enabled, options will override hard-coded values. Be aware of default values.", ) NodeResolve.register_resolver_for_type(NodeModule, cls) def resolve_target( self, node_task, target, results_dir, node_paths, resolve_locally=False, install_optional=None, production_only=None, force=None, frozen_lockfile=None, **kwargs, ): """Installs the node_package target in the results directory copying sources if necessary. :param Task node_task: The task executing this method. :param Target target: The target being resolve. :param String results_dir: The output location where this target will be resolved. :param NodePaths node_paths: A mapping of targets and their resolved location, if resolved. :param Boolean resolve_locally: If true, the sources do not have to be copied. :param Boolean install_optional: If true, install optional dependencies. :param Boolean force: If true, rebuild dependencies even if already built. :param Boolean frozen_lockfile: Preserve lock file and fails if a change is detected. 
""" if self.get_options().force_option_override: install_optional = self.get_options().install_optional production_only = self.get_options().install_production force = self.get_options().force frozen_lockfile = self.get_options().frozen_lockfile else: install_optional = ( install_optional if install_optional is not None else self.get_options().install_optional ) production_only = ( production_only if production_only is not None else self.get_options().install_production ) force = force if force is not None else self.get_options().force frozen_lockfile = ( frozen_lockfile if frozen_lockfile is not None else self.get_options().frozen_lockfile ) if not resolve_locally: self._copy_sources(target, results_dir) with pushd(results_dir): if not os.path.exists("package.json"): raise TaskError( "Cannot find package.json. Did you forget to put it in target sources?" ) # TODO: remove/remodel the following section when node_module dependency is fleshed out. package_manager = node_task.get_package_manager(target=target).name if package_manager == PACKAGE_MANAGER_NPM: if resolve_locally: raise TaskError( "Resolving node package modules locally is not supported for NPM." ) if os.path.exists("npm-shrinkwrap.json"): node_task.context.log.info( "Found npm-shrinkwrap.json, will not inject package.json" ) else: node_task.context.log.warn( "Cannot find npm-shrinkwrap.json. Did you forget to put it in target sources? " "This package will fall back to inject package.json with pants BUILD dependencies " "including node_remote_module and other node dependencies. However, this is " "not fully supported." ) self._emit_package_descriptor(node_task, target, results_dir, node_paths) elif package_manager == PACKAGE_MANAGER_YARNPKG: if not os.path.exists("yarn.lock") and frozen_lockfile: raise TaskError( "Cannot find yarn.lock. Did you forget to put it in target sources?" ) # Install all dependencies except for `file:` dependencies # `file:` dependencies are special dependencies that point to a local path to a pants node target # `file:` dependencies are already in the build graph and should be already be installed by this point # Copy the package.json and then remove the file: dependencies from package.json # Run the install and symlink the file: dependencies using their node_paths # Afterwards, restore the original package.json to not cause diff changes when resolve_locally=True # The file mutation is occurring in place and the package.json may potentially not be restored here # if the process is closed. with safe_temp_edit("package.json") as package_json: with open(package_json, "r") as package_json_file: json_data = json.load(package_json_file) source_deps = { k: v for k, v in json_data.get("dependencies", {}).items() if self.parse_file_path(v) } third_party_deps = { k: v for k, v in json_data.get("dependencies", {}).items() if not self.parse_file_path(v) } json_data["dependencies"] = third_party_deps # TODO(6489): Currently the file: dependencies need to be duplicated in BUILD. # After this issue is closed, only dependencies need to be specified in package.json for package_name, file_path in source_deps.items(): if self._get_target_from_package_name(target, package_name, file_path) is None: raise TaskError( "Local dependency in package.json not found in the build graph. " 'Check your BUILD file for missing dependencies. 
["{}": {}]'.format( package_name, file_path ) ) with open(package_json, "w") as package_json_file: json.dump(json_data, package_json_file, indent=2, separators=(",", ": ")) result, command = node_task.install_module( target=target, install_optional=install_optional, production_only=production_only, force=force, frozen_lockfile=frozen_lockfile, workunit_name=target.address.reference(), workunit_labels=[WorkUnitLabel.COMPILER], ) if result != 0: raise TaskError( "Failed to resolve dependencies for {}:\n\t{} failed with exit code {}".format( target.address.reference(), command, result ) ) if source_deps: self._link_source_dependencies( node_task, target, results_dir, node_paths, source_deps ) def _link_source_dependencies(self, node_task, target, results_dir, node_paths, source_deps): for package_name, file_path in source_deps.items(): # Package name should always the same as the target name dep = self._get_target_from_package_name(target, package_name, file_path) # Apply node-scoping rules if applicable node_scope = dep.payload.node_scope or node_task.node_distribution.node_scope dep_package_name = self._scoped_package_name(node_task, dep.package_name, node_scope) # Symlink each target dep_path = node_paths.node_path(dep) node_module_dir = os.path.join(results_dir, "node_modules") relative_symlink(dep_path, os.path.join(node_module_dir, dep_package_name)) # If there are any bin, we need to symlink those as well bin_dir = os.path.join(node_module_dir, ".bin") for bin_name, rel_bin_path in dep.bin_executables.items(): bin_path = os.path.join(dep_path, rel_bin_path) relative_symlink(bin_path, os.path.join(bin_dir, bin_name)) @staticmethod def _scoped_package_name(node_task, package_name, node_scope): """Apply a node_scope to the package name. Overrides any existing package_name if already in a scope :return: A package_name with prepended with a node scope via '@' """ if not node_scope: return package_name scoped_package_name = package_name chunk = package_name.split("/", 1) if len(chunk) > 1 and chunk[0].startswith("@"): scoped_package_name = os.path.join(f"@{node_scope}", chunk[1:]) else: scoped_package_name = os.path.join(f"@{node_scope}", package_name) node_task.context.log.debug( f'Node package "{package_name}" will be resolved with scope "{scoped_package_name}".' ) return scoped_package_name @staticmethod def _emit_package_descriptor(node_task, target, results_dir, node_paths): dependencies = { dep.package_name: node_paths.node_path(dep) if node_task.is_node_module(dep) else dep.version for dep in target.dependencies } package_json_path = os.path.join(results_dir, "package.json") if os.path.isfile(package_json_path): with open(package_json_path, "r") as fp: package = json.load(fp) else: package = {} if "name" not in package: package["name"] = target.package_name elif package["name"] != target.package_name: raise TaskError( "Package name in the corresponding package.json is not the same " "as the BUILD target name for {}".format(target.address.reference()) ) if "version" not in package: package["version"] = "0.0.0" # TODO(Chris Pesto): Preserve compatibility with normal package.json files by dropping existing # dependency fields. This lets Pants accept working package.json files from standalone projects # that can be "npm install"ed without Pants. Taking advantage of this means expressing # dependencies in package.json and BUILD, though. 
In the future, work to make # Pants more compatible with package.json to eliminate duplication if you still want your # project to "npm install" through NPM by itself. dependencies_to_remove = [ "dependencies", "devDependencies", "peerDependencies", "optionalDependencies", ] node_task.context.log.debug( f"Removing {dependencies_to_remove} from package.json for {package['name']}" ) for dependencyType in dependencies_to_remove: package.pop(dependencyType, None) node_task.context.log.debug(f"Adding {dependencies} to package.json for {package['name']}") package["dependencies"] = dependencies with open(package_json_path, "w") as fp: json.dump(package, fp, indent=2)
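# Illustration (not part of the upstream resolver): a hedged, standalone sketch
# of the package.json split performed in resolve_target() above -- separating
# `file:` dependencies (local targets already in the build graph) from
# third-party dependencies before the package manager runs. The real resolver
# uses parse_file_path(), which may accept more forms than the plain "file:"
# prefix checked here; the sample manifest in the comment below is invented.
def _split_dependencies(package_json_text):
    import json

    data = json.loads(package_json_text)
    deps = data.get("dependencies", {})
    source_deps = {k: v for k, v in deps.items() if v.startswith("file:")}
    third_party = {k: v for k, v in deps.items() if not v.startswith("file:")}
    return source_deps, third_party

# _split_dependencies('{"dependencies": {"left-pad": "^1.3.0", "my-lib": "file:../my-lib"}}')
# -> ({"my-lib": "file:../my-lib"}, {"left-pad": "^1.3.0"})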
import ast import inspect import traceback import warnings from copy import deepcopy from threading import current_thread from uuid import uuid4 from executing import only, future_flags from snoop.formatting import Event, Source from snoop.tracer import FrameInfo from snoop.utils import NO_ASTTOKENS, optional_numeric_label, builtins, FormattedValue, pp_name_prefix class PP(object): def __init__(self, config): self.config = config def __call__(self, *args): if self.config.enabled: PPEvent(self, args, deep=False) if len(args) == 1: return args[0] else: return args def deep(self, arg): if not is_deep_arg(arg): raise TypeError("Argument must be a lambda without arguments") if self.config.enabled: return PPEvent(self, [arg], deep=True).returns return arg() class PPEvent(object): def __init__(self, pp_object, args, deep): self.config = pp_object.config self.args = args depth = getattr(self.config.thread_local, 'depth', 0) frame = inspect.currentframe().f_back.f_back self.event = Event(FrameInfo(frame), 'log', None, depth) formatted = self.config.formatter.format_log(self.event) self.config.write(formatted) self.returns = None try: assert not NO_ASTTOKENS self.call = call = Source.executing(frame).node assert isinstance(call, ast.Call) assert len(args) == len(call.args) except Exception: if deep: self.returns = args[0] = args[0]() for i, arg in enumerate(args): self.write_placeholder(i, arg) else: if deep: call_arg = only(call.args) assert isinstance(call_arg, ast.Lambda), "You must pass a lambda DIRECTLY to pp.deep, not as a result of any other expression" self.returns = self.deep_pp(call_arg.body, frame) else: self.plain_pp(args, call.args) def write(self, source, value, depth=0): if depth == 0: value_string = self.config.pformat(value) else: try: value_string = repr(value) except Exception as e: exception_string = ''.join( traceback.format_exception_only(type(e), e) ).strip() value_string = '<Exception in repr(): {}>'.format(exception_string) formatted = self.config.formatter.format_log_value( self.event, source, value_string, depth) self.config.write(formatted) def write_node(self, node, value, depth=0): source = self.event.source.get_text_with_indentation(node) self.write(source, value, depth=depth) def write_placeholder(self, i, arg): source = '<argument{}>'.format(optional_numeric_label(i, self.args)) return self.write(source, arg) def plain_pp(self, args, call_args): for i, (call_arg, arg) in enumerate(zip(call_args, args)): try: self.write_node(call_arg, arg) except Exception: self.write_placeholder(i, arg) def deep_pp(self, call_arg, frame): stack = [] thread = current_thread() def before_expr(tree_index): node = self.event.source.nodes[tree_index] if thread == current_thread(): stack.append(node) return node before_expr.name = pp_name_prefix + 'before_' + uuid4().hex def after_expr(node, value): if thread == current_thread(): assert node is stack.pop() try: ast.literal_eval(node) is_obvious = True except ValueError: is_obvious = ( isinstance(node, ast.Name) and getattr(builtins, node.id, object()) == value ) if not is_obvious: self.write_node(node, value, depth=node._depth - call_arg._depth) return value after_expr.name = pp_name_prefix + 'after_' + uuid4().hex new_node = deepcopy(call_arg) new_node = NodeVisitor(before_expr.name, after_expr.name).visit(new_node) expr = ast.Expression(new_node) ast.copy_location(expr, new_node) code = compile( expr, frame.f_code.co_filename, 'eval', dont_inherit=True, flags=future_flags & frame.f_code.co_flags, ) frame.f_globals[before_expr.name] = 
before_expr frame.f_globals[after_expr.name] = after_expr try: return eval(code, frame.f_globals, frame.f_locals) except Exception as e: if stack: last_node = stack[-1] self.write_node( last_node, DirectRepr('!!! {}!'.format(e.__class__.__name__)), last_node._depth - call_arg._depth, ) raise finally: frame.f_globals[before_expr.name] = lambda x: x frame.f_globals[after_expr.name] = lambda node, value: value class DirectRepr(str): def __repr__(self): return self def is_deep_arg(x): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) # noinspection PyDeprecation return ( inspect.isfunction(x) and x.__code__.co_name == '<lambda>' and not any(inspect.getargspec(x)) ) class NodeVisitor(ast.NodeTransformer): """ This does the AST modifications that call the hooks. """ def __init__(self, before_name, after_name): self.before_name = before_name self.after_name = after_name def generic_visit(self, node): # type: (ast.AST) -> ast.AST if (isinstance(node, ast.expr) and not (hasattr(node, "ctx") and not isinstance(node.ctx, ast.Load)) and not isinstance(node, getattr(ast, 'Starred', ()))): return self.visit_expr(node) return super(NodeVisitor, self).generic_visit(node) def visit_expr(self, node): # type: (ast.expr) -> ast.Call """ each expression e gets wrapped like this: _treetrace_hidden_after_expr(_treetrace_hidden_before_expr(_tree_index), e) where the _treetrace_* functions are the corresponding methods with the TreeTracerBase and traced_file arguments already filled in (see _trace_methods_dict) """ before_marker = ast.Call( func=ast.Name(id=self.before_name, ctx=ast.Load()), args=[ast.Num(node._tree_index)], keywords=[], ) ast.copy_location(before_marker, node) if isinstance(node, FormattedValue): arg = node else: arg = super(NodeVisitor, self).generic_visit(node) after_marker = ast.Call( func=ast.Name(id=self.after_name, ctx=ast.Load()), args=[ before_marker, arg, ], keywords=[], ) ast.copy_location(after_marker, node) ast.fix_missing_locations(after_marker) return after_marker
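# Illustration (not part of snoop): a hedged, minimal sketch of the technique
# NodeVisitor applies above -- rewriting an expression's AST so sub-expressions
# are routed through a hook, then compiling the result in 'eval' mode. The
# _trace() hook, the wrap-calls-only policy and the sample expression are
# invented for demonstration; snoop itself wraps every loadable expression.
import ast as _ast_example

def _trace(value):
    print('intermediate value:', value)
    return value

class _WrapCalls(_ast_example.NodeTransformer):
    """Rewrite every call `f(x)` into `_trace(f(x))`."""
    def visit_Call(self, node):
        self.generic_visit(node)  # transform nested calls first
        wrapped = _ast_example.Call(
            func=_ast_example.Name(id='_trace', ctx=_ast_example.Load()),
            args=[node], keywords=[])
        return _ast_example.copy_location(wrapped, node)

_tree = _ast_example.parse('len("abc") + max(1, 2)', mode='eval')
_tree = _ast_example.fix_missing_locations(_WrapCalls().visit(_tree))
_result = eval(compile(_tree, '<example>', 'eval'), {'_trace': _trace})
# prints 3 and 2 as intermediate values; _result == 5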
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin from past.builtins import basestring import difflib import json import logging import re import random from bs4.element import Tag from flexget.utils.soup import get_soup from flexget.utils.requests import Session, TimedLimiter from flexget.utils.tools import str_to_int from flexget import plugin log = logging.getLogger('imdb.utils') # IMDb delivers a version of the page which is unparsable to unknown (and some known) user agents, such as requests' # Spoof the old urllib user agent to keep results consistent requests = Session() requests.headers.update({'User-Agent': 'Python-urllib/2.6'}) # requests.headers.update({'User-Agent': random.choice(USERAGENTS)}) # this makes most of the titles to be returned in english translation, but not all of them requests.headers.update({'Accept-Language': 'en-US,en;q=0.8'}) requests.headers.update( {'X-Forwarded-For': '24.110.%d.%d' % (random.randint(0, 254), random.randint(0, 254))} ) # give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1) requests.add_domain_limiter(TimedLimiter('imdb.com', '3 seconds')) def is_imdb_url(url): """Tests the url to see if it's for imdb.com.""" if not isinstance(url, basestring): return # Probably should use urlparse. return re.match(r'https?://[^/]*imdb\.com/', url) def is_valid_imdb_title_id(value): """ Return True if `value` is a valid IMDB ID for titles (movies, series, etc). """ if not isinstance(value, basestring): raise TypeError("is_valid_imdb_title_id expects a string but got {0}".format(type(value))) # IMDB IDs for titles have 'tt' followed by 7 or 8 digits return re.match(r'tt\d{7,8}', value) is not None def is_valid_imdb_person_id(value): """ Return True if `value` is a valid IMDB ID for a person. """ if not isinstance(value, basestring): raise TypeError("is_valid_imdb_person_id expects a string but got {0}".format(type(value))) # An IMDB ID for a person is formed by 'nm' followed by 7 digits return re.match(r'nm\d{7,8}', value) is not None def extract_id(url): """Return IMDb ID of the given URL. 
Return None if not valid or if URL is not a string.""" if not isinstance(url, basestring): return m = re.search(r'((?:nm|tt)\d{7,8})', url) if m: return m.group(1) def make_url(imdb_id): """Return IMDb URL of the given ID""" return u'https://www.imdb.com/title/%s/' % imdb_id class ImdbSearch(object): def __init__(self): # de-prioritize aka matches a bit self.aka_weight = 0.95 # prioritize first self.first_weight = 1.1 self.min_match = 0.7 self.min_diff = 0.01 self.debug = False self.max_results = 50 def ireplace(self, text, old, new, count=0): """Case insensitive string replace""" pattern = re.compile(re.escape(old), re.I) return re.sub(pattern, new, text, count) def smart_match(self, raw_name, single_match=True): """Accepts messy name, cleans it and uses information available to make smartest and best match""" parser = plugin.get('parsing', 'imdb_search').parse_movie(raw_name) name = parser.name year = parser.year if not name: log.critical('Failed to parse name from %s', raw_name) return None log.debug('smart_match name=%s year=%s' % (name, str(year))) return self.best_match(name, year, single_match) def best_match(self, name, year=None, single_match=True): """Return single movie that best matches name criteria or None""" movies = self.search(name) if not movies: log.debug('search did not return any movies') return None # remove all movies below min_match, and different year for movie in movies[:]: if year and movie.get('year'): if movie['year'] != year: log.debug( 'best_match removing %s - %s (wrong year: %s)' % (movie['name'], movie['url'], str(movie['year'])) ) movies.remove(movie) continue if movie['match'] < self.min_match: log.debug('best_match removing %s (min_match)', movie['name']) movies.remove(movie) continue if not movies: log.debug('FAILURE: no movies remain') return None # if only one remains .. if len(movies) == 1: log.debug('SUCCESS: only one movie remains') return movies[0] # check min difference between best two hits diff = movies[0]['match'] - movies[1]['match'] if diff < self.min_diff: log.debug( 'unable to determine correct movie, min_diff too small (`%s` <-?-> `%s`)' % (movies[0], movies[1]) ) for m in movies: log.debug('remain: %s (match: %s) %s' % (m['name'], m['match'], m['url'])) return None else: return movies[0] if single_match else movies def search(self, name): """Return array of movie details (dict)""" log.debug('Searching: %s', name) url = u'https://www.imdb.com/find' # This may include Shorts and TV series in the results params = {'q': name, 's': 'tt'} log.debug('Search query: %s', repr(url)) page = requests.get(url, params=params) actual_url = page.url movies = [] soup = get_soup(page.text) # in case we got redirected to movie page (perfect match) re_m = re.match(r'.*\.imdb\.com/title/tt\d+/', actual_url) if re_m: actual_url = re_m.group(0) imdb_id = extract_id(actual_url) movie_parse = ImdbParser() movie_parse.parse(imdb_id, soup=soup) log.debug('Perfect hit. 
Search got redirected to %s', actual_url) movie = { 'match': 1.0, 'name': movie_parse.name, 'imdb_id': imdb_id, 'url': make_url(imdb_id), 'year': movie_parse.year, } movies.append(movie) return movies section_table = soup.find('table', 'findList') if not section_table: log.debug('results table not found') return rows = section_table.find_all('tr') if not rows: log.debug('Titles section does not have links') for count, row in enumerate(rows): # Title search gives a lot of results, only check the first ones if count > self.max_results: break result_text = row.find('td', 'result_text') movie = {} additional = re.findall(r'\((.*?)\)', result_text.text) if len(additional) > 0: if re.match('^\d{4}$', additional[-1]): movie['year'] = str_to_int(additional[-1]) elif len(additional) > 1: movie['year'] = str_to_int(additional[-2]) if additional[-1] not in ['TV Movie', 'Video']: log.debug('skipping %s', result_text.text) continue primary_photo = row.find('td', 'primary_photo') movie['thumbnail'] = primary_photo.find('a').find('img').get('src') link = result_text.find_next('a') movie['name'] = link.text movie['imdb_id'] = extract_id(link.get('href')) movie['url'] = make_url(movie['imdb_id']) log.debug('processing name: %s url: %s' % (movie['name'], movie['url'])) # calc & set best matching ratio seq = difflib.SequenceMatcher(lambda x: x == ' ', movie['name'].title(), name.title()) ratio = seq.ratio() # check if some of the akas have better ratio for aka in link.parent.find_all('i'): aka = aka.next.string match = re.search(r'".*"', aka) if not match: log.debug('aka `%s` is invalid' % aka) continue aka = match.group(0).replace('"', '') log.trace('processing aka %s' % aka) seq = difflib.SequenceMatcher(lambda x: x == ' ', aka.title(), name.title()) aka_ratio = seq.ratio() if aka_ratio > ratio: ratio = aka_ratio * self.aka_weight log.debug( '- aka `%s` matches better to `%s` ratio %s (weighted to %s)' % (aka, name, aka_ratio, ratio) ) # prioritize items by position position_ratio = (self.first_weight - 1) / (count + 1) + 1 log.debug( '- prioritizing based on position %s `%s`: %s' % (count, movie['url'], position_ratio) ) ratio *= position_ratio # store ratio movie['match'] = ratio movies.append(movie) movies.sort(key=lambda x: x['match'], reverse=True) return movies class ImdbParser(object): """Quick-hack to parse relevant imdb details""" def __init__(self): self.genres = [] self.languages = [] self.actors = {} self.directors = {} self.writers = {} self.score = 0.0 self.votes = 0 self.meta_score = 0 self.year = 0 self.plot_outline = None self.name = None self.original_name = None self.url = None self.imdb_id = None self.photo = None self.mpaa_rating = '' def __str__(self): return '<ImdbParser(name=%s,imdb_id=%s)>' % (self.name, self.imdb_id) def parse(self, imdb_id, soup=None): self.imdb_id = extract_id(imdb_id) url = make_url(self.imdb_id) self.url = url if not soup: page = requests.get(url) soup = get_soup(page.text) title_wrapper = soup.find('div', attrs={'class': 'title_wrapper'}) data = json.loads(soup.find('script', {'type': 'application/ld+json'}).text) if not data: raise plugin.PluginError( 'IMDB parser needs updating, imdb format changed. Please report on Github.' 
) # Parse stuff from the title-overview section name_elem = data['name'] if name_elem: self.name = name_elem.strip() else: log.error('Possible IMDB parser needs updating, Please report on Github.') raise plugin.PluginError( 'Unable to set imdb_name for %s from %s' % (self.imdb_id, self.url) ) year = soup.find('span', attrs={'id': 'titleYear'}) if year: m = re.search(r'([0-9]{4})', year.text) if m: self.year = int(m.group(1)) if not self.year: log.debug('No year found for %s', self.imdb_id) mpaa_rating_elem = data.get('contentRating') if mpaa_rating_elem: self.mpaa_rating = mpaa_rating_elem else: log.debug('No rating found for %s', self.imdb_id) photo_elem = data.get('image') if photo_elem: self.photo = photo_elem else: log.debug('No photo found for %s', self.imdb_id) original_name_elem = title_wrapper.find('div', {'class': 'originalTitle'}) if original_name_elem: self.name = title_wrapper.find('h1').contents[0].strip() self.original_name = original_name_elem.contents[0].strip().strip('"') else: log.debug('No original title found for %s', self.imdb_id) votes_elem = data.get('aggregateRating', {}).get('ratingCount') if votes_elem: self.votes = str_to_int(votes_elem) if not isinstance(votes_elem, int) else votes_elem else: log.debug('No votes found for %s', self.imdb_id) score_elem = data.get('aggregateRating', {}).get('ratingValue') if score_elem: self.score = float(score_elem) else: log.debug('No score found for %s', self.imdb_id) meta_score_elem = soup.find(attrs={'class': 'metacriticScore'}) if meta_score_elem: self.meta_score = str_to_int(meta_score_elem.text) else: log.debug('No Metacritic score found for %s', self.imdb_id) # get director(s) directors = data.get('director', []) if not isinstance(directors, list): directors = [directors] for director in directors: if director['@type'] != 'Person': continue director_id = extract_id(director['url']) director_name = director['name'] self.directors[director_id] = director_name # get writer(s) writers = data.get('creator', []) if not isinstance(writers, list): writers = [writers] for writer in writers: if writer['@type'] != 'Person': continue writer_id = extract_id(writer['url']) writer_name = writer['name'] self.writers[writer_id] = writer_name # Details section title_details = soup.find('div', attrs={'id': 'titleDetails'}) if title_details: # get languages for link in title_details.find_all( 'a', href=re.compile(r'^/search/title\?title_type=feature' '&primary_language=') ): lang = link.text.strip().lower() if lang not in self.languages: self.languages.append(lang.strip()) # Storyline section storyline = soup.find('div', attrs={'id': 'titleStoryLine'}) if storyline: plot_elem = storyline.find('p') if plot_elem: # Remove the "Written By" part. if plot_elem.em: plot_elem.em.replace_with('') self.plot_outline = plot_elem.text.strip() else: log.debug('No storyline found for %s', self.imdb_id) genres = data.get('genre', []) if not isinstance(genres, list): genres = [genres] self.genres = [g.strip().lower() for g in genres] # Cast section cast = soup.find('table', attrs={'class': 'cast_list'}) if cast: for actor in cast.select('tr > td:nth-of-type(2) > a'): actor_id = extract_id(actor['href']) actor_name = actor.text.strip() # tag instead of name if isinstance(actor_name, Tag): actor_name = None self.actors[actor_id] = actor_name
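# Illustration (not part of the plugin): a hedged sketch of the JSON-LD
# parsing strategy used by ImdbParser.parse() above. The HTML below is an
# invented, minimal stand-in for the <script type="application/ld+json">
# block embedded in IMDb title pages; real pages carry many more fields and
# their layout may change without notice.
def _example_parse_ld_json():
    import json
    from flexget.utils.soup import get_soup  # same helper imported above
    html = (
        '<html><head><script type="application/ld+json">'
        '{"name": "Example Movie", "contentRating": "PG-13",'
        ' "aggregateRating": {"ratingValue": "7.4", "ratingCount": 12345}}'
        '</script></head><body></body></html>'
    )
    data = json.loads(
        get_soup(html).find('script', {'type': 'application/ld+json'}).text
    )
    # data['name'] == 'Example Movie'; rating == 7.4; ratingCount == 12345
    return data['name'], float(data['aggregateRating']['ratingValue'])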
import logging from shuttl import db, app from shuttl.Storage import Storage from shuttl.Models.FileTree.TreeNodeObject import TreeNodeObject from shuttl.Models.FileTree.Directory import Directory from werkzeug import secure_filename import uuid from flask import abort import os ## Base class for all objects that require a file (HTML, CSS, JS, Sass, ETC. . .) class FileObject(TreeNodeObject): ##Id of the directory. Because of the way inheritance is set up in sqlalchemy, this is a foriegnKey id = db.Column(db.Integer, db.ForeignKey('tree_node_object.id'), primary_key=True) _fileType = "file" fileType = db.Column(db.String) ## the location of the file filePath = db.Column(db.String, nullable=False) ## Tell the mapper that for type=FileObject, cast into a FileObject object __mapper_args__ = { 'polymorphic_identity': 'file_object', } ## the underlying file of the object, Opens for reading # \returns the file to read @property def file(self): if not os.path.isfile(self.filePath): Storage.Download(self) pass os.utime(self.filePath, None) return open(self.filePath, "r") @property def fileContents(self): content = None with self.file as fi: content = fi.read() pass return content @classmethod def Sync(cls): for i in cls.query.all(): Storage.Download(i) pass pass ## Sets the file # \param file, the new file @file.setter def file(self, file): filename = secure_filename(file.filename) _, extension = filename.rsplit(".", 1) filename = "{0}.{1}".format(uuid.uuid4(), extension) uploadTo = os.path.join(self.getUploadPath(), filename) file.save(uploadTo) self.filePath = uploadTo Storage.Upload(self) pass ## saves the content to the file # \param content the content to save def updateContent(self, content): self.writeToFile(content) pass ## Writes content to the file # \param content the content to go to the file def writeToFile(self, content): with open(self.filePath, "w+") as fi: os.utime(self.filePath, None) fi.write(content) pass Storage.Upload(self) pass ## Gets the relative path where the file belongs, should be overloaded for each class # \return the place to save the file def _getUploadPath(self): return "" ## Gets the full path were the file belongs # \return the place to save the file def getUploadPath(self): parts = [app.config["UPLOAD_DIR"]] uploadPart = self._getUploadPath() if uploadPart != "": parts.append(uploadPart) pass return os.path.join(*parts) ## Gets the dir names of the path # \return a list of names all coresponding to a path list def _getPathParts(self): path = [] dir = self.parent while dir is not None and dir.name != "root": path.append(dir.sys_name) dir = dir.parent pass return path[::-1] @property def __json__(self): res = super(FileObject, self).__json__ res.add("fileType") return res ## the full path of the file object with the name (ie: /dir1/dir2/dir3/file.html) # \return the full path of the file object @property def fullPath(self): path = self._getPathParts() path.append(self.sys_name) return "/" + os.path.join(*path) ## Renders the page for the FileMap # \return a dictionary defining the object def render(self): raise NotImplementedError ## Get the path to the file, Doesn't include file name # \return the path to the file def resolvePath(self): path = self._getPathParts() return "/" + os.path.join(*path) ## Builds the content of the file. 
# \param context the context of the request, used to build the Page # \return the built content def buildContent(self, context, **kwargs): raise NotImplementedError ## Builds the response for sending the file # \return the built response def buildResponse(self): raise NotImplementedError ## Gets the headers of the file # \return the file headers def headers(self): raise NotImplementedError ## Gets the file or throws a 404 error. # \param *args could be anything # \return the file you're looking for # \raise 404 if the object is not found @classmethod def getItemOr404(cls, *args): itm = cls.polyQuery().filter(*args) if itm is None: abort(404) pass return itm @classmethod def Create(cls, parent, file, *args, **kwargs): kwargs["file"] = file kwargs["parent"] = parent inst = super(FileObject, cls).Create(*args, **kwargs) parent.children.append(inst) return inst ## Deletes the fileobject and removes the file if removeFile is True # \param removeFile, if true delete will delete the file as well. def delete(self, removeFile=False): if removeFile: os.remove(self.filePath) Storage.Delete(self) pass super(FileObject, self).delete() def save(self): self.fileType = self._fileType super(FileObject, self).save() pass @classmethod def LoadMapper(cls): FileObject.fileTypeMap = dict() for klass in FileObject.__subclasses__(): FileObject.fileTypeMap[klass._fileType] = klass pass def publish(self, publisher): publisher.publishFile(self) pass pass def cast(self): return FileObject.fileTypeMap[self.fileType].query.get(self.id) def serialize(self, *args, **kwargs): fileDict = super(FileObject, self).serialize(*args, **kwargs) try: content = self.buildContent(dict()) except NotImplementedError: content = None pass except FileNotFoundError: msg = """File Connection has been lost. Here are the details: File ID: {id} File Path: {path} File Name: {name} File Type: {type} """ logging.error(msg.format(id=self.id, path=self.fullPath, name=self.name, type=self._fileType)) content = "Error: File contents Lost" pass fileDict["content"] = content return fileDict ## A class to test FileObject class FileObjectMock(FileObject): id = db.Column(db.Integer, db.ForeignKey('file_object.id'), primary_key=True) _fileType = "file_mock" fileType = db.Column(db.String) __mapper_args__ = { 'polymorphic_identity': 'fileobjectmock', } ## Get where the file belongs, should be overloaded for each object # \return the place to save the file # \raises NotImplementedError Because this function is meant to be implemented into the base class def _getUploadPath(self): return "" def render(self): return {"This": "rendered"}
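## Illustration (not part of shuttl): a hedged, framework-free sketch of the
## LoadMapper()/cast() pattern above -- building a registry that maps the stored
## fileType discriminator string to the concrete subclass. The real cast()
## re-queries the database for the row as that subclass; the classes below are
## invented stand-ins without SQLAlchemy.
class _BaseFileExample(object):
    _fileType = "file"
    fileTypeMap = {}

    @classmethod
    def LoadMapper(cls):
        cls.fileTypeMap = {sub._fileType: sub for sub in cls.__subclasses__()}

    def cast(self):
        return _BaseFileExample.fileTypeMap[self._fileType]

class _HtmlFileExample(_BaseFileExample):
    _fileType = "html"

class _CssFileExample(_BaseFileExample):
    _fileType = "css"

_BaseFileExample.LoadMapper()
assert _BaseFileExample.fileTypeMap["html"] is _HtmlFileExample
assert _HtmlFileExample().cast() is _HtmlFileExample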
""" IEC 61672-1:2013 ================ IEC 61672-1:2013 gives electroacoustical performance specifications for three kinds of sound measuring instruments [IEC61672]_: - time-weighting sound level meters that measure exponential-time-weighted, frequency-weighted sound levels; - integrating-averaging sound level meters that measure time-averaged, frequency-weighted sound levels; and - integrating sound level meters that measure frequency-weighted sound exposure levels. .. [IEC61672] http://webstore.iec.ch/webstore/webstore.nsf/artnum/048669!opendocument Weighting functions ******************* .. autofunction:: weighting_function_a .. autofunction:: weighting_function_c .. autofunction:: weighting_function_z Weighting systems ***************** .. autofunction:: weighting_system_a .. autofunction:: weighting_system_c .. autofunction:: weighting_system_z """ import io import os import pkgutil import numpy as np import pandas as pd from scipy.signal import zpk2tf from scipy.signal import lfilter, bilinear from .iso_tr_25417_2007 import REFERENCE_PRESSURE WEIGHTING_DATA = pd.read_csv( io.BytesIO(pkgutil.get_data('acoustics', os.path.join('data', 'iec_61672_1_2013.csv'))), sep=',', index_col=0) """DataFrame with indices, nominal frequencies and weighting values. """ NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES = np.array(WEIGHTING_DATA.nominal) """Nominal 1/3-octave frequencies. See table 3. """ NOMINAL_OCTAVE_CENTER_FREQUENCIES = np.array(WEIGHTING_DATA.nominal)[2::3] """Nominal 1/1-octave frequencies. Based on table 3. """ REFERENCE_FREQUENCY = 1000.0 """Reference frequency. See table 3. """ EXACT_THIRD_OCTAVE_CENTER_FREQUENCIES = REFERENCE_FREQUENCY * 10.0**(0.01 * (np.arange(10, 44) - 30)) """Exact third-octave center frequencies. See table 3. """ WEIGHTING_A = np.array(WEIGHTING_DATA.A) """Frequency weighting A. See table 3. """ WEIGHTING_C = np.array(WEIGHTING_DATA.C) """Frequency weighting C. See table 3. """ WEIGHTING_Z = np.array(WEIGHTING_DATA.Z) """Frequency weighting Z. See table 3. """ WEIGHTING_VALUES = {'A': WEIGHTING_A, 'C': WEIGHTING_C, 'Z': WEIGHTING_Z} """Dictionary with weighting values 'A', 'C' and 'Z' weighting. """ FAST = 0.125 """FAST time-constant. """ SLOW = 1.000 """SLOW time-constant. """ def time_averaged_sound_level(pressure, sample_frequency, averaging_time, reference_pressure=REFERENCE_PRESSURE): """Time-averaged sound pressure level. :param pressure: Dynamic pressure. :param sample_frequency: Sample frequency. :param averaging_time: Averaging time. :param reference_pressure: Reference pressure. """ levels = 10.0 * np.log10(average(pressure**2.0, sample_frequency, averaging_time) / reference_pressure**2.0) times = np.arange(levels.shape[-1]) * averaging_time return times, levels def average(data, sample_frequency, averaging_time): """Average the sound pressure squared. :param data: Energetic quantity, e.g. :math:`p^2`. :param sample_frequency: Sample frequency. :param averaging_time: Averaging time. :returns: Time weighting is applied by applying a low-pass filter with one real pole at :math:`-1/\\tau`. .. note:: Because :math:`f_s \\cdot t_i` is generally not an integer, samples are discarded. This results in a drift of samples for longer signals (e.g. 60 minutes at 44.1 kHz). """ averaging_time = np.asarray(averaging_time) sample_frequency = np.asarray(sample_frequency) samples = data.shape[-1] n = np.floor(averaging_time * sample_frequency).astype(int) data = data[..., 0:n * (samples // n)] # Drop the tail of the signal. 
newshape = list(data.shape[0:-1]) newshape.extend([-1, n]) data = data.reshape(newshape) #data = data.reshape((-1, n)) return data.mean(axis=-1) def time_weighted_sound_level(pressure, sample_frequency, integration_time, reference_pressure=REFERENCE_PRESSURE): """Time-weighted sound pressure level. :param pressure: Dynamic pressure. :param sample_frequency: Sample frequency. :param integration_time: Integration time. :param reference_pressure: Reference pressure. """ levels = 10.0 * np.log10(integrate(pressure**2.0, sample_frequency, integration_time) / reference_pressure**2.0) times = np.arange(levels.shape[-1]) * integration_time return times, levels def integrate(data, sample_frequency, integration_time): """Integrate the sound pressure squared using exponential integration. :param data: Energetic quantity, e.g. :math:`p^2`. :param sample_frequency: Sample frequency. :param integration_time: Integration time. :returns: Time weighting is applied by applying a low-pass filter with one real pole at :math:`-1/\\tau`. .. note:: Because :math:`f_s \\cdot t_i` is generally not an integer, samples are discarded. This results in a drift of samples for longer signals (e.g. 60 minutes at 44.1 kHz). """ integration_time = np.asarray(integration_time) sample_frequency = np.asarray(sample_frequency) samples = data.shape[-1] b, a = zpk2tf([1.0], [1.0, integration_time], [1.0]) b, a = bilinear(b, a, fs=sample_frequency) #b, a = bilinear([1.0], [1.0, integration_time], fs=sample_frequency) # Bilinear: Analog to Digital filter. n = np.floor(integration_time * sample_frequency).astype(int) data = data[..., 0:n * (samples // n)] newshape = list(data.shape[0:-1]) newshape.extend([-1, n]) data = data.reshape(newshape) #data = data.reshape((-1, n)) # Divide in chunks over which to perform the integration. return lfilter( b, a, data)[..., n - 1] / integration_time # Perform the integration. Select the final value of the integration. def fast(data, fs): """Apply fast (F) time-weighting. :param data: Energetic quantity, e.g. :math:`p^2`. :param fs: Sample frequency. .. seealso:: :func:`integrate` """ return integrate(data, fs, FAST) #return time_weighted_sound_level(data, fs, FAST) def slow(data, fs): """Apply slow (S) time-weighting. :param data: Energetic quantity, e.g. :math:`p^2`. :param fs: Sample frequency. .. seealso:: :func:`integrate` """ return integrate(data, fs, SLOW) #return time_weighted_sound_level(data, fs, SLOW) def fast_level(data, fs): """Time-weighted (FAST) sound pressure level. :param data: Dynamic pressure. :param fs: Sample frequency. .. seealso:: :func:`time_weighted_sound_level` """ return time_weighted_sound_level(data, fs, FAST) def slow_level(data, fs): """Time-weighted (SLOW) sound pressure level. :param data: Dynamic pressure. :param fs: Sample frequency. .. seealso:: :func:`time_weighted_sound_level` """ return time_weighted_sound_level(data, fs, SLOW) #---- Annex E - Analytical expressions for frequency-weightings C, A, and Z.-# _POLE_FREQUENCIES = { 1: 20.60, 2: 107.7, 3: 737.9, 4: 12194.0, } """Approximate values for pole frequencies f_1, f_2, f_3 and f_4. See section E.4.1 of the standard. """ _NORMALIZATION_CONSTANTS = { 'A': -2.000, 'C': -0.062, } """Normalization constants :math:`C_{1000}` and :math:`A_{1000}`. See section E.4.2 of the standard. """ def weighting_function_a(frequencies): r"""A-weighting function in decibel. :param frequencies: Vector of frequencies at which to evaluate the weighting. :returns: Vector with scaling factors. The weighting curve is .. 
math:: 20 \log_{10}{\frac{(f_4^2 * f^4)}{(f^2 + f_1^2) \sqrt{(f^2 + f_2^2)(f^2 + f_3^2)}(f^2 + f_4^2)}} - A_{1000} with :math:`A_{1000} = -2` dB. See equation E.6 of the standard. """ f = np.asarray(frequencies) offset = _NORMALIZATION_CONSTANTS['A'] f1, f2, f3, f4 = _POLE_FREQUENCIES.values() weighting = 20.0 * np.log10((f4**2.0 * f**4.0) / ( (f**2.0 + f1**2.0) * np.sqrt(f**2.0 + f2**2.0) * np.sqrt(f**2.0 + f3**2.0) * (f**2.0 + f4**2.0))) - offset return weighting def weighting_function_c(frequencies): r"""C-weighting function in decibel. :param frequencies: Vector of frequencies at which to evaluate the weighting. :returns: Vector with scaling factors. The weighting curve is .. math:: 20 \log_{10}{\frac{(f_4^2 f^2)}{(f^2+f_1^2)(f^2+f_4^2)}} - C_{1000} with :math:`C_{1000} = -0.062` dB See equation E.1 of the standard. """ f = np.asarray(frequencies) offset = _NORMALIZATION_CONSTANTS['C'] f1, _, _, f4 = _POLE_FREQUENCIES.values() weighting = 20.0 * np.log10((f4**2.0 * f**2.0) / ((f**2.0 + f1**2.0) * (f**2.0 + f4**2.0))) - offset return weighting def weighting_function_z(frequencies): """Z-weighting function in decibel. :param frequencies: Vector of frequencies at which to evaluate the weighting. :returns: Vector with scaling factors. """ frequencies = np.asarray(frequencies) return np.zeros_like(frequencies) WEIGHTING_FUNCTIONS = { 'A': weighting_function_a, 'C': weighting_function_c, 'Z': weighting_function_z, } """Dictionary with available weighting functions 'A', 'C' and 'Z'. """ def weighting_system_a(): """A-weighting filter represented as polynomial transfer function. :returns: Tuple of `num` and `den`. See equation E.6 of the standard. """ f1 = _POLE_FREQUENCIES[1] f2 = _POLE_FREQUENCIES[2] f3 = _POLE_FREQUENCIES[3] f4 = _POLE_FREQUENCIES[4] offset = _NORMALIZATION_CONSTANTS['A'] numerator = np.array([(2.0 * np.pi * f4)**2.0 * (10**(-offset / 20.0)), 0.0, 0.0, 0.0, 0.0]) part1 = [1.0, 4.0 * np.pi * f4, (2.0 * np.pi * f4)**2.0] part2 = [1.0, 4.0 * np.pi * f1, (2.0 * np.pi * f1)**2.0] part3 = [1.0, 2.0 * np.pi * f3] part4 = [1.0, 2.0 * np.pi * f2] denomenator = np.convolve(np.convolve(np.convolve(part1, part2), part3), part4) return numerator, denomenator def weighting_system_c(): """C-weighting filter represented as polynomial transfer function. :returns: Tuple of `num` and `den`. See equation E.1 of the standard. """ f1 = _POLE_FREQUENCIES[1] f4 = _POLE_FREQUENCIES[4] offset = _NORMALIZATION_CONSTANTS['C'] numerator = np.array([(2.0 * np.pi * f4)**2.0 * (10**(-offset / 20.0)), 0.0, 0.0]) part1 = [1.0, 4.0 * np.pi * f4, (2.0 * np.pi * f4)**2.0] part2 = [1.0, 4.0 * np.pi * f1, (2.0 * np.pi * f1)**2.0] denomenator = np.convolve(part1, part2) return numerator, denomenator def weighting_system_z(): """Z-weighting filter represented as polynomial transfer function. :returns: Tuple of `num` and `den`. Z-weighting is 0.0 dB for all frequencies and therefore corresponds to a multiplication of 1. """ numerator = [1] denomenator = [1] return numerator, denomenator WEIGHTING_SYSTEMS = { 'A': weighting_system_a, 'C': weighting_system_c, 'Z': weighting_system_z, } """Weighting systems. """
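# Illustration (not part of the module API): a hedged usage sketch of the
# weighting filters defined above -- discretizing the analog A-weighting
# transfer function from weighting_system_a() with a bilinear transform and
# applying it to a signal. The sample rate and the random test signal in the
# comment are arbitrary choices for demonstration only.
def _example_a_weight_signal(signal, fs):
    from scipy.signal import bilinear, lfilter
    num, den = weighting_system_a()   # analog polynomial transfer function
    b, a = bilinear(num, den, fs=fs)  # discretize for this sample rate
    return lfilter(b, a, signal)

# Example (invented values):
#   _example_a_weight_signal(np.random.randn(4096), 44100.0)
# By the normalization constant A_1000, weighting_function_a(1000.0) should be
# approximately 0.0 dB at the reference frequency.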
# Copyright 2014 Dietrich Epp. # This file is part of SGLib. SGLib is licensed under the terms of the # 2-clause BSD license. For more information, see LICENSE.txt. from .error import ConfigError, UserError from .schema import Variables class SourceFile(object): """An individual source file with its build variables.""" __slots__ = ['path', 'variables', 'sourcetype', 'external'] def __init__(self, path, variables, sourcetype, external): self.path = path self.variables = variables self.sourcetype = sourcetype self.external = bool(external) class Module(object): """A collection of source files and build settings.""" __slots__ = [ # The schema for build variables. 'schema', # List of source files (SourceFile objects) in the module. 'sources', # List of generated source files. 'generated_sources', # Public variables for this module. '_public', # List of public variable sets inherited by source files dependent on # this module. Contains _public. '_public_varsets', # List of all variable sets used by the module. Contains _public. '_all_varsets', # List of errors in this module. 'errors', ] def __init__(self, schema): self.schema = schema self.sources = [] self.generated_sources = [] self.errors = [] self._public = {} self._public_varsets = [self._public] self._all_varsets = [self._public] def copy(self): """Create a copy of this module.""" class_ = self.__class__ obj = class_.__new__(class_) obj.schema = self.schema obj.sources = list(self.sources) obj.generated_sources = list(self.generated_sources) obj.errors = list(self.errors) obj._public = dict(self.public) obj._public_varsets = list(self._public_varsets) obj._public_varsets[0] = obj._public obj._all_varsets = list(obj._all_varsets) obj._all_varsets[0] = obj._public return obj def variables(self): """Get the build variables for this module.""" return Variables(self.schema, self._all_varsets) def add_source(self, path, sourcetype=None, external=True): """Add a single source file with no module dependencies.""" src = SourceFile( path, Variables(self.schema, []), sourcetype, external) self.sources.append(src) return self def add_generated_source(self, source): """Add a generated source file to the module.""" self.generated_sources.append(source) return self def add_sources(self, sources, tagdefs): """Add source files to the module. Each source file is associated with a set of tags. The tag definitions map each tag to either None or a list of modules. If a tag is mapped to None, then sources with that tag are not included in the module. If a tag is mapped to a list of modules, then source files with those tags are dependent on those modules. Four tags have special meaning. The 'public' tag contains dependencies which are propagated to files which depend on this module. The 'private' tag is ignored if it is missing. The 'external' tag marks source files that should be compiled with warnings disabled. The 'exclude' tag marks source files that should be excluded entirely. 
""" def check_tags(): """Check that the tag definitions match the source files.""" special = 'public', 'private', 'external', 'exclude' srctags = set() for source in sources: srctags.update(source.tags) srctags.difference_update(special) deftags = set(tagdefs) deftags.difference_update(special) extras = deftags.difference(srctags) missing = srctags.difference(deftags) if extras or missing: msgs = [] if missing: msgs.append( 'missing tags: {}'.format(', '.join(sorted(missing)))) if extras: msgs.append( 'extra tags: {}'.format(', '.join(sorted(extras)))) raise UserError('; '.join(msgs)) for tag in ('public', 'private'): if tagdefs.get(tag, True) is None: raise UserError('"{}" tag should not be None'.format(tag)) if 'external' in tagdefs: raise UserError('"external" tag should not be defined') for tag, tagdef in tagdefs.items(): if tagdef is None: continue if not isinstance(tagdef, list): raise UserError('"{}" tag definition is not a list' .format(tag)) if not all(isinstance(x, Module) for x in tagdef): raise UserError('"{}" tag contains invalid item') def resolve_varsets(tags): """Resolve a list of tags to a list of variable sets.""" if 'exclude' in tags: return None varsets = [] for tag in tags: try: modules = tagdefs[tag] except KeyError: continue if modules is None: return None for module in modules: for varset in module._public_varsets: if not varset: continue if not any(x is varset for x in varsets): varsets.append(varset) return varsets def add_sources(): """Add tagged source files to this module.""" all_tags = set() schema = self.schema for source in sources: source_varsets = resolve_varsets(source.tags) if source_varsets is None: continue all_tags.update(source.tags) self.sources.append(SourceFile( source.path, Variables(schema, source_varsets), source.sourcetype, 'external' in source.tags)) all_tags.add('public') for tag in all_tags: modules = tagdefs.get(tag) if not modules: continue public = tag == 'public' for module in modules: self.add_module(module, public=public) check_tags() add_sources() return self def add_variables(self, variables, *, configs=None, archs=None): """Add public variables to this module.""" schema = self.schema variants = schema.get_variants(configs, archs) for varname, value2 in variables.items(): vardef = schema.get_variable(varname) if not vardef.isvalid(value2): raise ValueError('invalid value for {}: {!r}' .format(varname, value2)) for variant in variants: try: value1 = self._public[variant, varname] except KeyError: value = value2 else: value = vardef.combine([value1, value2]) self._public[variant, varname] = value return self def add_flags(self, flags, *, configs=None, archs=None): """Add public flags to this module. Flags are like variables, but flags have the same meanings across targets. Flags are translated into variables. 
""" var = {} for flag, value in flags.items(): try: varname = self.schema.flags[flag] except KeyError: raise ValueError('unknown flag: {!r}'.format(flag)) var[varname] = value return self.add_variables(var, configs=configs, archs=archs) def flag_variable(self, flag): """Get the variable name that correspond to a flag.""" raise NotImplementedError('must be implemented by subclass') def add_module(self, module, *, public=True): """Add a module dependency on another module.""" if public: for varset in module._public_varsets: if not varset: continue if not any(x is varset for x in self._public_varsets): self._public_varsets.append(varset) for varset in module._all_varsets: if not varset: continue if not any(x is varset for x in self._all_varsets): self._all_varsets.append(varset) for source in module.sources: if not any(x is source for x in self.sources): self.sources.append(source) for source in module.generated_sources: if not any(x is source for x in self.generated_sources): self.generated_sources.append(source) for error in module.errors: if not any(x is error for x in self.errors): self.errors.append(error) return self def add_define(self, definition, *, configs=None, archs=None): """Add a preprocessor definition.""" raise NotImplementedError('must be implemented by subclass') def add_header_path(self, path, *, configs=None, archs=None, system=False): """Add a header search path. If system is True, then the header path will be searched for include statements with angle brackets. Otherwise, the header path will be searched for include statements with double quotes. Not all targets support the distinction. """ raise NotImplementedError('must be implemented by subclass') def add_library(self, path, *, configs=None, archs=None): """Add a library.""" raise ConfigError('add_library not available on this target') def add_library_path(self, path, *, configs=None, archs=None): """Add a library search path.""" raise ConfigError('add_library_path not available on this target') def add_framework(self, *, name=None, path=None, configs=None, archs=None): """Add a framework. Either the name or the path should be specified (but not both). """ raise ConfigError('add_framework not available on this target') def add_framework_path(self, path, *, configs=None, archs=None): """Add a framework search path.""" raise ConfigError('add_framework_path not available') def pkg_config(self, spec, *, configs=None, archs=None): """Run the pkg-config tool and return the resulting flags.""" raise ConfigError('add_pkg_config not available on this target') def sdl_config(self, version, *, configs=None, archs=None): """Run the sdl-config tool and return the resulting flags. The version should either be 1 or 2. """ raise ConfigError('add_sdl_config not available on this target') def test_compile(self, source, sourcetype, *, configs=None, archs=None, external=True, link=True): """Try to compile a source file. Returns True if successful, False otherwise. """ raise ConfigError( 'test_compile not available on this target')
"""" Solving the problem 1) Unencrypt the base64 text in the .txt and transform it in a HEX string [DONE] 2) Guess the KEYSIZE: (3/3) - Write the hamming function [DONE] - Write the guess KEYSIZE Function [DONE] - Guess a likely KEYSIZE [DONE] 3) Write a function to break the text in KEYSIZE blocks [DONE] 4) Transpose the blocks and make big Blocks with: [DONE] - the first byte of each block [DONE] - the second byte of each block [DONE] - and so on.. [DONE] 5) Copy and paste the single XOR function from previous exercises to guess the key of each block: - Use XOR function [DONE] - Evaluate englishness on a "histogram" letter frequency basis of the KEYSIZE blocks [DONE] - Take the keys [DONE] 6) Put together the key and use it to decode the original text - Write a XOR decode repeating key function [DONE] - Use the function on the repeating key [DONE] """ from binascii import hexlify, unhexlify, a2b_base64 import base64 FILE_SOURCE = "src/challenge6.txt" """Functions for Single XOR""" # 1 # Decrypt the base64 text in the .txt and transform it in a HEX string text_file = "" for line in open(FILE_SOURCE, "r"): text_file += line.strip() text_file = text_file.decode('base64').encode('hex') # 1.1 Test # print text_file # 2 Guess the KEYSIZE # 2.1 Hamming Function def hamming_strings(string_1, string_2): """Hamming Distance Function""" if len(string_1) != len(string_2): print "Error in strings length while computing hamming function" return string_1 = bytearray(string_1) string_2 = bytearray(string_2) return sum(bin(i ^ j).count("1") for i, j in zip(string_1, string_2)) # 2.1.1 Hamming function test # print hamming_strings("this is a test", "wokka wokka!!!") # Output of test function has to be 37 # 2.2 Guess KeySize Function def guess_keysize(lower, higher, text_file): # Set Variables to guess the keysize best_normalized_guess = [] c = 0 for test_key_size in xrange(lower, higher): first = text_file[:2 * test_key_size] # Take size of first and second second = first[test_key_size:] # Slice out first part and assign it to the second first = first[:test_key_size] # Save first part slicing out the second part third = text_file[2 * test_key_size:4 * test_key_size] fourth = third[test_key_size:] third = third[:test_key_size] normalized_distance = ((hamming_strings(first, second) + hamming_strings(third, fourth)) / 2)/test_key_size if c == 0: # This is just an ugly way to avoid hard coding a distance c = 1 best_normalized_guess.append(normalized_distance) best_normalized_guess.append(test_key_size) if best_normalized_guess[0] > normalized_distance: best_normalized_guess[0] = normalized_distance best_normalized_guess[1] = test_key_size return best_normalized_guess[1] KEYSIZE = guess_keysize(2, 40, text_file) # 2.2.1 Guess KeySize Function Test print "Key size probably is: ", guess_keysize(2, 40, text_file) # Returns 4 # 3 Break Text in KEYSIZE blocks def break_n_blocks(n, text): """Takes some text, splits it in n-size Blocks""" return [text[i:i + n] for i in range(0, len(text), n)] blocks = break_n_blocks(KEYSIZE, text_file) # 3.1 Test Break Text in KEYSIZE blocks # print break_n_blocks(4, text_file) # 4 Transpose the blocks and make big Blocks with 1st bytes, 2nd bytes, 3rd bytes of all blocks solving_blocks = [] for x in xrange(KEYSIZE): temp_string = "" for block in blocks: temp_string += block[x] solving_blocks.append(temp_string) # 4.1 Test Transpose the blocks and make big Blocks with 1st bytes, 2nd bytes, 3rd bytes of all blocks # print solving_blocks # 5 Single XOR function -> Get the key def 
XORfunction(input_1, input_2): bin_input_1 = hexToBinary(input_1) bin_input_2 = hexToBinary(input_2) solution = [] # XOR is like a sum so if el1+el2 == 1 output is 1 else output is 0 for x in xrange(0, len(bin_input_2)): # the array is iterated from [0] to len(bin_input_1) so the elements are calculated from last to first current_compare = int(bin_input_1[x]) + int(bin_input_2[x]) if current_compare == 1: solution.append("1") else: solution.append("0") final_solution = dec_to_hex(int("".join(solution), 2)) while len(final_solution) != len(input_1): # after 16 steps len becomes less than 68 final_solution = "0" + final_solution return final_solution.decode("hex") # the final solution has to be converted from decimal to hexadecimal def dec_to_hex(value): dictionary_hex = "0123456789abcdef" solution = [] while value != 0: solution.insert(0, dictionary_hex[value % 16]) value = value / 16 return "".join(solution) # Hex is converted to a binary string to make comparisons easy as the digits become easy to select as an array of chars def hexToBinary(text): return '{:0{}b}'.format(int(text, base=16), len(text) * 4) def single_XOR_function(string, byte): byte_string = str(byte) while len(byte_string) < len(string): byte_string += str(byte) return XORfunction(string, byte_string) def score(string): freq = dict() freq['A']=834 freq['B']=154 freq['C']=273 freq['D']=414 freq['E']=1160 freq['F']=203 freq['G']=192 freq['H']=611 freq['I']=671 freq['J']=23 freq['K']=87 freq['L']=424 freq['M']=253 freq['N']=680 freq['O']=770 freq['P']=166 freq['Q']=9 freq['R']=568 freq['S']=611 freq['T']=937 freq['U']=285 freq['V']=106 freq['W']=234 freq['X']=20 freq['Y']=204 freq['Z']=6 freq[' ']=100 freq['*']=(-300) freq['\n']=(-300) ret = 0 for c in string.upper(): if c in freq: ret += freq[c] return ret def xor_decoder(text): results = [] # Create an array that contains all hex from 0 to 255 as strings for byte in xrange(0, 256): new_byte = dec_to_hex(byte) if len(new_byte) == 1: new_byte = "0" + (new_byte) if len(new_byte ) == 0: new_byte = "00" data = [single_XOR_function(text, new_byte), new_byte] results.append(data) decoded_struct = [(score(r[0]), r[0], r[1]) for r in results] decoded_struct.sort(key=lambda x: x[0], reverse = True) return decoded_struct[0][2] solution_key = [] for block in solving_blocks: solution_key.append(xor_decoder(block)) print solution_key # 5.1 Test Single XOR function # Should be Working # 6 Use the key to decrypt the text
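# Illustration (not part of the original solution): a hedged sketch of step 6,
# decrypting the ciphertext with the recovered repeating key. The helper is
# standalone and works on raw bytes; feeding it the key bytes recovered above
# and the raw (base64-decoded) ciphertext is an assumption about their final
# form, not something shown here.
def repeating_key_xor(data, key):
    # XOR each byte of `data` with the key, cycling the key as needed.
    data = bytearray(data)
    key = bytearray(key)
    return bytes(bytearray(b ^ key[i % len(key)] for i, b in enumerate(data)))

# Applying the same key twice returns the original plaintext:
assert repeating_key_xor(repeating_key_xor(b'hello world', b'key'), b'key') == b'hello world'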
"""SCons.Tool.tex Tool-specific initialization for TeX. Generates .dvi files from .tex files There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from __future__ import print_function __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import os.path import re import shutil import sys import platform import glob import SCons.Action import SCons.Node import SCons.Node.FS import SCons.Util import SCons.Scanner.LaTeX Verbose = False must_rerun_latex = True # these are files that just need to be checked for changes and then rerun latex check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm'] # these are files that require bibtex or makeindex to be run when they change all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo', '.acn', '.bcf'] # # regular expressions used to search for Latex features # or outputs that require rerunning latex # # search for all .aux files opened by latex (recorded in the .fls file) openout_aux_re = re.compile(r"OUTPUT *(.*\.aux)") # search for all .bcf files opened by latex (recorded in the .fls file) # for use by biber openout_bcf_re = re.compile(r"OUTPUT *(.*\.bcf)") #printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE) #printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE) #printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE) # search to find rerun warnings warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)' warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE) # search to find citation rerun warnings rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct" rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE) # search to find undefined references or citations warnings undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)' undefined_references_re = re.compile(undefined_references_str, re.MULTILINE) # used by the emitter auxfile_re = re.compile(r".", re.MULTILINE) tableofcontents_re = re.compile(r"^[^%\n]*\\tableofcontents", re.MULTILINE) makeindex_re = re.compile(r"^[^%\n]*\\makeindex", re.MULTILINE) bibliography_re = re.compile(r"^[^%\n]*\\bibliography", re.MULTILINE) bibunit_re = re.compile(r"^[^%\n]*\\begin\{bibunit\}", re.MULTILINE) multibib_re = re.compile(r"^[^%\n]*\\newcites\{([^\}]*)\}", 
re.MULTILINE) addbibresource_re = re.compile(r"^[^%\n]*\\(addbibresource|addglobalbib|addsectionbib)", re.MULTILINE) listoffigures_re = re.compile(r"^[^%\n]*\\listoffigures", re.MULTILINE) listoftables_re = re.compile(r"^[^%\n]*\\listoftables", re.MULTILINE) hyperref_re = re.compile(r"^[^%\n]*\\usepackage.*\{hyperref\}", re.MULTILINE) makenomenclature_re = re.compile(r"^[^%\n]*\\makenomenclature", re.MULTILINE) makeglossary_re = re.compile(r"^[^%\n]*\\makeglossary", re.MULTILINE) makeglossaries_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE) makeacronyms_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE) beamer_re = re.compile(r"^[^%\n]*\\documentclass\{beamer\}", re.MULTILINE) regex = r'^[^%\n]*\\newglossary\s*\[([^\]]+)\]?\s*\{([^}]*)\}\s*\{([^}]*)\}\s*\{([^}]*)\}\s*\{([^}]*)\}' newglossary_re = re.compile(regex, re.MULTILINE) biblatex_re = re.compile(r"^[^%\n]*\\usepackage.*\{biblatex\}", re.MULTILINE) newglossary_suffix = [] # search to find all files included by Latex include_re = re.compile(r'^[^%\n]*\\(?:include|input){([^}]*)}', re.MULTILINE) includeOnly_re = re.compile(r'^[^%\n]*\\(?:include){([^}]*)}', re.MULTILINE) # search to find all graphics files included by Latex includegraphics_re = re.compile(r'^[^%\n]*\\(?:includegraphics(?:\[[^\]]+\])?){([^}]*)}', re.MULTILINE) # search to find all files opened by Latex (recorded in .log file) openout_re = re.compile(r"OUTPUT *(.*)") # list of graphics file extensions for TeX and LaTeX TexGraphics = SCons.Scanner.LaTeX.TexGraphics LatexGraphics = SCons.Scanner.LaTeX.LatexGraphics # An Action sufficient to build any generic tex file. TeXAction = None # An action to build a latex file. This action might be needed more # than once if we are dealing with labels and bibtex. LaTeXAction = None # An action to run BibTeX on a file. BibTeXAction = None # An action to run Biber on a file. BiberAction = None # An action to run MakeIndex on a file. MakeIndexAction = None # An action to run MakeIndex (for nomencl) on a file. MakeNclAction = None # An action to run MakeIndex (for glossary) on a file. MakeGlossaryAction = None # An action to run MakeIndex (for acronyms) on a file. MakeAcronymsAction = None # An action to run MakeIndex (for newglossary commands) on a file. MakeNewGlossaryAction = None # Used as a return value of modify_env_var if the variable is not set. _null = SCons.Scanner.LaTeX._null modify_env_var = SCons.Scanner.LaTeX.modify_env_var def check_file_error_message(utility, filename='log'): msg = '%s returned an error, check the %s file\n' % (utility, filename) sys.stdout.write(msg) def FindFile(name,suffixes,paths,env,requireExt=False): if requireExt: name,ext = SCons.Util.splitext(name) # if the user gave an extension use it. 
if ext: name = name + ext if Verbose: print(" searching for '%s' with extensions: " % name,suffixes) for path in paths: testName = os.path.join(path,name) if Verbose: print(" look for '%s'" % testName) if os.path.isfile(testName): if Verbose: print(" found '%s'" % testName) return env.fs.File(testName) else: name_ext = SCons.Util.splitext(testName)[1] if name_ext: continue # if no suffix try adding those passed in for suffix in suffixes: testNameExt = testName + suffix if Verbose: print(" look for '%s'" % testNameExt) if os.path.isfile(testNameExt): if Verbose: print(" found '%s'" % testNameExt) return env.fs.File(testNameExt) if Verbose: print(" did not find '%s'" % name) return None def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None): """A builder for LaTeX files that checks the output in the aux file and decides how many times to use LaTeXAction, and BibTeXAction.""" global must_rerun_latex # This routine is called with two actions. In this file for DVI builds # with LaTeXAction and from the pdflatex.py with PDFLaTeXAction # set this up now for the case where the user requests a different extension # for the target filename if (XXXLaTeXAction == LaTeXAction): callerSuffix = ".dvi" else: callerSuffix = env['PDFSUFFIX'] basename = SCons.Util.splitext(str(source[0]))[0] basedir = os.path.split(str(source[0]))[0] basefile = os.path.split(str(basename))[1] abspath = os.path.abspath(basedir) targetext = os.path.splitext(str(target[0]))[1] targetdir = os.path.split(str(target[0]))[0] saved_env = {} for var in SCons.Scanner.LaTeX.LaTeX.env_variables: saved_env[var] = modify_env_var(env, var, abspath) # Create base file names with the target directory since the auxiliary files # will be made there. That's because the *COM variables have the cd # command in the prolog. We check # for the existence of files before opening them--even ones like the # aux file that TeX always creates--to make it possible to write tests # with stubs that don't necessarily generate all of the same files. targetbase = os.path.join(targetdir, basefile) # if there is a \makeindex there will be a .idx and thus # we have to run makeindex at least once to keep the build # happy even if there is no index. 
# Same for glossaries, nomenclature, and acronyms src_content = source[0].get_text_contents() run_makeindex = makeindex_re.search(src_content) and not os.path.isfile(targetbase + '.idx') run_nomenclature = makenomenclature_re.search(src_content) and not os.path.isfile(targetbase + '.nlo') run_glossary = makeglossary_re.search(src_content) and not os.path.isfile(targetbase + '.glo') run_glossaries = makeglossaries_re.search(src_content) and not os.path.isfile(targetbase + '.glo') run_acronyms = makeacronyms_re.search(src_content) and not os.path.isfile(targetbase + '.acn') saved_hashes = {} suffix_nodes = {} for suffix in all_suffixes+sum(newglossary_suffix, []): theNode = env.fs.File(targetbase + suffix) suffix_nodes[suffix] = theNode saved_hashes[suffix] = theNode.get_csig() if Verbose: print("hashes: ",saved_hashes) must_rerun_latex = True # .aux files already processed by BibTex already_bibtexed = [] # # routine to update MD5 hash and compare # def check_MD5(filenode, suffix): global must_rerun_latex # two calls to clear old csig filenode.clear_memoized_values() filenode.ninfo = filenode.new_ninfo() new_md5 = filenode.get_csig() if saved_hashes[suffix] == new_md5: if Verbose: print("file %s not changed" % (targetbase+suffix)) return False # unchanged saved_hashes[suffix] = new_md5 must_rerun_latex = True if Verbose: print("file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5) return True # changed # generate the file name that latex will generate resultfilename = targetbase + callerSuffix count = 0 while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) : result = XXXLaTeXAction(target, source, env) if result != 0: return result count = count + 1 must_rerun_latex = False # Decide if various things need to be run, or run again. # Read the log file to find warnings/errors logfilename = targetbase + '.log' logContent = '' if os.path.isfile(logfilename): logContent = open(logfilename, "rb").read() # Read the fls file to find all .aux files flsfilename = targetbase + '.fls' flsContent = '' auxfiles = [] if os.path.isfile(flsfilename): flsContent = open(flsfilename, "rb").read() auxfiles = openout_aux_re.findall(flsContent) # remove duplicates dups = {} for x in auxfiles: dups[x] = 1 auxfiles = list(dups.keys()) bcffiles = [] if os.path.isfile(flsfilename): flsContent = open(flsfilename, "rb").read() bcffiles = openout_bcf_re.findall(flsContent) # remove duplicates dups = {} for x in bcffiles: dups[x] = 1 bcffiles = list(dups.keys()) if Verbose: print("auxfiles ",auxfiles) print("bcffiles ",bcffiles) # Now decide if bibtex will need to be run. # The information that bibtex reads from the .aux file is # pass-independent. If we find (below) that the .bbl file is unchanged, # then the last latex saw a correct bibliography. # Therefore only do this once # Go through all .aux files and remember the files already done. for auxfilename in auxfiles: if auxfilename not in already_bibtexed: already_bibtexed.append(auxfilename) target_aux = os.path.join(targetdir, auxfilename) if os.path.isfile(target_aux): content = open(target_aux, "rb").read() if content.find("bibdata") != -1: if Verbose: print("Need to run bibtex on ",auxfilename) bibfile = env.fs.File(SCons.Util.splitext(target_aux)[0]) result = BibTeXAction(bibfile, bibfile, env) if result != 0: check_file_error_message(env['BIBTEX'], 'blg') must_rerun_latex = True # Now decide if biber will need to be run. 
# When the backend for biblatex is biber (by choice or default) the # citation information is put in the .bcf file. # The information that biber reads from the .bcf file is # pass-independent. If we find (below) that the .bbl file is unchanged, # then the last latex saw a correct bibliography. # Therefore only do this once # Go through all .bcf files and remember the files already done. for bcffilename in bcffiles: if bcffilename not in already_bibtexed: already_bibtexed.append(bcffilename) target_bcf = os.path.join(targetdir, bcffilename) if os.path.isfile(target_bcf): content = open(target_bcf, "rb").read() if content.find("bibdata") != -1: if Verbose: print("Need to run biber on ",bcffilename) bibfile = env.fs.File(SCons.Util.splitext(target_bcf)[0]) result = BiberAction(bibfile, bibfile, env) if result != 0: check_file_error_message(env['BIBER'], 'blg') must_rerun_latex = True # Now decide if latex will need to be run again due to index. if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex): # We must run makeindex if Verbose: print("Need to run makeindex") idxfile = suffix_nodes['.idx'] result = MakeIndexAction(idxfile, idxfile, env) if result != 0: check_file_error_message(env['MAKEINDEX'], 'ilg') return result # TO-DO: need to add a way for the user to extend this list for whatever # auxiliary files they create in other (or their own) packages # Harder is case is where an action needs to be called -- that should be rare (I hope?) for index in check_suffixes: check_MD5(suffix_nodes[index],index) # Now decide if latex will need to be run again due to nomenclature. if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature): # We must run makeindex if Verbose: print("Need to run makeindex for nomenclature") nclfile = suffix_nodes['.nlo'] result = MakeNclAction(nclfile, nclfile, env) if result != 0: check_file_error_message('%s (nomenclature)' % env['MAKENCL'], 'nlg') #return result # Now decide if latex will need to be run again due to glossary. if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossaries) or (count == 1 and run_glossary): # We must run makeindex if Verbose: print("Need to run makeindex for glossary") glofile = suffix_nodes['.glo'] result = MakeGlossaryAction(glofile, glofile, env) if result != 0: check_file_error_message('%s (glossary)' % env['MAKEGLOSSARY'], 'glg') #return result # Now decide if latex will need to be run again due to acronyms. if check_MD5(suffix_nodes['.acn'],'.acn') or (count == 1 and run_acronyms): # We must run makeindex if Verbose: print("Need to run makeindex for acronyms") acrfile = suffix_nodes['.acn'] result = MakeAcronymsAction(acrfile, acrfile, env) if result != 0: check_file_error_message('%s (acronyms)' % env['MAKEACRONYMS'], 'alg') return result # Now decide if latex will need to be run again due to newglossary command. 
for ig in range(len(newglossary_suffix)): if check_MD5(suffix_nodes[newglossary_suffix[ig][2]],newglossary_suffix[ig][2]) or (count == 1): # We must run makeindex if Verbose: print("Need to run makeindex for newglossary") newglfile = suffix_nodes[newglossary_suffix[ig][2]] MakeNewGlossaryAction = SCons.Action.Action("$MAKENEWGLOSSARYCOM ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" % (newglossary_suffix[ig][2],newglossary_suffix[ig][0],newglossary_suffix[ig][1]), "$MAKENEWGLOSSARYCOMSTR") result = MakeNewGlossaryAction(newglfile, newglfile, env) if result != 0: check_file_error_message('%s (newglossary)' % env['MAKENEWGLOSSARY'], newglossary_suffix[ig][0]) return result # Now decide if latex needs to be run yet again to resolve warnings. if warning_rerun_re.search(logContent): must_rerun_latex = True if Verbose: print("rerun Latex due to latex or package rerun warning") if rerun_citations_re.search(logContent): must_rerun_latex = True if Verbose: print("rerun Latex due to 'Rerun to get citations correct' warning") if undefined_references_re.search(logContent): must_rerun_latex = True if Verbose: print("rerun Latex due to undefined references or citations") if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex): print("reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))) # end of while loop # rename Latex's output to what the target name is if not (str(target[0]) == resultfilename and os.path.isfile(resultfilename)): if os.path.isfile(resultfilename): print("move %s to %s" % (resultfilename, str(target[0]), )) shutil.move(resultfilename,str(target[0])) # Original comment (when TEXPICTS was not restored): # The TEXPICTS enviroment variable is needed by a dvi -> pdf step # later on Mac OSX so leave it # # It is also used when searching for pictures (implicit dependencies). # Why not set the variable again in the respective builder instead # of leaving local modifications in the environment? What if multiple # latex builds in different directories need different TEXPICTS? for var in SCons.Scanner.LaTeX.LaTeX.env_variables: if var == 'TEXPICTS': continue if saved_env[var] is _null: try: del env['ENV'][var] except KeyError: pass # was never set else: env['ENV'][var] = saved_env[var] return result def LaTeXAuxAction(target = None, source= None, env=None): result = InternalLaTeXAuxAction( LaTeXAction, target, source, env ) return result LaTeX_re = re.compile("\\\\document(style|class)") def is_LaTeX(flist,env,abspath): """Scan a file list to decide if it's TeX- or LaTeX-flavored.""" # We need to scan files that are included in case the # \documentclass command is in them. 
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS'] savedpath = modify_env_var(env, 'TEXINPUTS', abspath) paths = env['ENV']['TEXINPUTS'] if SCons.Util.is_List(paths): pass else: # Split at os.pathsep to convert into absolute path paths = paths.split(os.pathsep) # now that we have the path list restore the env if savedpath is _null: try: del env['ENV']['TEXINPUTS'] except KeyError: pass # was never set else: env['ENV']['TEXINPUTS'] = savedpath if Verbose: print("is_LaTeX search path ",paths) print("files to search :",flist) # Now that we have the search path and file list, check each one for f in flist: if Verbose: print(" checking for Latex source ",str(f)) content = f.get_text_contents() if LaTeX_re.search(content): if Verbose: print("file %s is a LaTeX file" % str(f)) return 1 if Verbose: print("file %s is not a LaTeX file" % str(f)) # now find included files inc_files = [ ] inc_files.extend( include_re.findall(content) ) if Verbose: print("files included by '%s': "%str(f),inc_files) # inc_files is list of file names as given. need to find them # using TEXINPUTS paths. # search the included files for src in inc_files: srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False) # make this a list since is_LaTeX takes a list. fileList = [srcNode,] if Verbose: print("FindFile found ",srcNode) if srcNode is not None: file_test = is_LaTeX(fileList, env, abspath) # return on first file that finds latex is needed. if file_test: return file_test if Verbose: print(" done scanning ",str(f)) return 0 def TeXLaTeXFunction(target = None, source= None, env=None): """A builder for TeX and LaTeX that scans the source file to decide the "flavor" of the source and then executes the appropriate program.""" # find these paths for use in is_LaTeX to search for included files basedir = os.path.split(str(source[0]))[0] abspath = os.path.abspath(basedir) if is_LaTeX(source,env,abspath): result = LaTeXAuxAction(target,source,env) if result != 0: check_file_error_message(env['LATEX']) else: result = TeXAction(target,source,env) if result != 0: check_file_error_message(env['TEX']) return result def TeXLaTeXStrFunction(target = None, source= None, env=None): """A strfunction for TeX and LaTeX that scans the source file to decide the "flavor" of the source and then returns the appropriate command string.""" if env.GetOption("no_exec"): # find these paths for use in is_LaTeX to search for included files basedir = os.path.split(str(source[0]))[0] abspath = os.path.abspath(basedir) if is_LaTeX(source,env,abspath): result = env.subst('$LATEXCOM',0,target,source)+" ..." else: result = env.subst("$TEXCOM",0,target,source)+" ..." else: result = '' return result def tex_eps_emitter(target, source, env): """An emitter for TeX and LaTeX sources when executing tex or latex. It will accept .ps and .eps graphics files """ (target, source) = tex_emitter_core(target, source, env, TexGraphics) return (target, source) def tex_pdf_emitter(target, source, env): """An emitter for TeX and LaTeX sources when executing pdftex or pdflatex. 
It will accept graphics files of types .pdf, .jpg, .png, .gif, and .tif """ (target, source) = tex_emitter_core(target, source, env, LatexGraphics) return (target, source) def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files): """ For theFile (a Node) update any file_tests and search for graphics files then find all included files and call ScanFiles recursively for each of them""" content = theFile.get_text_contents() if Verbose: print(" scanning ",str(theFile)) for i in range(len(file_tests_search)): if file_tests[i][0] is None: if Verbose: print("scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1]) file_tests[i][0] = file_tests_search[i].search(content) if Verbose and file_tests[i][0]: print(" found match for ",file_tests[i][1][-1]) # for newglossary insert the suffixes in file_tests[i] if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary': findresult = file_tests_search[i].findall(content) for l in range(len(findresult)) : (file_tests[i][1]).insert(0,'.'+findresult[l][3]) (file_tests[i][1]).insert(0,'.'+findresult[l][2]) (file_tests[i][1]).insert(0,'.'+findresult[l][0]) suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ] newglossary_suffix.append(suffix_list) if Verbose: print(" new suffixes for newglossary ",newglossary_suffix) incResult = includeOnly_re.search(content) if incResult: aux_files.append(os.path.join(targetdir, incResult.group(1))) if Verbose: print("\include file names : ", aux_files) # recursively call this on each of the included files inc_files = [ ] inc_files.extend( include_re.findall(content) ) if Verbose: print("files included by '%s': "%str(theFile),inc_files) # inc_files is list of file names as given. need to find them # using TEXINPUTS paths. for src in inc_files: srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False) if srcNode is not None: file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files) if Verbose: print(" done scanning ",str(theFile)) return file_tests def tex_emitter_core(target, source, env, graphics_extensions): """An emitter for TeX and LaTeX sources. For LaTeX sources we try and find the common created files that are needed on subsequent runs of latex to finish tables of contents, bibliographies, indices, lists of figures, and hyperlink references. 
""" basename = SCons.Util.splitext(str(source[0]))[0] basefile = os.path.split(str(basename))[1] targetdir = os.path.split(str(target[0]))[0] targetbase = os.path.join(targetdir, basefile) basedir = os.path.split(str(source[0]))[0] abspath = os.path.abspath(basedir) target[0].attributes.path = abspath # # file names we will make use of in searching the sources and log file # emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes auxfilename = targetbase + '.aux' logfilename = targetbase + '.log' flsfilename = targetbase + '.fls' syncfilename = targetbase + '.synctex.gz' env.SideEffect(auxfilename,target[0]) env.SideEffect(logfilename,target[0]) env.SideEffect(flsfilename,target[0]) env.SideEffect(syncfilename,target[0]) if Verbose: print("side effect :",auxfilename,logfilename,flsfilename,syncfilename) env.Clean(target[0],auxfilename) env.Clean(target[0],logfilename) env.Clean(target[0],flsfilename) env.Clean(target[0],syncfilename) content = source[0].get_text_contents() # set up list with the regular expressions # we use to find features used file_tests_search = [auxfile_re, makeindex_re, bibliography_re, bibunit_re, multibib_re, addbibresource_re, tableofcontents_re, listoffigures_re, listoftables_re, hyperref_re, makenomenclature_re, makeglossary_re, makeglossaries_re, makeacronyms_re, beamer_re, newglossary_re, biblatex_re ] # set up list with the file suffixes that need emitting # when a feature is found file_tests_suff = [['.aux','aux_file'], ['.idx', '.ind', '.ilg','makeindex'], ['.bbl', '.blg','bibliography'], ['.bbl', '.blg','bibunit'], ['.bbl', '.blg','multibib'], ['.bbl', '.blg','.bcf','addbibresource'], ['.toc','contents'], ['.lof','figures'], ['.lot','tables'], ['.out','hyperref'], ['.nlo', '.nls', '.nlg','nomenclature'], ['.glo', '.gls', '.glg','glossary'], ['.glo', '.gls', '.glg','glossaries'], ['.acn', '.acr', '.alg','acronyms'], ['.nav', '.snm', '.out', '.toc','beamer'], ['newglossary',], ['.bcf', '.blg','biblatex'] ] # for newglossary the suffixes are added as we find the command # build the list of lists file_tests = [] for i in range(len(file_tests_search)): file_tests.append( [None, file_tests_suff[i]] ) # TO-DO: need to add a way for the user to extend this list for whatever # auxiliary files they create in other (or their own) packages # get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS'] savedpath = modify_env_var(env, 'TEXINPUTS', abspath) paths = env['ENV']['TEXINPUTS'] if SCons.Util.is_List(paths): pass else: # Split at os.pathsep to convert into absolute path paths = paths.split(os.pathsep) # now that we have the path list restore the env if savedpath is _null: try: del env['ENV']['TEXINPUTS'] except KeyError: pass # was never set else: env['ENV']['TEXINPUTS'] = savedpath if Verbose: print("search path ",paths) # scan all sources for side effect files aux_files = [] file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files) for (theSearch,suffix_list) in file_tests: # add side effects if feature is present.If file is to be generated,add all side effects if Verbose and theSearch: print("check side effects for ",suffix_list[-1]) if (theSearch != None) or (not source[0].exists() ): file_list = [targetbase,] # for bibunit we need a list of files if suffix_list[-1] == 'bibunit': file_basename = os.path.join(targetdir, 'bu*.aux') file_list = glob.glob(file_basename) # remove the suffix '.aux' for i in range(len(file_list)): 
file_list.append(SCons.Util.splitext(file_list[i])[0]) # for multibib we need a list of files if suffix_list[-1] == 'multibib': for multibibmatch in multibib_re.finditer(content): if Verbose: print("multibib match ",multibibmatch.group(1)) if multibibmatch != None: baselist = multibibmatch.group(1).split(',') if Verbose: print("multibib list ", baselist) for i in range(len(baselist)): file_list.append(os.path.join(targetdir, baselist[i])) # now define the side effects for file_name in file_list: for suffix in suffix_list[:-1]: env.SideEffect(file_name + suffix,target[0]) if Verbose: print("side effect tst :",file_name + suffix, " target is ",str(target[0])) env.Clean(target[0],file_name + suffix) for aFile in aux_files: aFile_base = SCons.Util.splitext(aFile)[0] env.SideEffect(aFile_base + '.aux',target[0]) if Verbose: print("side effect aux :",aFile_base + '.aux') env.Clean(target[0],aFile_base + '.aux') # read fls file to get all other files that latex creates and will read on the next pass # remove files from list that we explicitly dealt with above if os.path.isfile(flsfilename): content = open(flsfilename, "rb").read() out_files = openout_re.findall(content) myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf'] for filename in out_files[:]: if filename in myfiles: out_files.remove(filename) env.SideEffect(out_files,target[0]) if Verbose: print("side effect fls :",out_files) env.Clean(target[0],out_files) return (target, source) TeXLaTeXAction = None def generate(env): """Add Builders and construction variables for TeX to an Environment.""" global TeXLaTeXAction if TeXLaTeXAction is None: TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction, strfunction=TeXLaTeXStrFunction) env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes) generate_common(env) from . import dvi dvi.generate(env) bld = env['BUILDERS']['DVI'] bld.add_action('.tex', TeXLaTeXAction) bld.add_emitter('.tex', tex_eps_emitter) def generate_darwin(env): try: environ = env['ENV'] except KeyError: environ = {} env['ENV'] = environ if (platform.system() == 'Darwin'): try: ospath = env['ENV']['PATHOSX'] except: ospath = None if ospath: env.AppendENVPath('PATH', ospath) def generate_common(env): """Add internal Builders and construction variables for LaTeX to an Environment.""" # Add OSX system paths so TeX tools can be found # when a list of tools is given the exists() method is not called generate_darwin(env) # A generic tex file Action, sufficient for all tex files. global TeXAction if TeXAction is None: TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR") # An Action to build a latex file. This might be needed more # than once if we are dealing with labels and bibtex. global LaTeXAction if LaTeXAction is None: LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR") # Define an action to run BibTeX on a file. global BibTeXAction if BibTeXAction is None: BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR") # Define an action to run Biber on a file. global BiberAction if BiberAction is None: BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR") # Define an action to run MakeIndex on a file. global MakeIndexAction if MakeIndexAction is None: MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR") # Define an action to run MakeIndex on a file for nomenclatures. global MakeNclAction if MakeNclAction is None: MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR") # Define an action to run MakeIndex on a file for glossaries. 
global MakeGlossaryAction if MakeGlossaryAction is None: MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR") # Define an action to run MakeIndex on a file for acronyms. global MakeAcronymsAction if MakeAcronymsAction is None: MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR") try: environ = env['ENV'] except KeyError: environ = {} env['ENV'] = environ # Some Linux platforms have pdflatex set up in a way # that requires that the HOME environment variable be set. # Add it here if defined. v = os.environ.get('HOME') if v: environ['HOME'] = v CDCOM = 'cd ' if platform.system() == 'Windows': # allow cd command to change drives on Windows CDCOM = 'cd /D ' env['TEX'] = 'tex' env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}' env['PDFTEX'] = 'pdftex' env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}' env['LATEX'] = 'latex' env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}' env['LATEXRETRIES'] = 4 env['PDFLATEX'] = 'pdflatex' env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}' env['BIBTEX'] = 'bibtex' env['BIBTEXFLAGS'] = SCons.Util.CLVar('') env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}' env['BIBER'] = 'biber' env['BIBERFLAGS'] = SCons.Util.CLVar('') env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}' env['MAKEINDEX'] = 'makeindex' env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('') env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}' env['MAKEGLOSSARY'] = 'makeindex' env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist' env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg') env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls' env['MAKEACRONYMS'] = 'makeindex' env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist' env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg') env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr' env['MAKENCL'] = 'makeindex' env['MAKENCLSTYLE'] = 'nomencl.ist' env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg' env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls' env['MAKENEWGLOSSARY'] = 'makeindex' env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY ' def exists(env): generate_darwin(env) return env.Detect('tex') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
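# Usage sketch (illustrative only, not part of this module). generate() above
# wires TeXLaTeXAction and tex_eps_emitter into the DVI builder, so a typical
# SConstruct needs nothing more than this (target/source names are
# placeholders):
#
#     env = Environment(tools=['default', 'tex'])
#     env.DVI(target='report.dvi', source='report.tex')
#
# The emitter declares the .aux/.log/.fls side effects for cleaning, and
# InternalLaTeXAuxAction reruns latex (plus bibtex/biber/makeindex when their
# inputs change) until the rerun warnings disappear or LATEXRETRIES is
# exhausted.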
import os from django.utils import six from django.core.files.base import File, ContentFile from django.core.files.storage import ( default_storage, Storage) from django.db.models.fields.files import ImageFieldFile, FieldFile from django.core.files.images import get_image_dimensions from django.utils.safestring import mark_safe from django.utils.html import escape from django.utils import timezone from easy_thumbnails import engine, exceptions, models, utils, signals, storage from easy_thumbnails.alias import aliases from easy_thumbnails.conf import settings from easy_thumbnails.options import ThumbnailOptions def get_thumbnailer(obj, relative_name=None): """ Get a :class:`Thumbnailer` for a source file. The ``obj`` argument is usually either one of the following: * ``FieldFile`` instance (i.e. a model instance file/image field property). * A string, which will be used as the relative name (the source will be set to the default storage). * ``Storage`` instance - the ``relative_name`` argument must also be provided. Or it could be: * A file-like instance - the ``relative_name`` argument must also be provided. In this case, the thumbnailer won't use or create a cached reference to the thumbnail (i.e. a new thumbnail will be created for every :meth:`Thumbnailer.get_thumbnail` call). If ``obj`` is a ``Thumbnailer`` instance, it will just be returned. If it's an object with an ``easy_thumbnails_thumbnailer`` then the attribute is simply returned under the assumption it is a Thumbnailer instance) """ if hasattr(obj, 'easy_thumbnails_thumbnailer'): return obj.easy_thumbnails_thumbnailer if isinstance(obj, Thumbnailer): return obj elif isinstance(obj, FieldFile): if not relative_name: relative_name = obj.name return ThumbnailerFieldFile(obj.instance, obj.field, relative_name) source_storage = None if isinstance(obj, six.string_types): relative_name = obj obj = None if not relative_name: raise ValueError( "If object is not a FieldFile or Thumbnailer instance, the " "relative name must be provided") if isinstance(obj, File): obj = obj.file if isinstance(obj, Storage) or obj == default_storage: source_storage = obj obj = None return Thumbnailer( file=obj, name=relative_name, source_storage=source_storage, remote_source=obj is not None) def generate_all_aliases(fieldfile, include_global): """ Generate all of a file's aliases. :param fieldfile: A ``FieldFile`` instance. :param include_global: A boolean which determines whether to generate thumbnails for project-wide aliases in addition to field, model, and app specific aliases. """ all_options = aliases.all(fieldfile, include_global=include_global) if all_options: thumbnailer = get_thumbnailer(fieldfile) for options in all_options.values(): thumbnailer.get_thumbnail(options) def database_get_image_dimensions(file, close=False, dimensions=None): """ Returns the (width, height) of an image, given ThumbnailFile. Set 'close' to True to close the file at the end if it is initially in an open state. Will attempt to get the dimensions from the file itself if they aren't in the db. 
""" storage_hash = utils.get_storage_hash(file.storage) dimensions = None dimensions_cache = None try: thumbnail = models.Thumbnail.objects.select_related('dimensions').get( storage_hash=storage_hash, name=file.name) except models.Thumbnail.DoesNotExist: thumbnail = None else: try: dimensions_cache = thumbnail.dimensions except models.ThumbnailDimensions.DoesNotExist: dimensions_cache = None if dimensions_cache: return dimensions_cache.width, dimensions_cache.height dimensions = get_image_dimensions(file, close=close) if settings.THUMBNAIL_CACHE_DIMENSIONS and thumbnail: dimensions_cache = models.ThumbnailDimensions(thumbnail=thumbnail) dimensions_cache.width, dimensions_cache.height = dimensions dimensions_cache.save() return dimensions class FakeField(object): name = 'fake' def __init__(self, storage=None): if storage is None: storage = default_storage self.storage = storage def generate_filename(self, instance, name, *args, **kwargs): return name class FakeInstance(object): def save(self, *args, **kwargs): pass class ThumbnailFile(ImageFieldFile): """ A thumbnailed file. This can be used just like a Django model instance's property for a file field (i.e. an ``ImageFieldFile`` object). """ def __init__(self, name, file=None, storage=None, thumbnail_options=None, *args, **kwargs): fake_field = FakeField(storage=storage) super(ThumbnailFile, self).__init__( FakeInstance(), fake_field, name, *args, **kwargs) del self.field if file: self.file = file if thumbnail_options is None: thumbnail_options = ThumbnailOptions() elif not isinstance(thumbnail_options, ThumbnailOptions): thumbnail_options = ThumbnailOptions(thumbnail_options) self.thumbnail_options = thumbnail_options def save(self, *args, **kwargs): # Can't save a ``ThumbnailFile`` directly. raise NotImplementedError() def delete(self, *args, **kwargs): # Can't delete a ``ThumbnailFile`` directly, it doesn't have a # reference to the source image, so it can't update the cache. If you # really need to do this, do it with ``self.storage.delete`` directly. raise NotImplementedError() # Be consistant with standard behaviour, even though these methods don't # actually alter data any more. save.alters_data = True delete.alters_data = True def _get_image(self): """ Get a PIL Image instance of this file. The image is cached to avoid the file needing to be read again if the function is called again. """ if not hasattr(self, '_image_cache'): from easy_thumbnails.source_generators import pil_image self.image = pil_image(self) return self._image_cache def _set_image(self, image): """ Set the image for this file. This also caches the dimensions of the image. """ if image: self._image_cache = image self._dimensions_cache = image.size else: if hasattr(self, '_image_cache'): del self._cached_image if hasattr(self, '_dimensions_cache'): del self._dimensions_cache image = property(_get_image, _set_image) def tag(self, alt='', use_size=None, **attrs): """ Return a standard XHTML ``<img ... />`` tag for this field. :param alt: The ``alt=""`` text for the tag. Defaults to ``''``. :param use_size: Whether to get the size of the thumbnail image for use in the tag attributes. If ``None`` (default), the size will only be used it if won't result in a remote file retrieval. All other keyword parameters are added as (properly escaped) extra attributes to the `img` tag. 
""" if use_size is None: if getattr(self, '_dimensions_cache', None): use_size = True else: try: self.storage.path(self.name) use_size = True except NotImplementedError: use_size = False attrs['alt'] = alt attrs['src'] = self.url if use_size: attrs.update(dict(width=self.width, height=self.height)) attrs = ' '.join(['%s="%s"' % (key, escape(value)) for key, value in sorted(attrs.items())]) return mark_safe('<img %s />' % attrs) def _get_file(self): self._require_file() if not hasattr(self, '_file') or self._file is None: self._file = self.storage.open(self.name, 'rb') return self._file def _set_file(self, value): if value is not None and not isinstance(value, File): value = File(value) self._file = value self._committed = False def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) def open(self, mode=None, *args, **kwargs): if self.closed and self.name: mode = mode or getattr(self, 'mode', None) or 'rb' self.file = self.storage.open(self.name, mode) else: return super(ThumbnailFile, self).open(mode, *args, **kwargs) def _get_image_dimensions(self): if not hasattr(self, '_dimensions_cache'): close = self.closed self.open() self._dimensions_cache = database_get_image_dimensions( self, close=close) return self._dimensions_cache def set_image_dimensions(self, thumbnail): """ Set image dimensions from the cached dimensions of a ``Thumbnail`` model instance. """ try: dimensions = getattr(thumbnail, 'dimensions', None) except models.ThumbnailDimensions.DoesNotExist: dimensions = None if not dimensions: return False self._dimensions_cache = dimensions.size return self._dimensions_cache class Thumbnailer(File): """ A file-like object which provides some methods to generate thumbnail images. You can subclass this object and override the following properties to change the defaults (pulled from the default settings): * source_generators * thumbnail_processors """ #: A list of source generators to use. If ``None``, will use the default #: generators defined in settings. source_generators = None #: A list of thumbnail processors. If ``None``, will use the default #: processors defined in settings. thumbnail_processors = None def __init__(self, file=None, name=None, source_storage=None, thumbnail_storage=None, remote_source=False, generate=True, *args, **kwargs): super(Thumbnailer, self).__init__(file, name, *args, **kwargs) if source_storage is None: source_storage = default_storage self.source_storage = source_storage if thumbnail_storage is None: thumbnail_storage = storage.thumbnail_default_storage self.thumbnail_storage = thumbnail_storage self.remote_source = remote_source self.alias_target = None self.generate = generate # Set default properties. For backwards compatibilty, check to see # if the attribute exists already (it could be set as a class property # on a subclass) before getting it from settings. for default in ( 'basedir', 'subdir', 'prefix', 'quality', 'extension', 'preserve_extensions', 'transparency_extension', 'check_cache_miss', 'high_resolution', 'highres_infix', 'namer'): attr_name = 'thumbnail_%s' % default if getattr(self, attr_name, None) is None: value = getattr(settings, attr_name.upper()) setattr(self, attr_name, value) def __getitem__(self, alias): """ Retrieve a thumbnail matching the alias options (or raise a ``KeyError`` if no such alias exists). 
""" options = aliases.get(alias, target=self.alias_target) if not options: raise KeyError(alias) return self.get_thumbnail(options, silent_template_exception=True) def get_options(self, thumbnail_options, **kwargs): """ Get the thumbnail options that includes the default options for this thumbnailer (and the project-wide default options). """ if isinstance(thumbnail_options, ThumbnailOptions): return thumbnail_options args = [] if thumbnail_options is not None: args.append(thumbnail_options) opts = ThumbnailOptions(*args, **kwargs) if 'quality' not in thumbnail_options: opts['quality'] = self.thumbnail_quality return opts def generate_thumbnail(self, thumbnail_options, high_resolution=False, silent_template_exception=False): """ Return an unsaved ``ThumbnailFile`` containing a thumbnail image. The thumbnail image is generated using the ``thumbnail_options`` dictionary. """ thumbnail_options = self.get_options(thumbnail_options) orig_size = thumbnail_options['size'] # remember original size # Size sanity check. min_dim, max_dim = 0, 0 for dim in orig_size: try: dim = int(dim) except (TypeError, ValueError): continue min_dim, max_dim = min(min_dim, dim), max(max_dim, dim) if max_dim == 0 or min_dim < 0: raise exceptions.EasyThumbnailsError( "The source image is an invalid size (%sx%s)" % orig_size) if high_resolution: thumbnail_options['size'] = (orig_size[0] * 2, orig_size[1] * 2) image = engine.generate_source_image( self, thumbnail_options, self.source_generators, fail_silently=silent_template_exception) if image is None: raise exceptions.InvalidImageFormatError( "The source file does not appear to be an image") thumbnail_image = engine.process_image(image, thumbnail_options, self.thumbnail_processors) if high_resolution: thumbnail_options['size'] = orig_size # restore original size filename = self.get_thumbnail_name( thumbnail_options, transparent=utils.is_transparent(thumbnail_image), high_resolution=high_resolution) quality = thumbnail_options['quality'] subsampling = thumbnail_options['subsampling'] img = engine.save_image( thumbnail_image, filename=filename, quality=quality, subsampling=subsampling) data = img.read() thumbnail = ThumbnailFile( filename, file=ContentFile(data), storage=self.thumbnail_storage, thumbnail_options=thumbnail_options) thumbnail.image = thumbnail_image thumbnail._committed = False return thumbnail def get_thumbnail_name(self, thumbnail_options, transparent=False, high_resolution=False): """ Return a thumbnail filename for the given ``thumbnail_options`` dictionary and ``source_name`` (which defaults to the File's ``name`` if not provided). 
""" thumbnail_options = self.get_options(thumbnail_options) path, source_filename = os.path.split(self.name) source_extension = os.path.splitext(source_filename)[1][1:] preserve_extensions = self.thumbnail_preserve_extensions if preserve_extensions and ( preserve_extensions is True or source_extension.lower() in preserve_extensions): extension = source_extension elif transparent: extension = self.thumbnail_transparency_extension else: extension = self.thumbnail_extension extension = extension or 'jpg' prepared_opts = thumbnail_options.prepared_options() opts_text = '_'.join(prepared_opts) data = {'opts': opts_text} basedir = self.thumbnail_basedir % data subdir = self.thumbnail_subdir % data if isinstance(self.thumbnail_namer, six.string_types): namer_func = utils.dynamic_import(self.thumbnail_namer) else: namer_func = self.thumbnail_namer filename = namer_func( thumbnailer=self, source_filename=source_filename, thumbnail_extension=extension, thumbnail_options=thumbnail_options, prepared_options=prepared_opts, ) if high_resolution: filename = self.thumbnail_highres_infix.join( os.path.splitext(filename)) filename = '%s%s' % (self.thumbnail_prefix, filename) return os.path.join(basedir, path, subdir, filename) def get_existing_thumbnail(self, thumbnail_options, high_resolution=False): """ Return a ``ThumbnailFile`` containing an existing thumbnail for a set of thumbnail options, or ``None`` if not found. """ thumbnail_options = self.get_options(thumbnail_options) names = [ self.get_thumbnail_name( thumbnail_options, transparent=False, high_resolution=high_resolution)] transparent_name = self.get_thumbnail_name( thumbnail_options, transparent=True, high_resolution=high_resolution) if transparent_name not in names: names.append(transparent_name) for filename in names: exists = self.thumbnail_exists(filename) if exists: thumbnail_file = ThumbnailFile( name=filename, storage=self.thumbnail_storage, thumbnail_options=thumbnail_options) if settings.THUMBNAIL_CACHE_DIMENSIONS: # If this wasn't local storage, exists will be a thumbnail # instance so we can store the image dimensions now to save # a future potential query. thumbnail_file.set_image_dimensions(exists) return thumbnail_file def get_thumbnail(self, thumbnail_options, save=True, generate=None, silent_template_exception=False): """ Return a ``ThumbnailFile`` containing a thumbnail. If a matching thumbnail already exists, it will simply be returned. By default (unless the ``Thumbnailer`` was instanciated with ``generate=False``), thumbnails that don't exist are generated. Otherwise ``None`` is returned. Force the generation behaviour by setting the ``generate`` param to either ``True`` or ``False`` as required. The new thumbnail image is generated using the ``thumbnail_options`` dictionary. If the ``save`` argument is ``True`` (default), the generated thumbnail will be saved too. 
""" thumbnail_options = self.get_options(thumbnail_options) if generate is None: generate = self.generate thumbnail = self.get_existing_thumbnail(thumbnail_options) if not thumbnail: if generate: thumbnail = self.generate_thumbnail( thumbnail_options, silent_template_exception=silent_template_exception) if save: self.save_thumbnail(thumbnail) else: signals.thumbnail_missed.send( sender=self, options=thumbnail_options, high_resolution=False) if 'HIGH_RESOLUTION' in thumbnail_options: generate_high_resolution = thumbnail_options.get('HIGH_RESOLUTION') else: generate_high_resolution = self.thumbnail_high_resolution if generate_high_resolution: thumbnail.high_resolution = self.get_existing_thumbnail( thumbnail_options, high_resolution=True) if not thumbnail.high_resolution: if generate: thumbnail.high_resolution = self.generate_thumbnail( thumbnail_options, high_resolution=True, silent_template_exception=silent_template_exception) if save: self.save_thumbnail(thumbnail.high_resolution) else: signals.thumbnail_missed.send( sender=self, options=thumbnail_options, high_resolution=False) return thumbnail def save_thumbnail(self, thumbnail): """ Save a thumbnail to the thumbnail_storage. Also triggers the ``thumbnail_created`` signal and caches the thumbnail values and dimensions for future lookups. """ filename = thumbnail.name try: self.thumbnail_storage.delete(filename) except Exception: pass self.thumbnail_storage.save(filename, thumbnail) thumb_cache = self.get_thumbnail_cache( thumbnail.name, create=True, update=True) # Cache thumbnail dimensions. if settings.THUMBNAIL_CACHE_DIMENSIONS: dimensions_cache, created = ( models.ThumbnailDimensions.objects.get_or_create( thumbnail=thumb_cache, defaults={'width': thumbnail.width, 'height': thumbnail.height})) if not created: dimensions_cache.width = thumbnail.width dimensions_cache.height = thumbnail.height dimensions_cache.save() signals.thumbnail_created.send(sender=thumbnail) def thumbnail_exists(self, thumbnail_name): """ Calculate whether the thumbnail already exists and that the source is not newer than the thumbnail. If the source and thumbnail file storages are local, their file modification times are used. Otherwise the database cached modification times are used. 
""" if self.remote_source: return False if utils.is_storage_local(self.source_storage): source_modtime = utils.get_modified_time( self.source_storage, self.name) else: source = self.get_source_cache() if not source: return False source_modtime = source.modified if not source_modtime: return False local_thumbnails = utils.is_storage_local(self.thumbnail_storage) if local_thumbnails: thumbnail_modtime = utils.get_modified_time( self.thumbnail_storage, thumbnail_name) if not thumbnail_modtime: return False return source_modtime <= thumbnail_modtime thumbnail = self.get_thumbnail_cache(thumbnail_name) if not thumbnail: return False thumbnail_modtime = thumbnail.modified if thumbnail.modified and source_modtime <= thumbnail.modified: return thumbnail return False def get_source_cache(self, create=False, update=False): if self.remote_source: return None if hasattr(self, '_source_cache') and not update: if self._source_cache or not create: return self._source_cache update_modified = (update or create) and timezone.now() self._source_cache = models.Source.objects.get_file( create=create, update_modified=update_modified, storage=self.source_storage, name=self.name, check_cache_miss=self.thumbnail_check_cache_miss) return self._source_cache def get_thumbnail_cache(self, thumbnail_name, create=False, update=False): if self.remote_source: return None source = self.get_source_cache(create=True) update_modified = (update or create) and timezone.now() return models.Thumbnail.objects.get_file( create=create, update_modified=update_modified, storage=self.thumbnail_storage, source=source, name=thumbnail_name, check_cache_miss=self.thumbnail_check_cache_miss) def open(self, mode=None): if self.closed: mode = mode or getattr(self, 'mode', None) or 'rb' self.file = self.source_storage.open(self.name, mode) else: self.seek(0) # open() doesn't alter the file's contents, but it does reset the pointer. open.alters_data = True class ThumbnailerFieldFile(FieldFile, Thumbnailer): """ A field file which provides some methods for generating (and returning) thumbnail images. """ def __init__(self, *args, **kwargs): super(ThumbnailerFieldFile, self).__init__(*args, **kwargs) self.source_storage = self.field.storage thumbnail_storage = getattr(self.field, 'thumbnail_storage', None) if thumbnail_storage: self.thumbnail_storage = thumbnail_storage self.alias_target = self def save(self, name, content, *args, **kwargs): """ Save the file, also saving a reference to the thumbnail cache Source model. """ super(ThumbnailerFieldFile, self).save(name, content, *args, **kwargs) self.get_source_cache(create=True, update=True) def delete(self, *args, **kwargs): """ Delete the image, along with any generated thumbnails. """ source_cache = self.get_source_cache() # First, delete any related thumbnails. self.delete_thumbnails(source_cache) # Next, delete the source image. super(ThumbnailerFieldFile, self).delete(*args, **kwargs) # Finally, delete the source cache entry. if source_cache: source_cache.delete() delete.alters_data = True def delete_thumbnails(self, source_cache=None): """ Delete any thumbnails generated from the source image. :arg source_cache: An optional argument only used for optimisation where the source cache instance is already known. :returns: The number of files deleted. 
""" source_cache = self.get_source_cache() deleted = 0 if source_cache: thumbnail_storage_hash = utils.get_storage_hash( self.thumbnail_storage) for thumbnail_cache in source_cache.thumbnails.all(): # Only attempt to delete the file if it was stored using the # same storage as is currently used. if thumbnail_cache.storage_hash == thumbnail_storage_hash: self.thumbnail_storage.delete(thumbnail_cache.name) # Delete the cache thumbnail instance too. thumbnail_cache.delete() deleted += 1 return deleted delete_thumbnails.alters_data = True def get_thumbnails(self, *args, **kwargs): """ Return an iterator which returns ThumbnailFile instances. """ # First, delete any related thumbnails. source_cache = self.get_source_cache() if source_cache: thumbnail_storage_hash = utils.get_storage_hash( self.thumbnail_storage) for thumbnail_cache in source_cache.thumbnails.all(): # Only iterate files which are stored using the current # thumbnail storage. if thumbnail_cache.storage_hash == thumbnail_storage_hash: yield ThumbnailFile(name=thumbnail_cache.name, storage=self.thumbnail_storage) class ThumbnailerImageFieldFile(ImageFieldFile, ThumbnailerFieldFile): """ A field file which provides some methods for generating (and returning) thumbnail images. """ def save(self, name, content, *args, **kwargs): """ Save the image. The image will be resized down using a ``ThumbnailField`` if ``resize_source`` (a dictionary of thumbnail options) is provided by the field. """ options = getattr(self.field, 'resize_source', None) if options: if 'quality' not in options: options['quality'] = self.thumbnail_quality content = Thumbnailer(content, name).generate_thumbnail(options) # If the generated extension differs from the original, use it # instead. orig_name, ext = os.path.splitext(name) generated_ext = os.path.splitext(content.name)[1] if generated_ext.lower() != ext.lower(): name = orig_name + generated_ext super(ThumbnailerImageFieldFile, self).save(name, content, *args, **kwargs)
# -*- coding: utf-8 -*- """ Created on Sat May 4 22:07:54 2013 @author: matt Tries to allow users to specify simple UI elements for functions using decorators. For controls, it makes more sense to decorate the set function, specifying an optional get function. def get_volume(self): return self.volume @slider(getfunc=get_volume) def volume(self, newval): self.volume=newval This is framework agnostic, and needs a framework-specific module to map a decorated object into a UI. We expect the framework to provide a closable object called Framework exposing two functions: Framework.get_obj_widget(o) Should return a widget compatible with the framework, containing the elements from the decorated object o. This is designed to be incorporated in an application already using framewowrk. Framework.display_widgets(ws) Displays a list of widgets. These can be created from single or multiple calls to get_obj_widget. Optionally, the class can also expose Framework.get_main_window() Which returns the main window for the framework. To display a decorated object o, the following should be fine: with contextlib.closing(Framework()) as f: f.display_widgets([f.get_obj_widget(o)]) We may also want this object to provide a way to execute functions on the UI thread? """ from functools import wraps import logging import types class FrameworkBase(object): def get_main_window(self): """return a framework-dependant UI Window, if available""" raise NotImplementedError("get_main_window") def get_obj_widget(self, o): """return a framework-dependant widget for the decorated object o""" raise NotImplementedError("get_obj_wdiget") def display_widgets(self, o): """display the framework-dependant widgets ws. Blocks until window is dismissed""" raise NotImplementedError("display_widgets") def run_on_ui_thread(self, f): """Runs the function f on a UI thread""" raise NotImplementedError("run_on_ui_thread") def close(self): """Frees up any resources""" raise NotImplementedError("close") def display(self, o): """ Displays a UI for the decorated object o, blocking until dismissed """ self.display_widgets([self.get_obj_widget(o)]) def get_filename(self, mode="load"): """Returns a user specified file name. mode should be 'load' or 'save'""" raise NotImplementedError("close") def metadata(name, props): """ Decorator generator that wraps simple metadata to a function. Returns the function as is, with an attribute called name set to props eg >>> @metadata("mydata", {1:1, 2:4, "bob":"fish"}) ... def myfunc(): ... print "my func" ... >>> myfunc.mydata["bob"] 'fish' """ def metadata_imp(func): @wraps(func) def wrapped_metadata(*args, **kwargs): return func(*args, **kwargs) setattr(wrapped_metadata, name, props) return wrapped_metadata return metadata_imp # file for some UI stuff where properties determine display properties def slider(getfunc=None, minimum=0, maximum=100, scale=1): """ Decorator generator creates a decorator to suggest that any UI uses a slider to set the given function. An optional getfunc, if present, is used to get the current value at startup. It must be defined before this decorator is applied! See source file for examples """ def my_slider(func): # note wraps must be the bottom decorator, meaning it gets # applied first, that way w_func has the right name when being sent to # notifying setter... 
        @metadata("_slider", dict(
            minimum=minimum,
            maximum=maximum,
            scale=scale,
            getfunc=getfunc))
        @notifying_setter
        @wraps(func)
        def w_func(*args, **kwargs):
            return func(*args, **kwargs)
        return w_func
    return my_slider


def combobox(options, getfunc=None):
    """
    Decorator generator that creates a decorator to suggest that any UI uses
    a combobox to set the given function.

    options is the set of available options presented to the user, converted
    to strings.

    An optional getfunc, if present, is used to get the current value at
    startup.

    See source file for examples
    """
    def my_combobox(func):
        @metadata("_combobox", dict(
            getfunc=getfunc,
            options=options))
        @notifying_setter
        @wraps(func)
        def w_func(*args, **kwargs):
            return func(*args, **kwargs)
        return w_func
    return my_combobox


def checkbox(getfunc=None):
    """
    Decorator generator that creates a decorator to suggest that any UI uses
    a checkbox to set the given function. An optional getfunc, if present,
    is used to get the current value at startup.
    """
    def my_checkbox(func):
        @metadata("_checkbox", dict(
            getfunc=getfunc))
        @notifying_setter
        @wraps(func)
        def w_func(*args, **kwargs):
            return func(*args, **kwargs)
        return w_func
    return my_checkbox


def textbox(getfunc=None):
    """
    Decorator generator that creates a decorator to suggest that any UI uses
    a textbox to set the given function. An optional getfunc, if present,
    is used to get the current value at startup.
    """
    def my_textbox(func):
        @metadata("_textbox", dict(
            getfunc=getfunc))
        @notifying_setter
        @wraps(func)
        def w_func(*args, **kwargs):
            return func(*args, **kwargs)
        return w_func
    return my_textbox


def button(func):
    """Decorator to suggest that any UI uses a button to call the function."""
    @metadata("_button", {})
    @wraps(func)
    def w_func(*args, **kwargs):
        return func(*args, **kwargs)
    return w_func


def notifying_setter(func):
    """
    For the method func, adds a 'listeners' method to the function namespace
    which returns a list. Each item in the list gets called with either the
    same args as the setter or the non-None return value from it.

    The list is stored internally in the parent object with the name
    "_{}_listeners".format(func.__name__)

    >>> def printfunc(args):
    ...     print args
    ...
    >>> class A:
    ...     @notifying_setter
    ...     def set_val(self, val):
    ...         setattr(self, "val", val)
    ...         print "val set to ", val
    ...     @notifying_setter
    ...     def set_val2(self, val):
    ...         val = int(val)
    ...         setattr(self, "val2", val)
    ...         print "val2 set to ", val
    ...         return val  # this value is sent to the notification
    ...
    >>> a = A()
    >>> A.set_val.listeners(a).append(lambda x: printfunc("val listener: %s" % x))
    >>> A.set_val2.listeners(a).append(lambda x: printfunc("val2 listener: %s" % x))
    >>> a.set_val(2)
    val set to 2
    val listener: 2
    >>> a.set_val2("42")
    val2 set to 42
    val2 listener: 42
    42
    """
    # Did think about making this work with plain functions too: we could
    # specify get/set functions that write either to self (if a class method)
    # or to globals (if a function). It turns out to be hard to tell at
    # decorate time whether a function is a method or not. We could try at
    # runtime, but it makes things complicated (eg, we'd have to pass get/set
    # functions to the get_listeners function, making it almost unusable
    # outside this function). Also, neither newfunc nor func ever gets
    # im_class (ie neither is identical to A.set_val), so even at run time
    # this isn't trivial.
listener_list_name = "_{}_listeners".format(func.__name__) def get_listeners(self, func=func, name=listener_list_name): listeners = getattr(self, listener_list_name, None) if listeners is None: listeners = [] setattr(self, name, listeners) return listeners logging.debug("Creating listener obj: %s", listener_list_name) @metadata("listeners", get_listeners) @wraps(func) def newfunc(self, *args, **kwargs): # call the original setter, storing the return value ret = func(self, *args, **kwargs) for l in newfunc.listeners(self): if ret is None: l(*args, **kwargs) else: l(ret) return ret return newfunc if __name__=="__main__": import doctest doctest.testmod() class Test: def __init__(self): self.value=0 self.bval = True self.optionval = "Maybe" self.textval="Matt" self._height=5 def get_test(self): print "Getting as", self.value return self.value @slider(getfunc=get_test) def test(self, newval): """ Simple example of how to use the slider decorator. We decorate the setter function, assuming UI controls will change rather than display values. When this function is called it will automatically update the linked slider. """ print "Setting to", newval self.value = newval @slider(getfunc=get_test) def test2(self, newval): """ Another slider with the same getfunc. When this gets set it calls the test function, so the test slider will update automatically. However, calling test will not update the test2 slider. """ self.test(newval) @button def button1(self): """ This calls the regular set function, UI elements will get updated! """ self.test(50) self.boolval(not self.get_bool()) self.combo("Maybe") def get_combo(self): print "Getting Yes" return self.optionval @combobox(getfunc=get_combo, options=["Yes", "No", "Maybe"]) def combo(self, t): print "setting combo to", t self.optionval = t def get_bool(self): return self.bval @checkbox(getfunc=get_bool) def boolval(self, newval): print "Setting boolval to", newval self.bval = newval def get_name(self): return self.textval @textbox(getfunc=get_name) def name(self, val): print "setting name to", val self.textval = val def get_height(self): return self._height @slider(getfunc=get_height) @textbox(getfunc=get_height) def height(self, val): val = float(val) self._height=val return val # return a float, t=Test() from qt_framework import Framework import contextlib with contextlib.closing(Framework()) as f: f.display(t)
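# ---------------------------------------------------------------------------
# A minimal, framework-free sketch of how the pieces above cooperate: the
# @slider decorator attaches display hints via @metadata, while
# notifying_setter gives every instance a listener list that a UI (or a
# plain callback, as here) can subscribe to. The Volume class is invented
# purely for illustration and is not called on import.

def _demo_listeners():
    class Volume(object):
        def __init__(self):
            self._level = 10

        def get_level(self):
            return self._level

        @slider(getfunc=get_level, minimum=0, maximum=11)
        def level(self, newval):
            self._level = newval

    v = Volume()
    seen = []
    # A UI framework would append a widget-update callback here instead.
    Volume.level.listeners(v).append(seen.append)

    v.level(7)
    assert v.get_level() == 7
    assert seen == [7]

    # The hints attached by @metadata are available for a framework to read.
    assert Volume.level._slider["maximum"] == 11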
import builtins import collections import datetime import decimal import enum import functools import math import re import types import uuid from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject from django.utils import datetime_safe from django.utils.functional import LazyObject, Promise from django.utils.timezone import utc from django.utils.version import get_docs_version class BaseSerializer: def __init__(self, value): self.value = value def serialize(self): raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.') class BaseSequenceSerializer(BaseSerializer): def _format(self): raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.') def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) value = self._format() return value % (", ".join(strings)), imports class BaseSimpleSerializer(BaseSerializer): def serialize(self): return repr(self.value), set() class DatetimeSerializer(BaseSerializer): def serialize(self): if self.value.tzinfo is not None and self.value.tzinfo != utc: self.value = self.value.astimezone(utc) value_repr = repr(self.value).replace("<UTC>", "utc") if isinstance(self.value, datetime_safe.datetime): value_repr = "datetime.%s" % value_repr imports = ["import datetime"] if self.value.tzinfo is not None: imports.append("from django.utils.timezone import utc") return value_repr, set(imports) class DateSerializer(BaseSerializer): def serialize(self): value_repr = repr(self.value) if isinstance(self.value, datetime_safe.date): value_repr = "datetime.%s" % value_repr return value_repr, {"import datetime"} class DecimalSerializer(BaseSerializer): def serialize(self): return repr(self.value), {"from decimal import Decimal"} class DeconstructableSerializer(BaseSerializer): @staticmethod def serialize_deconstructed(path, args, kwargs): name, imports = DeconstructableSerializer._serialize_path(path) strings = [] for arg in args: arg_string, arg_imports = serializer_factory(arg).serialize() strings.append(arg_string) imports.update(arg_imports) for kw, arg in sorted(kwargs.items()): arg_string, arg_imports = serializer_factory(arg).serialize() imports.update(arg_imports) strings.append("%s=%s" % (kw, arg_string)) return "%s(%s)" % (name, ", ".join(strings)), imports @staticmethod def _serialize_path(path): module, name = path.rsplit(".", 1) if module == "django.db.models": imports = {"from django.db import models"} name = "models.%s" % name else: imports = {"import %s" % module} name = path return name, imports def serialize(self): return self.serialize_deconstructed(*self.value.deconstruct()) class DictionarySerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for k, v in sorted(self.value.items()): k_string, k_imports = serializer_factory(k).serialize() v_string, v_imports = serializer_factory(v).serialize() imports.update(k_imports) imports.update(v_imports) strings.append((k_string, v_string)) return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports class EnumSerializer(BaseSerializer): def serialize(self): enum_class = self.value.__class__ module = enum_class.__module__ v_string, v_imports = serializer_factory(self.value.value).serialize() imports = {'import %s' % module, *v_imports} return "%s.%s(%s)" % 
(module, enum_class.__name__, v_string), imports class FloatSerializer(BaseSimpleSerializer): def serialize(self): if math.isnan(self.value) or math.isinf(self.value): return 'float("{}")'.format(self.value), set() return super().serialize() class FrozensetSerializer(BaseSequenceSerializer): def _format(self): return "frozenset([%s])" class FunctionTypeSerializer(BaseSerializer): def serialize(self): if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type): klass = self.value.__self__ module = klass.__module__ return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module} # Further error checking if self.value.__name__ == '<lambda>': raise ValueError("Cannot serialize function: lambda") if self.value.__module__ is None: raise ValueError("Cannot serialize function %r: No module" % self.value) module_name = self.value.__module__ if '<' not in self.value.__qualname__: # Qualname can include <locals> return '%s.%s' % (module_name, self.value.__qualname__), {'import %s' % self.value.__module__} raise ValueError( 'Could not find function %s in %s.\n' % (self.value.__name__, module_name) ) class FunctoolsPartialSerializer(BaseSerializer): def serialize(self): # Serialize functools.partial() arguments func_string, func_imports = serializer_factory(self.value.func).serialize() args_string, args_imports = serializer_factory(self.value.args).serialize() keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize() # Add any imports needed by arguments imports = {'import functools', *func_imports, *args_imports, *keywords_imports} return ( 'functools.%s(%s, *%s, **%s)' % ( self.value.__class__.__name__, func_string, args_string, keywords_string, ), imports, ) class IterableSerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. value = "(%s)" if len(strings) != 1 else "(%s,)" return value % (", ".join(strings)), imports class ModelFieldSerializer(DeconstructableSerializer): def serialize(self): attr_name, path, args, kwargs = self.value.deconstruct() return self.serialize_deconstructed(path, args, kwargs) class ModelManagerSerializer(DeconstructableSerializer): def serialize(self): as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct() if as_manager: name, imports = self._serialize_path(qs_path) return "%s.as_manager()" % name, imports else: return self.serialize_deconstructed(manager_path, args, kwargs) class OperationSerializer(BaseSerializer): def serialize(self): from django.db.migrations.writer import OperationWriter string, imports = OperationWriter(self.value, indentation=0).serialize() # Nested operation, trailing comma is handled in upper OperationWriter._write() return string.rstrip(','), imports class RegexSerializer(BaseSerializer): def serialize(self): regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize() # Turn off default implicit flags (e.g. re.U) because regexes with the # same implicit and explicit flags aren't equal. 
flags = self.value.flags ^ re.compile('').flags regex_flags, flag_imports = serializer_factory(flags).serialize() imports = {'import re', *pattern_imports, *flag_imports} args = [regex_pattern] if flags: args.append(regex_flags) return "re.compile(%s)" % ', '.join(args), imports class SequenceSerializer(BaseSequenceSerializer): def _format(self): return "[%s]" class SetSerializer(BaseSequenceSerializer): def _format(self): # Serialize as a set literal except when value is empty because {} # is an empty dict. return '{%s}' if self.value else 'set(%s)' class SettingsReferenceSerializer(BaseSerializer): def serialize(self): return "settings.%s" % self.value.setting_name, {"from django.conf import settings"} class TimedeltaSerializer(BaseSerializer): def serialize(self): return repr(self.value), {"import datetime"} class TimeSerializer(BaseSerializer): def serialize(self): value_repr = repr(self.value) if isinstance(self.value, datetime_safe.time): value_repr = "datetime.%s" % value_repr return value_repr, {"import datetime"} class TupleSerializer(BaseSequenceSerializer): def _format(self): # When len(value)==0, the empty tuple should be serialized as "()", # not "(,)" because (,) is invalid Python syntax. return "(%s)" if len(self.value) != 1 else "(%s,)" class TypeSerializer(BaseSerializer): def serialize(self): special_cases = [ (models.Model, "models.Model", []), ] for case, string, imports in special_cases: if case is self.value: return string, set(imports) if hasattr(self.value, "__module__"): module = self.value.__module__ if module == builtins.__name__: return self.value.__name__, set() else: return "%s.%s" % (module, self.value.__name__), {"import %s" % module} class UUIDSerializer(BaseSerializer): def serialize(self): return "uuid.%s" % repr(self.value), {"import uuid"} def serializer_factory(value): from django.db.migrations.writer import SettingsReference if isinstance(value, Promise): value = str(value) elif isinstance(value, LazyObject): # The unwrapped value is returned as the first item of the arguments # tuple. value = value.__reduce__()[1][0] if isinstance(value, models.Field): return ModelFieldSerializer(value) if isinstance(value, models.manager.BaseManager): return ModelManagerSerializer(value) if isinstance(value, Operation): return OperationSerializer(value) if isinstance(value, type): return TypeSerializer(value) # Anything that knows how to deconstruct itself. if hasattr(value, 'deconstruct'): return DeconstructableSerializer(value) # Unfortunately some of these are order-dependent. 
if isinstance(value, frozenset): return FrozensetSerializer(value) if isinstance(value, list): return SequenceSerializer(value) if isinstance(value, set): return SetSerializer(value) if isinstance(value, tuple): return TupleSerializer(value) if isinstance(value, dict): return DictionarySerializer(value) if isinstance(value, enum.Enum): return EnumSerializer(value) if isinstance(value, datetime.datetime): return DatetimeSerializer(value) if isinstance(value, datetime.date): return DateSerializer(value) if isinstance(value, datetime.time): return TimeSerializer(value) if isinstance(value, datetime.timedelta): return TimedeltaSerializer(value) if isinstance(value, SettingsReference): return SettingsReferenceSerializer(value) if isinstance(value, float): return FloatSerializer(value) if isinstance(value, (bool, int, type(None), bytes, str)): return BaseSimpleSerializer(value) if isinstance(value, decimal.Decimal): return DecimalSerializer(value) if isinstance(value, (functools.partial, functools.partialmethod)): return FunctoolsPartialSerializer(value) if isinstance(value, (types.FunctionType, types.BuiltinFunctionType, types.MethodType)): return FunctionTypeSerializer(value) if isinstance(value, collections.Iterable): return IterableSerializer(value) if isinstance(value, (COMPILED_REGEX_TYPE, RegexObject)): return RegexSerializer(value) if isinstance(value, uuid.UUID): return UUIDSerializer(value) raise ValueError( "Cannot serialize: %r\nThere are some values Django cannot serialize into " "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" "topics/migrations/#migration-serializing" % (value, get_docs_version()) )
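# ---------------------------------------------------------------------------
# A small sketch of how the factory above is typically used: every serializer
# returns a (source string, set-of-import-lines) pair that a migration writer
# can paste into a generated file. The values below are arbitrary; the helper
# relies on the module-level imports above (datetime, uuid) and on
# serializer_factory() and is not called on import.

def _serializer_factory_demo():
    text, imports = serializer_factory({"when": datetime.date(2020, 1, 1)}).serialize()
    # text    == "{'when': datetime.date(2020, 1, 1)}"
    # imports == {'import datetime'}

    text, imports = serializer_factory(uuid.UUID(int=0)).serialize()
    # text    == "uuid.UUID('00000000-0000-0000-0000-000000000000')"
    # imports == {'import uuid'}
    return text, imports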
from typing import Any, Dict, List, Optional, Union from django.conf import settings from django.http import HttpRequest, HttpResponse from django.shortcuts import redirect from django.utils.translation import ugettext as _ from zerver.decorator import require_member_or_admin, require_realm_admin from zerver.forms import PASSWORD_TOO_WEAK_ERROR, CreateUserForm from zerver.lib.actions import ( check_change_bot_full_name, check_change_full_name, check_remove_custom_profile_field_value, do_change_avatar_fields, do_change_bot_owner, do_change_default_all_public_streams, do_change_default_events_register_stream, do_change_default_sending_stream, do_change_user_role, do_create_user, do_deactivate_user, do_reactivate_user, do_regenerate_api_key, do_update_bot_config_data, do_update_outgoing_webhook_service, do_update_user_custom_profile_data_if_changed, notify_created_bot, ) from zerver.lib.avatar import avatar_url, get_gravatar_url from zerver.lib.bot_config import set_bot_config from zerver.lib.email_validation import email_allowed_for_realm from zerver.lib.exceptions import CannotDeactivateLastUserError, OrganizationOwnerRequired from zerver.lib.integrations import EMBEDDED_BOTS from zerver.lib.request import REQ, has_request_variables from zerver.lib.response import json_error, json_success from zerver.lib.streams import access_stream_by_id, access_stream_by_name, subscribed_to_stream from zerver.lib.types import Validator from zerver.lib.upload import upload_avatar_image from zerver.lib.url_encoding import add_query_arg_to_redirect_url from zerver.lib.users import ( access_bot_by_id, access_user_by_id, add_service, check_bot_creation_policy, check_bot_name_available, check_full_name, check_short_name, check_valid_bot_config, check_valid_bot_type, check_valid_interface_type, get_api_key, get_raw_user_data, validate_user_custom_profile_data, ) from zerver.lib.utils import generate_api_key from zerver.lib.validator import ( check_bool, check_dict, check_dict_only, check_int, check_int_in, check_list, check_none_or, check_string, check_union, check_url, ) from zerver.models import ( DisposableEmailError, DomainNotAllowedForRealmError, EmailContainsPlusError, InvalidFakeEmailDomain, Message, Realm, Service, Stream, UserProfile, get_user_by_delivery_email, get_user_by_id_in_realm_including_cross_realm, get_user_including_cross_realm, get_user_profile_by_id_in_realm, ) from zproject.backends import check_password_strength def check_last_owner(user_profile: UserProfile) -> bool: owners = set(user_profile.realm.get_human_owner_users()) return user_profile.is_realm_owner and not user_profile.is_bot and len(owners) == 1 def deactivate_user_backend(request: HttpRequest, user_profile: UserProfile, user_id: int) -> HttpResponse: target = access_user_by_id(user_profile, user_id) if target.is_realm_owner and not user_profile.is_realm_owner: raise OrganizationOwnerRequired() if check_last_owner(target): return json_error(_('Cannot deactivate the only organization owner')) return _deactivate_user_profile_backend(request, user_profile, target) def deactivate_user_own_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: if UserProfile.objects.filter(realm=user_profile.realm, is_active=True).count() == 1: raise CannotDeactivateLastUserError(is_last_owner=False) if user_profile.is_realm_owner and check_last_owner(user_profile): raise CannotDeactivateLastUserError(is_last_owner=True) do_deactivate_user(user_profile, acting_user=user_profile) return json_success() def 
deactivate_bot_backend(request: HttpRequest, user_profile: UserProfile, bot_id: int) -> HttpResponse: target = access_bot_by_id(user_profile, bot_id) return _deactivate_user_profile_backend(request, user_profile, target) def _deactivate_user_profile_backend(request: HttpRequest, user_profile: UserProfile, target: UserProfile) -> HttpResponse: do_deactivate_user(target, acting_user=user_profile) return json_success() def reactivate_user_backend(request: HttpRequest, user_profile: UserProfile, user_id: int) -> HttpResponse: target = access_user_by_id(user_profile, user_id, allow_deactivated=True, allow_bots=True) if target.is_bot: assert target.bot_type is not None check_bot_creation_policy(user_profile, target.bot_type) do_reactivate_user(target, acting_user=user_profile) return json_success() check_profile_data: Validator[List[Dict[str, Optional[Union[int, str, List[int]]]]]] = check_list( check_dict_only([ ('id', check_int), ('value', check_none_or( check_union([check_int, check_string, check_list(check_int)]), )), ]), ) @has_request_variables def update_user_backend( request: HttpRequest, user_profile: UserProfile, user_id: int, full_name: Optional[str] = REQ(default=None, validator=check_string), role: Optional[int] = REQ(default=None, validator=check_int_in( UserProfile.ROLE_TYPES, )), profile_data: Optional[List[Dict[str, Optional[Union[int, str, List[int]]]]]] = REQ( default=None, validator=check_profile_data, ), ) -> HttpResponse: target = access_user_by_id(user_profile, user_id, allow_deactivated=True, allow_bots=True) if role is not None and target.role != role: if target.role == UserProfile.ROLE_REALM_OWNER and check_last_owner(user_profile): return json_error(_('The owner permission cannot be removed from the only organization owner.')) if UserProfile.ROLE_REALM_OWNER in [role, target.role] and not user_profile.is_realm_owner: raise OrganizationOwnerRequired() do_change_user_role(target, role) if (full_name is not None and target.full_name != full_name and full_name.strip() != ""): # We don't respect `name_changes_disabled` here because the request # is on behalf of the administrator. check_change_full_name(target, full_name, user_profile) if profile_data is not None: clean_profile_data = [] for entry in profile_data: assert isinstance(entry["id"], int) if entry["value"] is None or not entry["value"]: field_id = entry["id"] check_remove_custom_profile_field_value(target, field_id) else: clean_profile_data.append({ "id": entry["id"], "value": entry["value"], }) validate_user_custom_profile_data(target.realm.id, clean_profile_data) do_update_user_custom_profile_data_if_changed(target, clean_profile_data) return json_success() def avatar(request: HttpRequest, user_profile: UserProfile, email_or_id: str, medium: bool=False) -> HttpResponse: """Accepts an email address or user ID and returns the avatar""" is_email = False try: int(email_or_id) except ValueError: is_email = True try: realm = user_profile.realm if is_email: avatar_user_profile = get_user_including_cross_realm(email_or_id, realm) else: avatar_user_profile = get_user_by_id_in_realm_including_cross_realm(int(email_or_id), realm) # If there is a valid user account passed in, use its avatar url = avatar_url(avatar_user_profile, medium=medium) except UserProfile.DoesNotExist: # If there is no such user, treat it as a new gravatar email = email_or_id avatar_version = 1 url = get_gravatar_url(email, avatar_version, medium) # We can rely on the url already having query parameters. 
Because # our templates depend on being able to use the ampersand to # add query parameters to our url, get_avatar_url does '?x=x' # hacks to prevent us from having to jump through decode/encode hoops. assert url is not None url = add_query_arg_to_redirect_url(url, request.META['QUERY_STRING']) return redirect(url) def get_stream_name(stream: Optional[Stream]) -> Optional[str]: if stream: return stream.name return None @require_member_or_admin @has_request_variables def patch_bot_backend( request: HttpRequest, user_profile: UserProfile, bot_id: int, full_name: Optional[str]=REQ(default=None), bot_owner_id: Optional[int]=REQ(validator=check_int, default=None), config_data: Optional[Dict[str, str]]=REQ(default=None, validator=check_dict(value_validator=check_string)), service_payload_url: Optional[str]=REQ(validator=check_url, default=None), service_interface: int=REQ(validator=check_int, default=1), default_sending_stream: Optional[str]=REQ(default=None), default_events_register_stream: Optional[str]=REQ(default=None), default_all_public_streams: Optional[bool]=REQ(default=None, validator=check_bool), ) -> HttpResponse: bot = access_bot_by_id(user_profile, bot_id) if full_name is not None: check_change_bot_full_name(bot, full_name, user_profile) if bot_owner_id is not None: try: owner = get_user_profile_by_id_in_realm(bot_owner_id, user_profile.realm) except UserProfile.DoesNotExist: return json_error(_('Failed to change owner, no such user')) if not owner.is_active: return json_error(_('Failed to change owner, user is deactivated')) if owner.is_bot: return json_error(_("Failed to change owner, bots can't own other bots")) previous_owner = bot.bot_owner if previous_owner != owner: do_change_bot_owner(bot, owner, user_profile) if default_sending_stream is not None: if default_sending_stream == "": stream: Optional[Stream] = None else: (stream, recipient, sub) = access_stream_by_name( user_profile, default_sending_stream) do_change_default_sending_stream(bot, stream) if default_events_register_stream is not None: if default_events_register_stream == "": stream = None else: (stream, recipient, sub) = access_stream_by_name( user_profile, default_events_register_stream) do_change_default_events_register_stream(bot, stream) if default_all_public_streams is not None: do_change_default_all_public_streams(bot, default_all_public_streams) if service_payload_url is not None: check_valid_interface_type(service_interface) assert service_interface is not None do_update_outgoing_webhook_service(bot, service_interface, service_payload_url) if config_data is not None: do_update_bot_config_data(bot, config_data) if len(request.FILES) == 0: pass elif len(request.FILES) == 1: user_file = list(request.FILES.values())[0] upload_avatar_image(user_file, user_profile, bot) avatar_source = UserProfile.AVATAR_FROM_USER do_change_avatar_fields(bot, avatar_source) else: return json_error(_("You may only upload one file at a time")) json_result = dict( full_name=bot.full_name, avatar_url=avatar_url(bot), service_interface = service_interface, service_payload_url = service_payload_url, config_data = config_data, default_sending_stream=get_stream_name(bot.default_sending_stream), default_events_register_stream=get_stream_name(bot.default_events_register_stream), default_all_public_streams=bot.default_all_public_streams, ) # Don't include the bot owner in case it is not set. # Default bots have no owner. 
if bot.bot_owner is not None: json_result['bot_owner'] = bot.bot_owner.email return json_success(json_result) @require_member_or_admin @has_request_variables def regenerate_bot_api_key(request: HttpRequest, user_profile: UserProfile, bot_id: int) -> HttpResponse: bot = access_bot_by_id(user_profile, bot_id) new_api_key = do_regenerate_api_key(bot, user_profile) json_result = dict( api_key=new_api_key, ) return json_success(json_result) @require_member_or_admin @has_request_variables def add_bot_backend( request: HttpRequest, user_profile: UserProfile, full_name_raw: str=REQ("full_name"), short_name_raw: str=REQ("short_name"), bot_type: int=REQ(validator=check_int, default=UserProfile.DEFAULT_BOT), payload_url: str=REQ(validator=check_url, default=""), service_name: Optional[str]=REQ(default=None), config_data: Dict[str, str]=REQ(default={}, validator=check_dict(value_validator=check_string)), interface_type: int=REQ(validator=check_int, default=Service.GENERIC), default_sending_stream_name: Optional[str]=REQ('default_sending_stream', default=None), default_events_register_stream_name: Optional[str]=REQ('default_events_register_stream', default=None), default_all_public_streams: Optional[bool]=REQ(validator=check_bool, default=None), ) -> HttpResponse: short_name = check_short_name(short_name_raw) if bot_type != UserProfile.INCOMING_WEBHOOK_BOT: service_name = service_name or short_name short_name += "-bot" full_name = check_full_name(full_name_raw) try: email = f'{short_name}@{user_profile.realm.get_bot_domain()}' except InvalidFakeEmailDomain: return json_error(_("Can't create bots until FAKE_EMAIL_DOMAIN is correctly configured.\n" "Please contact your server administrator.")) form = CreateUserForm({'full_name': full_name, 'email': email}) if bot_type == UserProfile.EMBEDDED_BOT: if not settings.EMBEDDED_BOTS_ENABLED: return json_error(_("Embedded bots are not enabled.")) if service_name not in [bot.name for bot in EMBEDDED_BOTS]: return json_error(_("Invalid embedded bot name.")) if not form.is_valid(): # We validate client-side as well return json_error(_('Bad name or username')) try: get_user_by_delivery_email(email, user_profile.realm) return json_error(_("Username already in use")) except UserProfile.DoesNotExist: pass check_bot_name_available( realm_id=user_profile.realm_id, full_name=full_name, ) check_bot_creation_policy(user_profile, bot_type) check_valid_bot_type(user_profile, bot_type) check_valid_interface_type(interface_type) if len(request.FILES) == 0: avatar_source = UserProfile.AVATAR_FROM_GRAVATAR elif len(request.FILES) != 1: return json_error(_("You may only upload one file at a time")) else: avatar_source = UserProfile.AVATAR_FROM_USER default_sending_stream = None if default_sending_stream_name is not None: (default_sending_stream, ignored_rec, ignored_sub) = access_stream_by_name( user_profile, default_sending_stream_name) default_events_register_stream = None if default_events_register_stream_name is not None: (default_events_register_stream, ignored_rec, ignored_sub) = access_stream_by_name( user_profile, default_events_register_stream_name) if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT) and service_name: check_valid_bot_config(bot_type, service_name, config_data) bot_profile = do_create_user(email=email, password=None, realm=user_profile.realm, full_name=full_name, short_name=short_name, bot_type=bot_type, bot_owner=user_profile, avatar_source=avatar_source, default_sending_stream=default_sending_stream, 
default_events_register_stream=default_events_register_stream, default_all_public_streams=default_all_public_streams) if len(request.FILES) == 1: user_file = list(request.FILES.values())[0] upload_avatar_image(user_file, user_profile, bot_profile) if bot_type in (UserProfile.OUTGOING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT): assert(isinstance(service_name, str)) add_service(name=service_name, user_profile=bot_profile, base_url=payload_url, interface=interface_type, token=generate_api_key()) if bot_type == UserProfile.INCOMING_WEBHOOK_BOT and service_name: set_bot_config(bot_profile, "integration_id", service_name) if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT): for key, value in config_data.items(): set_bot_config(bot_profile, key, value) notify_created_bot(bot_profile) api_key = get_api_key(bot_profile) json_result = dict( api_key=api_key, avatar_url=avatar_url(bot_profile), default_sending_stream=get_stream_name(bot_profile.default_sending_stream), default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream), default_all_public_streams=bot_profile.default_all_public_streams, ) return json_success(json_result) @require_member_or_admin def get_bots_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile) bot_profiles = bot_profiles.select_related('default_sending_stream', 'default_events_register_stream') bot_profiles = bot_profiles.order_by('date_joined') def bot_info(bot_profile: UserProfile) -> Dict[str, Any]: default_sending_stream = get_stream_name(bot_profile.default_sending_stream) default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream) # Bots are supposed to have only one API key, at least for now. # Therefore we can safely assume that one and only valid API key will be # the first one. api_key = get_api_key(bot_profile) return dict( username=bot_profile.email, full_name=bot_profile.full_name, api_key=api_key, avatar_url=avatar_url(bot_profile), default_sending_stream=default_sending_stream, default_events_register_stream=default_events_register_stream, default_all_public_streams=bot_profile.default_all_public_streams, ) return json_success({'bots': list(map(bot_info, bot_profiles))}) @has_request_variables def get_members_backend(request: HttpRequest, user_profile: UserProfile, user_id: Optional[int]=None, include_custom_profile_fields: bool=REQ(validator=check_bool, default=False), client_gravatar: bool=REQ(validator=check_bool, default=False), ) -> HttpResponse: ''' The client_gravatar field here is set to True if clients can compute their own gravatars, which saves us bandwidth. We want to eventually make this the default behavior, but we have old clients that expect the server to compute this for us. ''' realm = user_profile.realm if realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE: # If email addresses are only available to administrators, # clients cannot compute gravatars, so we force-set it to false. 
client_gravatar = False target_user = None if user_id is not None: target_user = access_user_by_id(user_profile, user_id, allow_deactivated=True, allow_bots=True, read_only=True) members = get_raw_user_data(realm, user_profile, target_user=target_user, client_gravatar=client_gravatar, user_avatar_url_field_optional=False, include_custom_profile_fields=include_custom_profile_fields) if target_user is not None: data: Dict[str, Any] = {"user": members[target_user.id]} else: data = {"members": [members[k] for k in members]} return json_success(data) @require_realm_admin @has_request_variables def create_user_backend(request: HttpRequest, user_profile: UserProfile, email: str=REQ(), password: str=REQ(), full_name_raw: str=REQ("full_name"), short_name: str=REQ()) -> HttpResponse: full_name = check_full_name(full_name_raw) form = CreateUserForm({'full_name': full_name, 'email': email}) if not form.is_valid(): return json_error(_('Bad name or username')) # Check that the new user's email address belongs to the admin's realm # (Since this is an admin API, we don't require the user to have been # invited first.) realm = user_profile.realm try: email_allowed_for_realm(email, user_profile.realm) except DomainNotAllowedForRealmError: return json_error(_("Email '{email}' not allowed in this organization").format( email=email, )) except DisposableEmailError: return json_error(_("Disposable email addresses are not allowed in this organization")) except EmailContainsPlusError: return json_error(_("Email addresses containing + are not allowed.")) try: get_user_by_delivery_email(email, user_profile.realm) return json_error(_("Email '{}' already in use").format(email)) except UserProfile.DoesNotExist: pass if not check_password_strength(password): return json_error(PASSWORD_TOO_WEAK_ERROR) do_create_user(email, password, realm, full_name, short_name) return json_success() def get_profile_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: raw_user_data = get_raw_user_data(user_profile.realm, user_profile, target_user=user_profile, client_gravatar=False, user_avatar_url_field_optional=False) result: Dict[str, Any] = raw_user_data[user_profile.id] result['max_message_id'] = -1 messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1] if messages: result['max_message_id'] = messages[0].id return json_success(result) @has_request_variables def get_subscription_backend(request: HttpRequest, user_profile: UserProfile, user_id: int=REQ(validator=check_int, path_only=True), stream_id: int=REQ(validator=check_int, path_only=True), ) -> HttpResponse: target_user = access_user_by_id(user_profile, user_id, read_only=True) (stream, recipient, sub) = access_stream_by_id(user_profile, stream_id) subscription_status = {'is_subscribed': subscribed_to_stream(target_user, stream_id)} return json_success(subscription_status)
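# ---------------------------------------------------------------------------
# Shape sketch for the ``profile_data`` argument accepted by
# update_user_backend() above. The field ids and values are invented; only
# the structure matters, and it mirrors what check_profile_data enforces:
# a list of dicts with an int "id" and a "value" that is an int, a string,
# a list of ints, or None.

EXAMPLE_PROFILE_DATA = [
    # Non-empty values are validated and forwarded to
    # do_update_user_custom_profile_data_if_changed().
    {"id": 1, "value": "Lead engineer"},
    {"id": 2, "value": [4, 7]},
    # None (or any falsy value) removes the field via
    # check_remove_custom_profile_field_value().
    {"id": 3, "value": None},
]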
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import contextlib import multiprocessing import os import re from collections import namedtuple from multiprocessing.pool import ThreadPool from pants.backend.jvm.tasks.jvm_task import JvmTask from pants.base.exceptions import TaskError from pants.build_graph.target_scopes import Scopes from pants.task.target_restriction_mixins import (HasSkipAndTransitiveOptionsMixin, SkipAndTransitiveOptionsRegistrar) from pants.util import desktop from pants.util.dirutil import safe_mkdir, safe_walk from pants.util.memo import memoized_property from pants.util.process_handler import subprocess Jvmdoc = namedtuple('Jvmdoc', ['tool_name', 'product_type']) # TODO: Shouldn't this be a NailgunTask? # TODO(John Sirois): The --skip flag supports the JarPublish task and is an abstraction leak. # It allows folks doing a local-publish to skip an expensive and un-needed step. # Remove this flag and instead support conditional requirements being registered against # the round manager. This may require incremental or windowed flag parsing that happens bit by # bit as tasks are recursively prepared vs. the current all-at once style. class JvmdocGen(SkipAndTransitiveOptionsRegistrar, HasSkipAndTransitiveOptionsMixin, JvmTask): @classmethod def jvmdoc(cls): """Subclasses should return their Jvmdoc configuration.""" raise NotImplementedError() @classmethod def register_options(cls, register): super(JvmdocGen, cls).register_options(register) tool_name = cls.jvmdoc().tool_name register('--include-codegen', type=bool, fingerprint=True, help='Create {0} for generated code.'.format(tool_name)) register('--combined', type=bool, fingerprint=True, help='Generate {0} for all targets combined, instead of each target ' 'individually.'.format(tool_name)) register('--open', type=bool, help='Open the generated {0} in a browser (implies --combined).'.format(tool_name)) register('--ignore-failure', type=bool, fingerprint=True, help='Do not consider {0} errors to be build errors.'.format(tool_name)) register('--exclude-patterns', type=list, default=[], fingerprint=True, help='Patterns for targets to be excluded from doc generation.') @classmethod def product_types(cls): return [cls.jvmdoc().product_type] def __init__(self, *args, **kwargs): super(JvmdocGen, self).__init__(*args, **kwargs) options = self.get_options() self._include_codegen = options.include_codegen self.open = options.open self.combined = self.open or options.combined self.ignore_failure = options.ignore_failure @memoized_property def _exclude_patterns(self): return [re.compile(x) for x in set(self.get_options().exclude_patterns or [])] def generate_doc(self, language_predicate, create_jvmdoc_command): """ Generate an execute method given a language predicate and command to create documentation language_predicate: a function that accepts a target and returns True if the target is of that language create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate documentation documentation for targets """ catalog = self.context.products.isrequired(self.jvmdoc().product_type) if catalog and self.combined: raise TaskError( 'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type)) def docable(target): if not language_predicate(target): self.context.log.debug('Skipping [{}] because it is 
does not pass the language predicate'.format(target.address.spec)) return False if not self._include_codegen and target.is_synthetic: self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec)) return False for pattern in self._exclude_patterns: if pattern.search(target.address.spec): self.context.log.debug( "Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern)) return False return True targets = self.get_targets(predicate=docable) if not targets: return with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check: def find_invalid_targets(): invalid_targets = set() for vt in invalidation_check.invalid_vts: invalid_targets.update(vt.targets) return invalid_targets invalid_targets = list(find_invalid_targets()) if invalid_targets: if self.combined: self._generate_combined(targets, create_jvmdoc_command) else: self._generate_individual(invalid_targets, create_jvmdoc_command) if self.open and self.combined: try: desktop.ui_open(os.path.join(self.workdir, 'combined', 'index.html')) except desktop.OpenError as e: raise TaskError(e) if catalog: for target in targets: gendir = self._gendir(target) jvmdocs = [] for root, dirs, files in safe_walk(gendir): jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files) self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs) def _generate_combined(self, targets, create_jvmdoc_command): gendir = os.path.join(self.workdir, 'combined') if targets: classpath = self.classpath(targets, include_scopes=Scopes.JVM_COMPILE_SCOPES) safe_mkdir(gendir, clean=True) command = create_jvmdoc_command(classpath, gendir, *targets) if command: self.context.log.debug("Running create_jvmdoc in {} with {}".format(gendir, " ".join(command))) result, gendir = create_jvmdoc(command, gendir) self._handle_create_jvmdoc_result(targets, result, command) def _generate_individual(self, targets, create_jvmdoc_command): jobs = {} for target in targets: gendir = self._gendir(target) classpath = self.classpath([target], include_scopes=Scopes.JVM_COMPILE_SCOPES) command = create_jvmdoc_command(classpath, gendir, target) if command: jobs[gendir] = (target, command) if jobs: # Use ThreadPool as there may be dangling processes that cause identical run id and # then buildstats error downstream. https://github.com/pantsbuild/pants/issues/6785 with contextlib.closing( ThreadPool(processes=min(len(jobs), multiprocessing.cpu_count()))) as pool: # map would be a preferable api here but fails after the 1st batch with an internal: # ... 
# File "...src/python/pants/backend/jvm/tasks/jar_create.py", line 170, in javadocjar # pool.map(createjar, jobs) # File "...lib/python2.6/multiprocessing/pool.py", line 148, in map # return self.map_async(func, iterable, chunksize).get() # File "...lib/python2.6/multiprocessing/pool.py", line 422, in get # raise self._value # NameError: global name 'self' is not defined futures = [] self.context.log.debug("Begin multiprocessing section; output may be misordered or garbled") try: for gendir, (target, command) in jobs.items(): self.context.log.debug("Running create_jvmdoc in {} with {}" .format(gendir, " ".join(command))) futures.append(pool.apply_async(create_jvmdoc, args=(command, gendir))) for future in futures: result, gendir = future.get() target, command = jobs[gendir] self._handle_create_jvmdoc_result([target], result, command) finally: # In the event of an exception, we want to call terminate() because otherwise # we get errors on exit when multiprocessing tries to do it, because what # is dead may never die. pool.terminate() self.context.log.debug("End multiprocessing section") def _handle_create_jvmdoc_result(self, targets, result, command): if result != 0: targetlist = ", ".join(target.address.spec for target in targets) message = 'Failed to process {} for {} [{}]: {}'.format( self.jvmdoc().tool_name, targetlist, result, " ".join(command)) if self.ignore_failure: self.context.log.warn(message) else: raise TaskError(message) def _gendir(self, target): return os.path.join(self.workdir, target.id) def create_jvmdoc(command, gendir): try: safe_mkdir(gendir, clean=True) process = subprocess.Popen(command) result = process.wait() return result, gendir except OSError: return 1, gendir
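# ---------------------------------------------------------------------------
# A hedged sketch of how a concrete task plugs into generate_doc() above.
# Pants' real javadoc/scaladoc tasks differ in detail; the predicate, the
# command layout and the target helper calls used here are illustrative
# assumptions, not the actual implementation.

class DemoJavadocGen(JvmdocGen):
  """Generates javadoc for java targets (illustrative subclass)."""

  @classmethod
  def jvmdoc(cls):
    return Jvmdoc(tool_name='javadoc', product_type='javadoc')

  def execute(self):
    def is_java(target):
      # Illustrative predicate; a real task would inspect target types.
      return getattr(target, 'has_sources', lambda suffix: False)('.java')

    def create_javadoc_command(classpath, gendir, *targets):
      sources = [source
                 for target in targets
                 for source in target.sources_relative_to_buildroot()]
      if not sources:
        return None
      # generate_doc() expects a list of strings it can hand to Popen.
      return (['javadoc', '-quiet', '-d', gendir,
               '-classpath', ':'.join(classpath)]
              + sources)

    self.generate_doc(is_java, create_javadoc_command)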
""" Base class of site movers :author: Alexey Anisenkov """ import hashlib import os, re import time from subprocess import Popen, PIPE, STDOUT from pUtil import tolog # from PilotErrors import PilotErrors, PilotException from Node import Node class BaseSiteMover(object): """ File movers move files between a storage element (of different kinds) and a local directory get_data: SE->local put_data: local->SE check_space: available space in SE mkdirWperm -- create recursively dirs setting appropriate permissions getLocalFileInfo -- get size and checksum of a local file """ schemes = ['srm'] # list of supported schemes for transfers name = "" # unique ID of the Mover implementation, if not set copy_command will be used copy_command = None timeout = 3000 # checksum_type = "adler32" # algorithm name of checksum calculation checksum_command = "adler32" # command to be executed to get checksum, e.g. md5sum (adler32 is internal default implementation) ddmconf = {} # DDMEndpoints configuration from AGIS require_replicas = True ## quick hack to avoid query Rucio to resolve input replicas def __init__(self, setup_path='', **kwargs): self.copysetup = setup_path self.timeout = kwargs.get('timeout', self.timeout) self.ddmconf = kwargs.get('ddmconf', self.ddmconf) self.workDir = kwargs.get('workDir', '') #self.setup_command = self.getSetup() self.trace_report = {} @classmethod def log(self, value): # quick stub #print value tolog(value) @property def copysetup(self): return self._setup @copysetup.setter def copysetup(self, value): value = os.path.expandvars(value.strip()) if value and not os.access(value, os.R_OK): self.log("WARNING: copysetup=%s is invalid: file is not readdable" % value) raise PilotException("Failed to set copysetup: passed invalid file name=%s" % value, code=PilotErrors.ERR_NOSUCHFILE, state="RFCP_FAIL") self._setup = value @classmethod def getID(self): """ return the ID/NAME string of Mover class used to resolve Mover classs name attribute helps to define various movers with the same copy command """ return self.name or self.copy_command @classmethod def getRucioPath(self, scope, lfn, prefix='rucio'): """ Construct a partial Rucio PFN using the scope and the LFN """ # <prefix=rucio>/<scope>/md5(<scope>:<lfn>)[0:2]/md5(<scope:lfn>)[2:4]/<lfn> hash_hex = hashlib.md5('%s:%s' % (scope, lfn)).hexdigest() paths = [prefix] + scope.split('.') + [hash_hex[0:2], hash_hex[2:4], lfn] paths = filter(None, paths) # remove empty parts to avoid double /-chars return '/'.join(paths) #scope = os.path.join(*scope.split('.')) # correct scope #return os.path.join(prefix, scope, hash_hex[0:2], hash_hex[2:4], lfn) def isDeterministic(self, ddmendpoint): return self.ddmconf.get(ddmendpoint, {}).get('is_deterministic', None) def getSURLRucio(self, se, se_path, scope, lfn, job=None): """ Get final destination SURL of file to be moved """ # ANALY/PROD job specific processing ?? prefix = 'rucio' if se_path.rstrip('/').endswith('/' + prefix): # avoid double prefix prefix = '' surl = se + os.path.join(se_path, self.getRucioPath(scope, lfn, prefix=prefix)) return surl def getSURL(self, se, se_path, scope, lfn, job=None, pathConvention=None, ddmEndpoint=None): """ Get final destination SURL of file to be moved job instance is passing here for possible JOB specific processing ?? 
FIX ME LATER """ # consider only deterministic sites (output destination) # do proper extract is_determetistic flag from DDMEndpoint: TODO # quick hack for now if (ddmEndpoint and self.isDeterministic(ddmEndpoint)) or (se_path and se_path.rstrip('/').endswith('/rucio')): return self.getSURLRucio(se, se_path, scope, lfn) raise Exception("getSURL(): NOT IMPLEMENTED error: processing of non Rucio transfers is not implemented yet, se_path=%s" % se_path) def getSetup(self): """ return full setup command to be executed Can be customized by different site mover """ if not self.copysetup: return '' return 'source %s' % self.copysetup def setup(self): """ Prepare site specific setup initializations Should be implemented by different site mover """ # TODO: vertify setup?? # raise in case of errors return True # rcode=0, output='' def shouldVerifyStageIn(self): """ Should the get operation perform any file size/checksum verifications? can be customized for specific movers """ return True def check_availablespace(self, maxinputsize, files): """ Verify that enough local space is available to stage in and run the job :raise: PilotException in case of not enough space """ if not self.shouldVerifyStageIn(): return totalsize = reduce(lambda x, y: x + y.filesize, files, 0) # verify total filesize if maxinputsize and totalsize > maxinputsize: error = "Too many/too large input files (%s). Total file size=%s B > maxinputsize=%s B" % (len(files), totalsize, maxinputsize) raise PilotException(error, code=PilotErrors.ERR_SIZETOOLARGE) self.log("Total input file size=%s B within allowed limit=%s B (zero value means unlimited)" % (totalsize, maxinputsize)) # get available space wn = Node() wn.collectWNInfo(self.workDir) available_space = int(wn.disk)*1024**2 # convert from MB to B self.log("Locally available space: %d B" % available_space) # are we wihin the limit? 
if totalsize > available_space: error = "Not enough local space for staging input files and run the job (need %d B, but only have %d B)" % (totalsize, available_space) raise PilotException(error, code=PilotErrors.ERR_NOLOCALSPACE) def getRemoteFileChecksum(self, filename): """ get checksum of remote file Should be implemented by different site mover :return: (checksum, checksum_type) :raise: an exception in case of errors """ return None, None def getRemoteFileSize(self, filename): """ get size of remote file Should be implemented by different site mover :return: length of file :raise: an exception in case of errors """ return None def resolve_replica(self, fspec, protocol, ddm=None): """ :fspec: FileSpec object :protocol: dict('se':'', 'scheme':'str' or list) Resolve input replica either by protocol scheme or manually construct pfn according to protocol.se value (full local path is matched respect to ddm_se default protocol) :return: input file replica details: {'surl':'', 'ddmendpoint':'', 'pfn':''} :raise: PilotException in case of controlled error """ # resolve proper surl (main srm replica) and find related replica if protocol.get('se'): scheme = str(protocol.get('se')).split(':', 1)[0] else: scheme = protocol.get('scheme') if not scheme: raise Exception('Failed to resolve copytool scheme to be used, se field is corrupted?: protocol=%s' % protocol) if isinstance(scheme, str): scheme = [scheme] replica = None # find first matched to protocol spec replica surl = None if protocol.get('se'): # custom settings: match Rucio replica by default protocol se (quick stub until Rucio protocols are proper populated) for ddmendpoint, replicas, ddm_se, ddm_path in fspec.replicas: if not replicas: # ignore ddms with no replicas continue surl = replicas[0] # assume srm protocol is first entry self.log("[stage-in] surl (srm replica) from Rucio: pfn=%s, ddmendpoint=%s, ddm.se=%s, ddm.se_path=%s" % (surl, ddmendpoint, ddm_se, ddm_path)) for r in replicas: if ddm_se and r.startswith(ddm_se): # manually form pfn based on protocol.se r_filename = r.replace(ddm_se, '', 1).replace(ddm_path, '', 1) # resolve replica filename # quick hack: if hosted replica ddmendpoint and input protocol ddmendpoint mismatched => consider replica ddmendpoint.path r_path = protocol.get('path') if ddmendpoint != protocol.get('ddm'): self.log("[stage-in] ignore protocol.path=%s since protocol.ddm=%s differs from found replica.ddm=%s ... will use ddm.path=%s to form TURL" % (protocol.get('path'), protocol.get('ddm'), ddmendpoint, ddm_path)) r_path = ddm_path replica = protocol.get('se') + r_path if replica and r_filename and '/' not in (replica[-1] + r_filename[0]): replica += '/' replica += r_filename self.log("[stage-in] ignore_rucio_replicas since protocol.se is explicitly passed, protocol.se=%s, protocol.path=%s: found replica=%s matched ddm.se=%s, ddm.path=%s .. 
will use TURL=%s" % (protocol.get('se'), protocol.get('path'), surl, ddm_se, ddm_path, replica)) break if replica: break def get_preferred_replica(replicas, allowed_schemas): for r in replicas: for sval in allowed_schemas: if r and r.startswith('%s://' % sval): return r return None if not replica: # resolve replica from Rucio: use exact pfn from Rucio replicas for ddmendpoint, replicas, ddm_se, ddm_path in fspec.replicas: if not replicas: # ignore ddms with no replicas continue pschema = protocol.get('primary_scheme') if pschema: ## look up first primary schemas if requested replica = get_preferred_replica(replicas, pschema) if not replica: replica = get_preferred_replica(replicas, scheme) if replica: surl = get_preferred_replica(replicas, ['srm']) or replicas[0] # prefer SRM protocol for surl -- to be verified self.log("[stage-in] surl (srm replica) from Rucio: pfn=%s, ddmendpoint=%s, ddm.se=%s, ddm.se_path=%s" % (surl, ddmendpoint, ddm_se, ddm_path)) break if not replica: # replica not found error = 'Failed to find replica for input file, protocol=%s, fspec=%s, allowed schemas=%s' % (protocol, fspec, scheme) self.log("resolve_replica: %s" % error) raise PilotException(error, code=PilotErrors.ERR_REPNOTFOUND) return {'surl':surl, 'ddmendpoint':ddmendpoint, 'pfn':replica} def is_stagein_allowed(self, fspec, job): """ check if stage-in operation is allowed for the mover apply additional job specific checks here if need Should be overwritten by custom sitemover :return: True in case stage-in transfer is allowed :raise: PilotException in case of controlled error """ return True def get_data(self, fspec): """ fspec is FileSpec object :return: file details: {'checksum': '', 'checksum_type':'', 'filesize':''} :raise: PilotException in case of controlled error """ # resolve proper surl and find related replica dst = os.path.join(self.workDir, fspec.lfn) return self.stageIn(fspec.turl, dst, fspec) def stageIn(self, source, destination, fspec): """ Stage in the source file: do stagein file + verify local file :return: file details: {'checksum': '', 'checksum_type':'', 'filesize':''} :raise: PilotException in case of controlled error """ self.trace_report.update(relativeStart=time.time(), transferStart=time.time()) dst_checksum, dst_checksum_type = self.stageInFile(source, destination, fspec) src_fsize = fspec.filesize if not self.shouldVerifyStageIn(): self.log("skipped stage-in verification for lfn=%s" % fspec.lfn) return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':src_fsize} src_checksum, src_checksum_type = fspec.get_checksum() dst_fsize = os.path.getsize(destination) # verify stagein by checksum self.trace_report.update(validateStart=time.time()) try: if not dst_checksum: dst_checksum, dst_checksum_type = self.calc_file_checksum(destination) except Exception, e: self.log("verify StageIn: caught exception while getting local file=%s checksum: %s .. skipped" % (destination, e)) try: if not src_checksum: src_checksum, src_checksum_type = self.getRemoteFileChecksum(source) except Exception, e: self.log("verify StageIn: caught exception while getting remote file=%s checksum: %s .. 
skipped" % (source, e)) try: if dst_checksum and dst_checksum_type and src_checksum and src_checksum_type: # verify against source(currently src_checksum is empty when merging es files from NorduGrid) is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type self.log("Remote checksum [%s]: %s (%s)" % (src_checksum_type, src_checksum, source)) self.log("Local checksum [%s]: %s (%s)" % (dst_checksum_type, dst_checksum, destination)) self.log("checksum is_verified = %s" % is_verified) if type(dst_checksum) is str and type(src_checksum) is str: if len(src_checksum) != len(dst_checksum): self.log("Local and remote checksums have different lengths (%s vs %s), will lstrip them" % (dst_checksum, src_checksum)) src_checksum = src_checksum.lstrip('0') dst_checksum = dst_checksum.lstrip('0') is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type self.log("Remote checksum [%s]: %s (%s)" % (src_checksum_type, src_checksum, source)) self.log("Local checksum [%s]: %s (%s)" % (dst_checksum_type, dst_checksum, destination)) self.log("checksum is_verified = %s" % is_verified) if not is_verified: error = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" % \ (src_checksum_type, os.path.basename(destination), dst_checksum, src_checksum) if src_checksum_type == 'adler32': state = 'AD_MISMATCH' rcode = PilotErrors.ERR_GETADMISMATCH else: state = 'MD5_MISMATCH' rcode = PilotErrors.ERR_GETMD5MISMATCH raise PilotException(error, code=rcode, state=state) self.log("verifying stagein done. [by checksum] [%s]" % source) self.trace_report.update(clientState="DONE") return {'checksum': dst_checksum, 'checksum_type': dst_checksum_type, 'filesize': dst_fsize} except PilotException: raise except Exception, e: self.log("verify StageIn: caught exception while doing file checksum verification: %s .. skipped" % e) # verify stage-in by filesize try: if not src_fsize: src_fsize = self.getRemoteFileSize(source) self.log("Remote filesize [%s]: %s" % (os.path.dirname(destination), src_fsize)) self.log("Local filesize [%s]: %s" % (os.path.dirname(destination), dst_fsize)) if not src_fsize: warn = "Source size is unknown, will pass(mark it as successful)" self.log(warn) return {'checksum': dst_checksum, 'checksum_type': dst_checksum_type, 'filesize': dst_fsize} is_verified = src_fsize and src_fsize == dst_fsize self.log("filesize is_verified = %s" % is_verified) if not is_verified: error = "Remote and local file sizes do not match for %s (%s != %s)" % (os.path.basename(destination), dst_fsize, src_fsize) self.log(error) raise PilotException(error, code=PilotErrors.ERR_GETWRONGSIZE, state='FS_MISMATCH') self.log("verifying stagein done. [by filesize] [%s]" % source) self.trace_report.update(clientState="DONE") return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':dst_fsize} except PilotException: raise except Exception, e: self.log("verify StageIn: caught exception while doing file size verification: %s .. skipped" % e) raise PilotException("Neither checksum nor file size could be verified (failing job)", code=PilotErrors.ERR_NOFILEVERIFICATION, state='NOFILEVERIFICATION') def stageInFile(self, source, destination, fspec=None): """ Stage in the file. 
        Should be implemented by a specific site mover
        :return: destination file details (checksum, checksum_type) in case of success, throw an exception in case of failure
        :raise: PilotException in case of controlled error
        """

        raise Exception('NOT IMPLEMENTED')

    def put_data(self, fspec):
        """
        fspec is a FileSpec object
        The stage-out workflow can be overwritten by a specific Mover
        :return: remote file details: {'checksum': '', 'checksum_type':'', 'filesize':'', 'surl':''}
        :raise: PilotException in case of controlled error
        """

        src = os.path.join(self.workDir, fspec.lfn)
        return self.stageOut(src, fspec.turl, fspec)

    def stageOut(self, source, destination, fspec):
        """
        Stage out the source file: do the file stage-out + verify the remote copy
        :return: remote file details: {'checksum': '', 'checksum_type':'', 'filesize':''}
        :raise: PilotException in case of controlled error
        """

        src_checksum, src_checksum_type = None, None
        src_fsize = fspec and fspec.filesize or os.path.getsize(source)
        if fspec:
            src_checksum, src_checksum_type = fspec.get_checksum()

        # do stageOutFile
        self.trace_report.update(relativeStart=time.time(), transferStart=time.time())

        file_exist_error, dst_checksum, dst_checksum_type = None, None, None
        try:
            dst_checksum, dst_checksum_type = self.stageOutFile(source, destination, fspec)
        except PilotException, e:  # do clean up
            if e.code == PilotErrors.ERR_FILEEXIST:
                # continue execution: verify whether the newly produced file matches the one already present at the storage
                self.log("INFO: stageOutFile() failed with a FILEEXIST error .. will verify whether the newly produced file is the same as the one at the storage")
                file_exist_error = e
            else:
                self.remote_cleanup(destination, fspec)
                raise

        # verify stageout by checksum
        self.trace_report.update(validateStart=time.time())
        try:
            if not dst_checksum:
                dst_checksum, dst_checksum_type = self.getRemoteFileChecksum(destination)
        except Exception, e:
            self.log("verify StageOut: caught exception while getting remote file checksum..
skipped, error=%s" % e) import traceback self.log(traceback.format_exc()) # Ignore in the case of lsm mover if self.name == 'lsm': self.log("Ignoring lsm error") if file_exist_error: ## no way to verify newer file against already exist at storage: do fail transfer with FILEEXIST error raise file_exist_error return {'checksum': None, 'checksum_type':None, 'filesize':src_fsize} else: self.log("Used %s mover" % (self.name)) try: if dst_checksum and dst_checksum_type: # verify against source if not src_checksum: # fspec has no checksum data defined try to calculate from the source src_checksum, src_checksum_type = self.calc_file_checksum(source) is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type self.log("Local checksum [%s]: %s" % (src_checksum_type, src_checksum)) self.log("Remote checksum [%s]: %s" % (dst_checksum_type, dst_checksum)) self.log("checksum is_verified = %s" % is_verified) if type(dst_checksum) is str and type(src_checksum) is str: if len(dst_checksum) < len(src_checksum): self.log("Local and remote checksums have different lengths (%s vs %s)" % (src_checksum, dst_checksum)) if src_checksum[0] == '0': self.log("Stripping initial 0:s from local checksum") src_checksum = src_checksum.lstrip('0') is_verified = src_checksum and src_checksum_type and dst_checksum == src_checksum and dst_checksum_type == src_checksum_type self.log("Local checksum [%s]: %s" % (src_checksum_type, src_checksum)) self.log("Remote checksum [%s]: %s" % (dst_checksum_type, dst_checksum)) self.log("checksum is_verified = %s" % is_verified) if not is_verified and file_exist_error: ## newer file is different respect to one from storage: raise initial FILEEXIST error raise file_exist_error if not is_verified: error = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" % \ (src_checksum_type, os.path.basename(destination), dst_checksum, src_checksum) if src_checksum_type == 'adler32': state = 'AD_MISMATCH' rcode = PilotErrors.ERR_PUTADMISMATCH else: state = 'MD5_MISMATCH' rcode = PilotErrors.ERR_PUTMD5MISMATCH raise PilotException(error, code=rcode, state=state) self.log("verifying stageout done. [by checksum]") self.trace_report.update(clientState="DONE") return {'checksum': dst_checksum, 'checksum_type':dst_checksum_type, 'filesize':src_fsize} except PilotException, e: if not e.code == PilotErrors.ERR_FILEEXIST: self.remote_cleanup(destination, fspec) raise except Exception, e: self.log("verify StageOut: caught exception while doing file checksum verification: %s .. skipped" % e) # verify stageout by filesize try: dst_fsize = self.getRemoteFileSize(destination) is_verified = src_fsize and src_fsize == dst_fsize self.log("Local filesize [%s]: %s" % (os.path.dirname(destination), src_fsize)) self.log("Remote filesize [%s]: %s" % (os.path.dirname(destination), dst_fsize)) self.log("filesize is_verified = %s" % is_verified) if not is_verified and file_exist_error: ## newer file is different respect to one from storage: raise initial FILEEXIST error raise file_exist_error if not is_verified: error = "Remote and local file sizes do not match for %s (%s != %s)" % (os.path.basename(destination), dst_fsize, src_fsize) self.log(error) raise PilotException(error, code=PilotErrors.ERR_PUTWRONGSIZE, state='FS_MISMATCH') self.log("verifying stageout done. 
[by filesize]")
            self.trace_report.update(clientState="DONE")

            return {'checksum': dst_checksum, 'checksum_type': dst_checksum_type, 'filesize': src_fsize}
        except PilotException, e:
            if not e.code == PilotErrors.ERR_FILEEXIST:
                self.remote_cleanup(destination, fspec)
            raise
        except Exception, e:
            self.log("verify StageOut: caught exception while doing file size verification: %s .. skipped" % e)

        if file_exist_error:
            raise file_exist_error
        else:
            self.remote_cleanup(destination, fspec)

        raise PilotException("Neither checksum nor file size could be verified (failing job)", code=PilotErrors.ERR_NOFILEVERIFICATION, state='NOFILEVERIFICATION')

    def stageOutFile(self, source, destination, fspec):
        """
        Stage out the file
        Should be implemented by a specific site mover
        :return: destination file details (checksum, checksum_type) in case of success, throw an exception in case of failure
        :raise: PilotException in case of controlled error
        """

        raise Exception('NOT IMPLEMENTED')

    def remote_cleanup(self, destination, fspec):
        """
        Apply remote clean up: e.g. remove an incomplete remote file
        Should be customized by a specific site mover
        """

        return True

    def resolveStageErrorFromOutput(self, output, filename=None, is_stagein=False):
        """
        Resolve the error code, client state and error message from the copy tool output
        :return: dict {'rcode', 'state', 'error'}
        """

        ret = {'rcode': PilotErrors.ERR_STAGEINFAILED if is_stagein else PilotErrors.ERR_STAGEOUTFAILED,
               'state': 'COPY_ERROR',
               'error': 'Copy operation failed [is_stagein=%s]: %s' % (is_stagein, output)}

        if "Could not establish context" in output:
            ret['rcode'] = PilotErrors.ERR_NOPROXY
            ret['state'] = 'CONTEXT_FAIL'
            ret['error'] = "Could not establish context: Proxy / VO extension of proxy has probably expired: %s" % output
        elif "File exists" in output or 'SRM_FILE_BUSY' in output or 'file already exists' in output:
            ret['rcode'] = PilotErrors.ERR_FILEEXIST
            ret['state'] = 'FILE_EXIST'
            ret['error'] = "File already exists in the destination: %s" % output
        elif "No space left on device" in output:
            ret['rcode'] = PilotErrors.ERR_NOLOCALSPACE
            ret['state'] = 'NO_SPACE'
            ret['error'] = "No available space left on local disk: %s" % output
        elif "globus_xio:" in output:
            ret['rcode'] = PilotErrors.ERR_GETGLOBUSSYSERR
            ret['state'] = 'GLOBUS_FAIL'
            ret['error'] = "Globus system error: %s" % output
        elif "No such file or directory" in output and filename and "DBRelease" in filename:  ## is it a stageout error??
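            # a missing DBRelease tarball is mapped to its own error code/state (ERR_MISSDBREL / NO_DBREL) so it can be reported separately from a generic missing-file error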
ret['rcode'] = PilotErrors.ERR_MISSDBREL ret['state'] = 'NO_DBREL' ret['error'] = output elif "No such file or directory" in output: ret['rcode'] = PilotErrors.ERR_NOSUCHFILE ret['state'] = 'NO_FILE' ret['error'] = output elif "query chksum is not supported" in output or "Unable to checksum" in output: ret['rcode'] = PilotErrors.ERR_CHKSUMNOTSUP ret['state'] = 'CHKSUM_NOTSUP' ret['error'] = output elif "does not match the checksum" in output: if 'adler32' in output: state = 'AD_MISMATCH' rcode = PilotErrors.ERR_GETADMISMATCH else: state = 'MD5_MISMATCH' rcode = PilotErrors.ERR_GETMD5MISMATCH ret['rcode'] = rcode ret['state'] = state return ret def getTimeOut(self, filesize): """ Get a proper time-out limit based on the file size """ timeout_max = 5 + 3*3600 # 3 hours ::: FIX ME LATER :: timeout_min = self.timeout timeout = timeout_min + int(filesize/0.5e6) # approx < 0.5 Mb/sec return min(timeout, timeout_max) def calc_file_checksum(self, filename): """ calculate SiteMover specific checksum for a file :return: (checksum, checksum_type) raise an exception if input filename is not exist/readable """ if not self.checksum_command or not self.checksum_type: raise Exception("Failed to get file checksum: incomplete checksum_command declaration: type=%s, command=%s" % (self.checksum_type, self.checksum_command)) fn = getattr(self, "calc_%s_checksum" % self.checksum_type, None) checksum = fn(filename) if callable(fn) else self.calc_checksum(filename, self.checksum_command) return checksum, self.checksum_type @classmethod def calc_adler32_checksum(self, filename): """ calculate the adler32 checksum for a file raise an exception if input filename is not exist/readable """ from zlib import adler32 asum = 1 # default adler32 starting value BLOCKSIZE = 64*1024*1024 # read buffer, 64 Mb with open(filename, 'rb') as f: while True: data = f.read(BLOCKSIZE) if not data: break asum = adler32(data, asum) if asum < 0: asum += 2**32 return "%08x" % asum # convert to hex @classmethod def calc_checksum(self, filename, command='md5sum', setup=None, pattern=None, cmd=None): """ :cmd: quick hack: fix me later calculate file checksum value raise an exception if input filename is not exist/readable """ if not cmd: cmd = "%s %s" % (command, filename) if setup: cmd = "%s 1>/dev/null 2>/dev/null; %s" % (setup, cmd) self.log("Execute command (%s) to calc checksum of file" % cmd) c = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) output, error = c.communicate() if error: self.log("INFO: calc_checksum: error=%s" % error) if c.returncode: self.log('FAILED to calc_checksum for file=%s, cmd=%s, rcode=%s, output=%s' % (filename, cmd, c.returncode, output)) raise Exception(output) self.log("calc_checksum: output=%s" % output) value = '' if pattern: self.log("INFO: calc_checksum: try to extract checksum value by pattern=%s" % pattern) m = re.match(pattern, output) if m: value = m.groupdict().get('checksum') or '' else: value = output.split()[0] return value # return final checksum @classmethod def removeLocal(self, filename): """ Remove the local file in case of failure to prevent problem with get retry attempt :return: True in case of physical file removal """ if not os.path.exists(filename): # nothing to remove return False try: os.remove(filename) self.log("Successfully removed local file=%s" % filename) is_removed = True except Exception, e: self.log("Could not remove the local file=%s .. skipped, error=%s" % (filename, e)) is_removed = False return is_removed
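
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original code): how a concrete mover
# typically plugs into the base class above. The base class name
# "BaseSiteMover" and the plain `cp` command are assumptions for the example
# only; the point is the stageInFile()/stageOutFile() contract: return
# (checksum, checksum_type) on success -- or (None, None) and let
# stageIn()/stageOut() calculate and verify the checksum -- and raise a
# PilotException built via resolveStageErrorFromOutput() on failure.
#
#   class CopyToolSiteMover(BaseSiteMover):
#
#       name = 'cp'
#
#       def stageInFile(self, source, destination, fspec=None):
#           cmd = 'cp %s %s' % (source, destination)
#           c = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
#           output, error = c.communicate()
#           if c.returncode:
#               err = self.resolveStageErrorFromOutput(error or output, fspec and fspec.lfn, is_stagein=True)
#               raise PilotException(err['error'], code=err['rcode'], state=err['state'])
#           return None, None
#
#       def stageOutFile(self, source, destination, fspec):
#           return self.stageInFile(source, destination, fspec)
# ---------------------------------------------------------------------------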
#!/usr/bin/env python from __future__ import print_function from __future__ import absolute_import import string from .jats import JATSParser from .author_init import AuthorInitial from pyingest.config.config import * class NoSchemaException(Exception): pass class WrongSchemaException(Exception): pass class UnparseableException(Exception): pass class OUPJATSParser(JATSParser): def oup_journals(self, pid): try: bibstem = OUP_PUBLISHER_IDS[pid] except KeyError: return 'XSTEM' else: return bibstem def get_tmp_page(self, bs): try: f = OUP_TMP_DIRS[bs.lower()] except Exception as err: pass else: f = f + "early.dat.nocheck" try: if sys.version_info > (3,): open_mode = 'rb' else: open_mode = 'rU' with open(f, open_mode) as fp: p = fp.readline() return p.split()[0] except Exception as err: pass def update_tmp_file(self, bs, bib, doi): try: f = OUP_TMP_DIRS[bs.lower()] except Exception as err: pass else: f = f + "early.dat.nocheck" l = bib + "\t" + doi + "\n" try: with open(f, 'a') as fp: fp.write(l) # now replace first line c = bib[14:18] c = c.replace('.', '') c = c + "\n" with open(f, 'r') as fp: lines = fp.readlines() # if you're replacing the first line, this is fine lines[0] = c # if you want to insert something before the first line, # use this: # lines.insert(0, c) with open(f, 'w') as fp: # writelines doesn't need a return value, and doing this # will destroy lines after it's done. # lines = fp.writelines(lines) fp.writelines(lines) except Exception as err: pass def dbfrombs(self, bs): db = [] try: bibs = OUP_PUBLISHER_IDS[bs] except KeyError: return 'XSTEM' else: if bibs == 'ptep': database = "PHY" elif bibs == 'gji': database = "PHY" else: database = "AST" return database def getnexttmp(self, bs): tmpid = [] try: bibs = OUP_PUBLISHER_IDS[bs] except KeyError: return 'XSTEM' else: database = "PHY" return tmpid def parse(self, input_data, **kwargs): output_metadata = super(self.__class__, self).parse(input_data, **kwargs) # Publication + isearly = 0 try: pubstring = output_metadata['publication'] except Exception as err: pass else: try: output_metadata['volume'] except Exception as err: pass else: if output_metadata['volume'] == "None": pubstring = pubstring + ', Advance Access' isearly = 1 else: pubstring = pubstring + ', Volume ' + output_metadata['volume'] try: output_metadata['issue'] except TypeError: pass else: pubstring = pubstring + ', Issue ' + output_metadata['issue'] try: output_metadata['page'] except Exception as err: pass else: if "-" in output_metadata['page']: pubstring = pubstring + ', pp.' + output_metadata['page'] else: pubstring = pubstring + ', id.' + output_metadata['page'] if 'numpages' in output_metadata: pubstring = pubstring + ', ' + output_metadata['numpages'] + ' pp.' del(output_metadata['numpages']) output_metadata['publication'] = pubstring # Bibcode try: j_bibstem = self.oup_journals(output_metadata['pub-id']) except KeyError: pass else: year = output_metadata['pubdate'][-4:] bibstem = j_bibstem.ljust(5, '.') if isearly: if 'Letter' in pubstring: issue_letter = "L" bibstem = "MNRASL" else: issue_letter = "." 
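                # Advance Access (early) articles: the volume becomes ".tmp" and the id is
                # the next value of the per-journal counter kept by get_tmp_page()/update_tmp_file()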
                idno = self.get_tmp_page(bibstem)
                try:
                    idno = int(idno) + 1
                except Exception as err:
                    print("Issue with tmp bibstem:", err, idno)
                idno = str(idno)
                idno = idno.rjust(4, '.')
                volume = ".tmp"
            else:
                volume = output_metadata['volume'].rjust(4, '.')
                if output_metadata['pub-id'] == 'ptep':
                    issue_letter = string.ascii_letters[int(output_metadata['issue']) - 1]
                    idno = output_metadata['page']
                    if len(idno) == 6:
                        try:
                            idtwo = string.ascii_letters[int(idno[0:2]) - 1]
                        except Exception as err:
                            idtwo = idno[0:2]
                        idfour = idno[2:]
                    else:
                        idtwo = ''
                        idfour = idno.rjust(4, '.')
                    idno = idfour
                elif output_metadata['pub-id'] == 'mnrasl':
                    issue_letter = 'L'
                    if "-" in output_metadata['page']:
                        idno = output_metadata['page'].split("-")[0]
                    else:
                        idno = output_metadata['page']
                    if idno.startswith("L"):
                        idno = idno.lstrip(idno[:1])
                    idno = idno.rjust(4, '.')
                else:
                    issue_letter = '.'
                    if "-" in output_metadata['page']:
                        idno = output_metadata['page'].split("-")[0]
                    else:
                        idno = output_metadata['page']
                    idno = idno.rjust(4, '.')
            try:
                a = AuthorInitial()
                author_init = a.get_author_init(output_metadata['authors'])
            except Exception as err:
                print(err)
                author_init = '.'
            # it would be better to keep two separate variables for the bibstem,
            # since MNRASL shares a bibcode stem with MNRAS
            if bibstem == "MNRASL":
                bibstem = "MNRAS"
            output_metadata['bibcode'] = year + bibstem + volume + issue_letter + idno + author_init
            if issue_letter == 'L':
                bibstem = "MNRASL"
            if isearly:
                v = self.update_tmp_file(bibstem, output_metadata['bibcode'], output_metadata['properties']['DOI'])
                del output_metadata['page']
                isearly = 0

        if 'DOI' in output_metadata['properties']:
            plink = "/".join(["https:/", "academic.oup.com", output_metadata['pub-id'], "pdf-lookup", "doi", output_metadata['properties']['DOI']])
            output_metadata['properties'].update({'PDF': plink})

        # Return
        return output_metadata
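
# Example usage (illustrative only): the exact input accepted by parse() is
# defined by the parent JATSParser; an open file handle and the file name
# below are assumptions.
#
#   parser = OUPJATSParser()
#   with open('oup_article.xml', 'rU') as fp:
#       record = parser.parse(fp)
#   print(record['bibcode'], record['publication'])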
# encoding: utf-8 ''' @author: Majeed.sahebzadha ''' from __future__ import print_function, unicode_literals import numpy as np import pandas as pd import re import time import os from collections import OrderedDict import json import pickle from pptx import Presentation from pptx.chart.data import ChartData from add_shapes import * from transformations import * from os.path import ( basename, dirname, split ) from pptx.enum.chart import ( XL_CHART_TYPE, XL_LABEL_POSITION, XL_LEGEND_POSITION, XL_TICK_MARK, XL_TICK_LABEL_POSITION ) from pptx.util import ( Emu, Pt, Cm, Inches ) from pptx.enum.dml import ( MSO_THEME_COLOR, MSO_COLOR_TYPE, MSO_FILL ) from pptx.enum.text import ( PP_ALIGN, MSO_AUTO_SIZE, MSO_ANCHOR ) pd.set_option('display.expand_frame_repr', False) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def file_exists(file_name): ''' check if file exists ''' return os.path.isfile(file_name) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def pickle_file(obj_to_pickle, file_name): if ".pickle" not in file_name: file_name = "%s.pickle" % file_name with open(file_name, 'wb') as handle: pickle.dump(obj_to_pickle, handle) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def load_pickled_file(file_name): if ".pickle" not in file_name: file_name = "%s.pickle" % file_name with open(file_name, 'rb') as handle: picked_obj = pickle.load(handle) return picked_obj '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def save_json(obj, json_path): ''' Saves obj as a json text file at json_path ''' from decoder_ring import as_ascii print("Saving json: {f}".format(f=json_path)) obj = as_ascii(obj, control=False, extended=True, encoding='UTF-8') with open(json_path, 'w') as f: json.dump(obj, f) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def rename_duplicate_shape_names(pres_path, overwrite=True): ''' Ensures all shapes have a unique name. Only renames duplicates. Compares shape names one slide at a time. ''' file_name = basename(pres_path).split('.')[0] file_path = dirname(pres_path) prs = Presentation(pres_path) for slide in prs.slides: shape_names = [] for shape in slide.shapes: shape_names.append(shape.name) renamed_shapes = [x + "_" + str(i) if shape_names.count(x)>1 else x for i, x in enumerate(shape_names)] for s_idx, shape in enumerate(slide.shapes): shape.name = renamed_shapes[s_idx] if overwrite: prs.save('{pres_path}\\{pres_name}.pptx'.format( pres_path=file_path, pres_name=file_name)) else: prs.save('{pres_path}\\{pres_name}_edited.pptx'.format( pres_path=file_path, pres_name=file_name)) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def read_pptx(pres_path, slide_num=[], save_as_json=True): ''' Iterates through an existing PPTX and prints info re slide and shapes. 
param: pres_path - full path of target file param: slide_num - list parem: save_as_json - boolean example useage: *read_pptx(pres) *read_pptx(pres, [20,25,15], False) ''' if not isinstance(slide_num, list): slide_num = [slide_num] prs = Presentation(pres_path) file_name = os.path.basename(pres_path).split('.')[0] pptx_tree = OrderedDict() pptx_tree[file_name] = OrderedDict() pptx_tree[file_name]['slides'] = OrderedDict() print('Analysing PPTX content...\n') for i, sld in enumerate(prs.slides, start=1): if slide_num: if i in slide_num: slide_number = str(i) pptx_tree[file_name]['slides'][slide_number] = OrderedDict() print('{indent:>5}slide layout name : {sld_layout_name}\n'. format( indent='', sld_layout_name=sld.slide_layout.name)) pptx_tree[file_name]['slides'][slide_number]['slide layout'] = OrderedDict() slide_layout_name = str(sld.slide_layout.name) pptx_tree[file_name]['slides'][slide_number]['slide layout']['name'] = slide_layout_name pptx_tree[file_name]['slides'][slide_number]['shapes'] = OrderedDict() for x, shp in enumerate(sld.shapes): print('{indent:>10}shape index - {x}'. format( indent='', x=x)) shape_number = str(x) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number] = OrderedDict() print('{indent:>15}shape name - {shape_name}'. format( indent='', shape_name=shp.name)) shape_name = str(shp.name) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape name'] = shape_name print('{indent:>15}shape type - {shape_type}'. format( indent='', shape_type=shp.shape_type)) shape_type = str(shp.shape_type) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape type'] = shape_type if str(shp.shape_type) == 'PLACEHOLDER (14)': print('{indent1:>15}placeholder idx - {placeholder_idx},\n' '{indent2:>15}placeholder type - {placeholder_type}'. format( indent1='', indent2='', placeholder_idx=shp.placeholder_format.idx, placeholder_type=shp.placeholder_format.type)) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder idx'] = str(shp.placeholder_format.idx) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder type'] = str(shp.placeholder_format.type) print('{indent:>15}shape dimensions - ' 'left: {shape_left}, top: {shape_top}, ' 'height: {shape_height}, width: {shape_width}\n'. format( indent='', shape_left=shp.left, shape_height=shp.height, shape_top=shp.top, shape_width=shp.width)) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions'] = OrderedDict() pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['left'] = str(shp.left) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['top'] = str(shp.top) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['width'] = str(shp.width) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['height'] = str(shp.height) else: print('='*110) print('{indent:>0}Slide {i} details:\n'. format( indent='', i=i)) slide_number = str(i) pptx_tree[file_name]['slides'][slide_number] = OrderedDict() print('{indent:>5}slide layout name : {sld_layout_name}\n'. 
format( indent='', sld_layout_name=sld.slide_layout.name)) pptx_tree[file_name]['slides'][slide_number]['slide layout'] = OrderedDict() slide_layout_name = str(sld.slide_layout.name) pptx_tree[file_name]['slides'][slide_number]['slide layout']['name'] = slide_layout_name pptx_tree[file_name]['slides'][slide_number]['shapes'] = OrderedDict() for x, shp in enumerate(sld.shapes): print('{indent:>10}shape index - {x}'. format( indent='', x=x)) shape_number = str(x) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number] = OrderedDict() print('{indent:>15}shape name - {shape_name}'. format( indent='', shape_name=shp.name)) shape_name = str(shp.name) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape name'] = shape_name print('{indent:>15}shape id - {shape_id}'. format( indent='', shape_id=shp.id)) shape_id = str(shp.id) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape id'] = shape_id print('{indent:>15}shape type - {shape_type}'. format( indent='', shape_type=shp.shape_type)) shape_type = str(shp.shape_type) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape type'] = shape_type if str(shp.shape_type) == 'PLACEHOLDER (14)': print('{indent1:>15}placeholder idx - {placeholder_idx},\n' '{indent2:>15}placeholder type - {placeholder_type}'. format( indent1='', indent2='', placeholder_idx=shp.placeholder_format.idx, placeholder_type=shp.placeholder_format.type)) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder idx'] = str(shp.placeholder_format.idx) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder type'] = str(shp.placeholder_format.type) print('{indent:>15}shape dimensions - ' 'left: {shape_left}, top: {shape_top}, ' 'height: {shape_height}, width: {shape_width}\n'. format( indent='', shape_left=shp.left, shape_height=shp.height, shape_top=shp.top, shape_width=shp.width)) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions'] = OrderedDict() pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['left'] = str(shp.left) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['top'] = str(shp.top) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['width'] = str(shp.width) pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['height'] = str(shp.height) if save_as_json: save_json(pptx_tree, file_name+'.json') print('Finished') '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def read_slide(sld): ''' Takes a slide object and print info regarding the shapes on the given slide. ''' for x, shp in enumerate(sld.shapes): print('{indent:>5}shape index - {x}'.format(indent='', x=x)) print('{indent:>10}shape name - {shape_name}'.format(indent='', shape_name=shp.name)) print('{indent:>15}shape type - {shape_type}'.format(indent='', shape_type=shp.shape_type)) if str(shp.shape_type) == 'PLACEHOLDER (14)': print('{indent:>15}placeholder idx - {placeholder_idx}, placeholder type - {placeholder_type}'. format(indent='', placeholder_idx=shp.placeholder_format.idx, placeholder_type=shp.placeholder_format.type)) print('{indent:>15}shape dimensions - left ({shape_left}), top ({shape_top}), height ({shape_height}), width ({shape_width})\n'. 
format(indent='', shape_left=shp.left, shape_top=shp.top, shape_height=shp.height, shape_width=shp.width)) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def read_chart_properties(pres_path, slide_num, chart_name): ''' This function prints a given chart's property settings. param: pres_path - full path of target file param: slide_num - single slide number param: chart_name - object name as it appears within powerpoint's Object Selection Pane ''' prs = Presentation(pres_path) for i, sld in enumerate(prs.slides, start=1): if i == slide_num: for x, shp in enumerate(sld.shapes): if shp.name == chart_name: print('chart >\n') print(' chart_style: {chart_style}'.format(chart_style=shp.chart.chart_style)) print(' has_legend: {has_legend}'.format(has_legend=shp.chart.has_legend)) print(' legend: {legend}\n'.format(legend=shp.chart.legend)) print('-'*110) caxis = shp.chart.category_axis print('chart > category axis properties\n') print(' has_major_gridlines: {has_major_gridlines}'.format(has_major_gridlines=caxis.has_major_gridlines)) print(' has_minor_gridlines: {has_minor_gridlines}'.format(has_minor_gridlines=caxis.has_minor_gridlines)) print(' major_tick_mark: {major_tick_mark}'.format(major_tick_mark=caxis.major_tick_mark)) print(' maximum_scale: {maximum_scale}'.format(maximum_scale=caxis.maximum_scale)) print(' minimum_scale: {minimum_scale}'.format(minimum_scale=caxis.minimum_scale)) print(' minor_tick_mark: {minor_tick_mark}'.format(minor_tick_mark=caxis.minor_tick_mark)) print(' tick_labels: {tick_labels}'.format(tick_labels=str(caxis.tick_labels))) print(' tick_label_position: {tick_label_position}'.format(tick_label_position=caxis.tick_label_position)) print(' tick_labels_font_name: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.name)) print(' tick_labels_font_size: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.size)) print(' tick_labels_font_bold: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.bold)) print(' tick_labels_font_color: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.color)) print(' tick_labels_font_italic: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.italic)) print(' tick_labels_font_underline: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.underline)) print(' tick_labels_number_format: {tick_labels_number_format}'.format(tick_labels_number_format=caxis.tick_labels.number_format)) print(' tick_labels_number_format_is_linked: {tick_labels_number_format_is_linked}'.format(tick_labels_number_format_is_linked=caxis.tick_labels.number_format_is_linked)) print(' tick_labels_offset: {tick_labels_offset}'.format(tick_labels_offset=caxis.tick_labels.offset)) print(' visible: {visible}\n'.format(visible=caxis.visible)) print('-'*110) vaxis = shp.chart.value_axis print('chart > value axis properties\n') print(' has_major_gridlines: {has_major_gridlines}'.format(has_major_gridlines=vaxis.has_major_gridlines)) print(' has_minor_gridlines: {has_minor_gridlines}'.format(has_minor_gridlines=vaxis.has_minor_gridlines)) print(' major_tick_mark: {major_tick_mark}'.format(major_tick_mark=vaxis.major_tick_mark)) print(' maximum_scale: {maximum_scale}'.format(maximum_scale=vaxis.maximum_scale)) print(' minimum_scale: {minimum_scale}'.format(minimum_scale=vaxis.minimum_scale)) print(' major_unit: {major_unit}'.format(major_unit=vaxis.major_unit)) 
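                    # note: the scale and unit properties come back as None when PowerPoint is left to pick them automatically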
print(' minor_unit: {minor_unit}'.format(minor_unit=vaxis.minor_unit)) print(' minor_tick_mark: {minor_tick_mark}'.format(minor_tick_mark=vaxis.minor_tick_mark)) print(' tick_labels: {tick_labels}'.format(tick_labels=vaxis.tick_labels)) print(' tick_label_position: {tick_label_position}'.format(tick_label_position=vaxis.tick_label_position)) print(' tick_labels_font_name: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.name)) print(' tick_labels_font_size: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.size)) print(' tick_labels_font_bold: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.bold)) print(' tick_labels_font_color: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.color)) print(' tick_labels_font_italic: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.italic)) print(' tick_labels_font_underline: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.underline)) print(' tick_labels_font: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font)) print(' tick_labels_number_format: {tick_labels_number_format}'.format(tick_labels_number_format=vaxis.tick_labels.number_format)) print(' tick_labels_number_format_is_linked: {tick_labels_number_format_is_linked}'.format(tick_labels_number_format_is_linked=vaxis.tick_labels.number_format_is_linked)) print(' visible: {visible}\n'.format(visible=vaxis.visible)) print('-'*110) for item in shp.chart.plots: print('chart > plot\n') print(' plot_categories: {plot_cats}'.format(plot_cats=item.categories)) print(' plot_gap_width: {gap_width}'.format(gap_width=item.gap_width)) print(' has_data_labels: {has_data_labels}'.format(has_data_labels=item.has_data_labels)) print(' overlap: {overlap}'.format(overlap=item.overlap)) print(' vary_by_categories: {vary_by_cat}\n'.format(vary_by_cat=item.vary_by_categories)) print('-'*110) font = item.data_labels.font print('chart > plot > data labels > font \n') print(' data_label_font_name: {font_name}'.format(font_name=font.name)) print(' data_label_font_size: {font_size}'.format(font_size=font.size)) print(' data_label_font_bold: {data_label_font}'.format(data_label_font=font.bold)) print(' data_label_font_color {font_color}'.format(font_color=font.color)) print(' data_label_font_fill {font_fill}'.format(font_fill=font.fill)) print(' data_label_font_italic: {font_italic}'.format(font_italic=font.italic)) print(' data_label_font_underline: {font_underline}\n'.format(font_underline=font.underline)) print('-'*110) for ser in item.series: print('chart > plot > series\n') print(' series_fill_type: {fill_type}'.format(fill_type=ser.fill.type)) print(' series_invert_if_neg: {invert_if_neg}'.format(invert_if_neg=ser.invert_if_negative)) print(' series_line: {line}'.format(line=ser.line)) print(' series_name: {name}'.format(name=ser.name)) print(' series_values: {values}'.format(values=ser.values)) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def get_chart_data_from_prs(pres_path, slide_num, chart_name): ''' This function 1) pulls a given chart's data and 2) returns it as a pandas dataframe object in a list param: pres_path - full path of target file param: slide_num - takes a list of slides param: chart_name - object name as it appears within powerpoint's Object Selection Pane ''' prs = Presentation(pres_path) collection_of_dfs = [] for i, sld in enumerate(prs.slides, 
start=1): if i in slide_num: for x, shp in enumerate(sld.shapes): if shp.name == chart_name: plot = shp.chart.plots[0] columns = [] data = [] for series in plot.series: columns.append(str(series.name)) data.append(series.values) data = np.array(data) rows = np.array(plot.categories) df = pd.DataFrame(data.T, index=rows, columns=columns) collection_of_dfs.append(df) return(collection_of_dfs) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def replace_chart_data_in_prs(pres_path, slide_num, chart_name, df): ''' This function 1) enters an existing powerpoint, 2) finds given slides, 3) finds given chart by name and 4) replaces the given chart's underlying data with new data in the form of a dataframe. param: pres_path - takes the full path of target file param: slide_num - takes a list of slides param: chart_name - object name as it appears within powerpoint's Object Selection Pane param: df - takes a list of pandas dataframe objects ''' PRES_FOLDER_FOLDER = dirname(pres_path) PRES_NAME = basename(pres_path).replace('.pptx','') prs = Presentation(pres_path) loop_counter=0 for i, sld in enumerate(prs.slides, start=1): if i in slide_num: for x, shp in enumerate(sld.shapes): if shp.name == chart_name: single_df = df[loop_counter] chart_data = ChartData() chart_data.categories = single_df.index for col_idx, col in enumerate(single_df.columns): chart_data.add_series(col, (single_df.ix[:, col_idx].values)) shp.chart.replace_data(chart_data) loop_counter+=1 prs.save('{pres_path}\\{pres_name}_edited.pptx'.format( pres_path=PRES_FOLDER_FOLDER, pres_name=PRES_NAME)) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def get_slide_layout_names(pptx): ''' Print slide layout names ''' for i, slide_layout in enumerate(pptx.slide_layouts): print(slide_layout.name) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def return_slide_layout_by_name(pptx, slide_layout_name): ''' Loop over the slide layout object and find slide layout by name, return slide layout object. 
example: myslide = get_slide_layout_by_name(prs, 'Inhaltsverzeichnis') slide = prs.slides.add_slide(myslide) ''' for slide_layout in pptx.slide_layouts: if slide_layout.name == slide_layout_name: return slide_layout else: raise Exception( ('Slide layout: {sld_layout} not found\n').format( sld_layout = slide_layout_name)) '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def get_chart_data(shape): plot = shape.chart.plots[0] columns = [] data = [] for series in plot.series: columns.append(series.name) data.append(series.values) data = np.array(data) rows = np.array(plot.categories) df = pd.DataFrame(data.T, index=rows, columns=columns) return df '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def get_chart_data_temp(shape): plot = shape.chart.plots[0] series_names =[] data = [] for series in plot.series: series_names.append(series.name) data.append(series.values) cols = plot.categories df = pd.DataFrame(data, index=series_names, columns=cols) return df '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def replace_chart_data(shape, df): chart_data = ChartData() chart_data.categories = df.index for col_idx, col in enumerate(df.columns): chart_data.add_series(col, (df.ix[:, col_idx].values)) shape.chart.replace_data(chart_data) return shape '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def get_slide(pptx, slide_num): '''' active slides are slides which exist in the VIEW mode, not in slide master. ''' return pptx.slides[slide_num] '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def get_shape(slide_num, shape_name): for shp in slide_num.shapes: if shp.name == shape_name: if shp.is_placeholder: p_idx = shp.placeholder_format.idx shp = slide_num.placeholders[p_idx] return shp '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def copy_txtbox_properties(shp_copy_from, shp_copy_to): ''' Copies over one textbox's properties to another. 
''' # get original slide's shapes dimensions left = shp_copy_from.left top = shp_copy_from.top width = shp_copy_from.width height = shp_copy_from.height # access textframe property for both original and replica shapes txtframe_ori = shp_copy_from.text_frame txtframe_rep = shp_copy_to.text_frame # transfer textbox setters from original to replica at textbox level txtframe_rep.margin_bottom = txtframe_ori.margin_bottom txtframe_rep.margin_left = txtframe_ori.margin_left txtframe_rep.margin_right = txtframe_ori.margin_right txtframe_rep.margin_top = txtframe_ori.margin_top txtframe_rep.vertical_anchor = txtframe_ori.vertical_anchor txtframe_rep.word_wrap = txtframe_ori.word_wrap txtframe_rep.paragraphs[0].text = txtframe_ori.paragraphs[0].text txtframe_rep.paragraphs[0].alignment = txtframe_ori.paragraphs[0].alignment # color textboxes accordingly try: color_code = str(shp_copy_from.fill.fore_color.rgb) txfill = shp_copy_to.fill txfill.solid() txfill.fore_color.rgb = RGBColor.from_string(color_code) except: pass # get font size and transfer it to replica shapes for paragraph in txtframe_ori.paragraphs: for run in paragraph.runs: font = run.font try: font_size = font.size.pt t = txtframe_rep.paragraphs[0] t.font.size = Pt(font_size) except: pass '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' def copy_chart_properties(shp_copy_from, sld_rep): original_shapes_chart_type = str(shp_copy_from.chart.chart_type).split(" ")[0] df = get_chart_data(shp_copy_from) #-------------------------------------------------------------------- add_bar_chart( sld=sld_rep, dataframe=df, left=shp_copy_from.left, top=shp_copy_from.top, width=shp_copy_from.width, height=shp_copy_from.height, chart_style=shp_copy_from.chart.chart_style, has_legend=shp_copy_from.chart.has_legend, legend_position='right', legend_in_layout=False, legend_horz_offset = 0.1583, legend_font_name="Calibri", legend_font_size=10, legend_font_bold=False, legend_font_italic=False, legend_font_color=(89,89,89), legend_font_brightness=0, caxis_visible=True, caxis_tick_label_position='none', caxis_tick_labels_offset=730, caxis_has_major_gridlines=shp_copy_from.chart.category_axis.has_major_gridlines, caxis_has_minor_gridlines=False, caxis_major_tick_mark='outside', caxis_minor_tick_mark='none', caxis_tick_labels_font_name="Calibri", caxis_tick_labels_font_size=10, caxis_tick_labels_font_bold=False, caxis_tick_labels_font_italic=False, caxis_tick_labels_font_color=(89,89,89), vaxis_visible=shp_copy_from.chart.value_axis.visible, vaxis_tick_label_position='low', vaxis_has_major_gridlines=True, vaxis_has_minor_gridlines=False, vaxis_major_tick_mark='outside', vaxis_minor_tick_mark='none', vaxis_max_scale=100.0, vaxis_min_scale=0, vaxis_major_unit=10, vaxis_minor_unit=None, vaxis_tick_labels_num_format='0"%"', vaxis_tick_labels_font_name="Calibri", vaxis_tick_labels_font_bold=True, vaxis_tick_labels_font_size=10, vaxis_tick_labels_font_italic=False, vaxis_tick_labels_font_color=(89,89,89), plot_has_data_labels=True, data_labels_position='outside_end', data_labels_num_format='0"%"', data_labels_num_format_is_linked=False, data_labels_font_name="Calibri", data_labels_font_size=9, data_labels_font_bold=False, data_labels_font_italic=False, data_labels_font_color=(0,0,0), plot_vary_by_cat=False, series_color_order='reverse', invert_series_color_if_negative=False, plot_gap_width=150, plot_overlap=-10 )
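
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

# Example usage (illustrative only -- the file path, slide index and shape name
# are placeholders): pull a chart's data as a dataframe, modify it and write it
# back using the helpers defined above.
#
#   prs = Presentation('deck.pptx')
#   sld = get_slide(prs, 5)                   # slide 6 (0-based index)
#   chart_shape = get_shape(sld, 'Chart 1')   # name as shown in the Selection Pane
#   df = get_chart_data(chart_shape)
#   replace_chart_data(chart_shape, df * 1.1) # e.g. scale every series by 10%
#   prs.save('deck_edited.pptx')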